先放代码, 内容稍后补充
- 目标 URL: https://www.mafengwo.cn/search/q.php?q=%E4%BC%8A%E7%8A%81&seid=F5A9009A-07AE-4577-A84B-F653CB21522B
# -*- coding: utf-8 -*-
"""Scrape Mafengwo POI comment pages and render the text as a word cloud."""

import re

import jieba
import matplotlib.pyplot as plt
import requests
from imageio import imread
from wordcloud import STOPWORDS, WordCloud


def getComment(n, http):
    """Fetch comments for every POI detail page listed in *http*.

    Parameters
    ----------
    n : int
        Upper bound (exclusive) on the comment-page number: pages
        1..n-1 are requested.  NOTE(review): looks like an off-by-one
        if the intent was "n pages" — confirm.
    http : list[str]
        Mafengwo POI detail-page URLs; the numeric POI id is parsed
        from the final path segment (".../poi/<id>.html").

    Returns
    -------
    str
        All comment bodies concatenated into one string.
    """
    comment_url = 'http://pagelet.mafengwo.cn/poi/pagelet/poiCommentListApi?'
    collected = []  # accumulate pieces; ''.join at the end avoids quadratic +=
    for page_url in http:
        # POI id sits between the last '/' and the '.html' suffix.
        poi = page_url[page_url.rfind("/") + 1:page_url.rfind(".")]
        requests_headers = {
            'Referer': page_url,
            'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                           'AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/79.0.3945.88 Safari/537.36'),
        }
        for page_no in range(1, n):
            # Testing showed only the 'params' JSON blob is required.
            requests_data = {
                'params': '{"poi_id":' + poi + ',"page":"%d","just_comment":1}' % page_no
            }
            response = requests.get(url=comment_url,
                                    headers=requests_headers,
                                    params=requests_data)
            if response.status_code != 200:
                print("爬取失败")
                continue
            # The endpoint returns \uXXXX-escaped JSON; round-trip the raw
            # bytes to recover readable UTF-8, then unescape '\/' to '/'.
            page = (response.content
                    .decode('unicode-escape', 'ignore')
                    .encode('utf-8', 'ignore')
                    .decode('utf-8'))
            page = page.replace('\\/', '/')
            # TODO(review): the three patterns below were mangled when this
            # code was copied from the blog (the HTML tags inside the regexes
            # were stripped).  Restore them from the live page markup before
            # relying on the extraction.
            date_pattern = r'评论.*?n.*?(.*?)'          # date of each comment
            star_pattern = r''                           # star rating (broken)
            comment_pattern = r'以伊犁哈萨克自治州为例([\s\S]*?)\n'  # comment body
            date_list = re.findall(date_pattern, page)
            star_list = re.findall(star_pattern, page)
            comment_list = re.findall(comment_pattern, page)
            for idx in range(len(date_list)):
                date = date_list[idx]
                star = star_list[idx]
                # Strip whitespace/newline residue left by the markup.
                comment = str(comment_list[idx]).replace(' ', '').replace('\n', '')
                print("日期:{0}, 评分:{1}, 评论:{2}".format(date, star, comment), idx)
                collected.append(comment)
    return ''.join(collected)


def wordcloud_fig(comm, inpath, outpath, color, dpi):
    """Render *comm* as a mask-shaped word cloud and save it to *outpath*.

    Parameters
    ----------
    comm : str
        Raw Chinese text; segmented with jieba before rendering.
    inpath : str
        Path of the RGB mask image that shapes the cloud.
    outpath : str
        Path the rendered figure is saved to (via plt.savefig).
    color : str
        Matplotlib colormap name passed to WordCloud(colormap=...).
    dpi : int
        Resolution handed to plt.savefig.
    """
    # Keep the place name "喀拉峻" as a single token during segmentation.
    jieba.add_word("喀拉峻", freq=20000, tag=None)
    seg = " ".join(jieba.lcut(comm, cut_all=False, HMM=True))
    # Domain-specific noise words to drop in addition to the built-ins.
    extra_stopwords = ["田", "不", "对", "上", "了", "的", "我", "在", "是", "也",
                       "我们", "都", "这里", "这", "有", "去", "一个", "就",
                       "没有", "已经", "台", "阿富汗", "才", "知道", "回回", "时候"]
    stopwords = set(STOPWORDS)
    stopwords.update(extra_stopwords)
    mask = imread(inpath, pilmode="RGB")
    wordcloud = WordCloud(scale=8,
                          # Raw string: never let '\' sequences in Windows
                          # paths be interpreted as escapes.
                          font_path=r"D:\Desktop\SimSun.ttf",
                          width=2000, height=1860,
                          background_color=None,
                          stopwords=stopwords,
                          mask=mask,
                          max_font_size=None,
                          random_state=None,
                          relative_scaling='auto',
                          repeat=False,
                          mode='RGBA',
                          colormap=color)
    wordcloud.generate(seg)
    # BUG FIX: removed '%pylab inline' — it is IPython magic and a
    # SyntaxError in a plain .py module.
    plt.imshow(wordcloud, interpolation='bilinear', cmap=plt.cm.gray)
    plt.axis("off")
    plt.savefig(outpath, dpi=dpi)
    plt.show()
http_yili = [ "https://www.mafengwo.cn/poi/6583819.html", "https://www.mafengwo.cn/poi/2026.html", "http://www.mafengwo.cn/poi/27788.html", "http://www.mafengwo.cn/poi/27802.html", "http://www.mafengwo.cn/poi/7693277.html", "http://www.mafengwo.cn/poi/28099.html", "http://www.mafengwo.cn/poi/28069.html" ] com_yili = getComment(n=10, http=http_yili) wordcloud_fig(com_yili, "D:\Desktop\ciyun\output_fig\yili2.png")
词云结果
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)