# coding: utf-8
from bs4 import BeautifulSoup          # parse the page and extract data
import re                              # regular expressions for text matching
import urllib.request, urllib.error    # build the request and fetch the page
import xlwt                            # write the results to an Excel (.xls) file
# Compile the regular expression objects that describe the extraction rules.
# (The patterns below are reconstructed: the code that uses them expects
# markup of the form <a href="...">title</a>.)
findlink = re.compile(r'<a href="(.*?)"')    # captures the news link
findjs = re.compile(r'">(.*?)</a>')          # captures the news title
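# For a hypothetical anchor such as <a href="https://news.163.com/a.html">Headline</a>,
# findlink captures "https://news.163.com/a.html" and findjs captures "Headline".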
def main():
    a = 1
    # Crawl the page and extract the data.
    baseurl = "https://news.163.com/"
    datalist, a = getData(baseurl, a)
    # Save the data to disk.
    savepath = "news.xls"
    saveData(savepath, datalist, a)
def getData(baseurl, a):
    datalist = []
    html = askURL(baseurl)
    soup = BeautifulSoup(html, "html.parser")
    for item in soup.find_all('div', class_="hidden"):  # find the tags that match the rule
        for c in item.find_all('a'):                    # each <a> inside is one news entry
            c = str(c)
            data = []
            head = findjs.findall(c)                    # extract the news title
            data.append(head[0] if head else "")        # first match as a plain string (xlwt cannot write a list)
            link = findlink.findall(c)                  # extract the news link
            data.append(link[0] if link else "")
            a += 1                                      # running count of saved items
            datalist.append(data)
    return datalist, a
def saveData(savepath, datalist, a):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # create the workbook object
    sheet = book.add_sheet("news", cell_overwrite_ok=True)       # create the worksheet
    col = ("news title", "news link")
    for i in range(0, len(col)):
        sheet.write(0, i, col[i])                                # header row
    for i in range(1, a):
        for j in range(0, len(col)):
            sheet.write(i, j, datalist[i - 1][j])
        print("Saved news item %.3d" % i)
    print("Finished saving")
    book.save(savepath)
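# Note: xlwt writes the legacy .xls format, which caps a worksheet at 65,536
# rows; far more than one front-page crawl produces, but worth knowing before
# pointing this at a larger source.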
# Fetch the page content for a given URL.
def askURL(url):
    # Spoof a browser's User-Agent header so the server answers the request.
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763"}
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
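# Note: when the request fails, askURL returns an empty string, so getData
# quietly finds nothing; passing a timeout (urllib.request.urlopen accepts a
# timeout argument) would keep a stalled connection from hanging the script.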
if __name__ == "__main__":
    main()