使用re,urllib,threading 多线程抓取天涯帖子内容,设置url为需抓取的天涯帖子的第一页,设置file_name为下载后的文件名
复制代码 代码如下:
# -*- coding: utf-8 -*-
import urllib
import re
import threading
import os,time
class Down_Tianya(threading.Thread):
"""多线程下载"""
def __init__(self,url,num,dt):
threading.Thread.__init__(self)
self.url = url
self.num = num
self.txt_dict = dt
def run(self):
print 'downling from %s' % self.url
self.down_text()
def down_text(self):
"""根据传入的url抓出各页内容,按页数做键存入字典"""
HTML_content =urllib.urlopen(self.url).read()
text_pattern = re.compile('<span>时间:(.*?)</span>.*?<!-- <div >楼主</div> -->.*?<div ""根据第一页地址抓取总页数"""
HTML_page = urllib.urlopen(url).read()
page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" >下页</a>')
page_result = page_pattern.search(HTML_page)
if page_result:
page_num = int(page_result.group(1))
return page_num
def write_text(page_dict, fn):
    """Write downloaded pages to file *fn* in page order.

    page_dict maps 1-based page numbers to lists of post strings; each
    post has its <br>/<br /> tags converted to CRLF line breaks and is
    separated from the next post by three blank lines.

    Fixes vs. original: the parameter no longer shadows the builtin
    ``dict``, and the file handle is closed via ``with`` even on error.
    """
    with open(fn, 'w') as tx_file:
        # Pages are keyed 1..N, so iterating the range preserves order.
        for page_no in range(1, len(page_dict) + 1):
            for tx in page_dict[page_no]:
                # NOTE(review): stripping every space looks like garbled
                # '&nbsp;' handling in the scraped original — confirm intent.
                tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace(' ', '')
                tx_file.write(tx.strip() + '\r\n' * 4)
def main():
url = 'http://bbs.tianya.cn/post-16-996521-1.sHTML'
file_name ='abc.txt'
my_page = page(url)
my_dict = {}
print 'page num is : %s' % my_page
threads = []
"""根据页数构造urls进行多线程下载"""
for num in range(1,my_page+1):
myurl = '%s%s.sHTML' % (url[:-7],num)
downList = Down_Tianya(myurl,my_dict)
downList.start()
threads.append(downList)
"""检查下载完成后再进行写入"""
for t in threads:
t.join()
write_text(my_dict,file_name)
print 'All download finished. Save file at directory: %s' % os.getcwd()
if __name__ == '__main__':
main()
down_tianya.py
复制代码 代码如下:
# -*- coding: utf-8 -*-
import urllib
import re
import threading
import os
class Down_Tianya(threading.Thread):
"""多线程下载"""
def __init__(self,dt):
threading.Thread.__init__(self)
self.url = url
self.num = num
self.txt_dict = dt
def run(self):
print 'downling from %s' % self.url
self.down_text()
def down_text(self):
"""根据传入的url抓出各页内容,按页数做键存入字典"""
HTML_content =urllib.urlopen(self.url).read()
text_pattern = re.compile('<div .*?<span>时间:(.*?)</span>.*?<!-- <div >楼主</div> -->.*?<div adsbygoogle" data-ad-layout="in-article" data-ad-format="fluid" data-ad-client="ca-pub-4605373693034661" data-ad-slot="1300602012"> 总结
以上是内存溢出为你收集整理的python多线程抓取天涯帖子内容示例全部内容,希望文章能够帮你解决python多线程抓取天涯帖子内容示例所遇到的程序开发问题。
如果觉得内存溢出网站内容还不错,欢迎将内存溢出网站推荐给程序员好友。
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)