A Python crawler before and after coroutines


Before using coroutines:
#coding:utf-8
import requests
from lxml import etree
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
# thread pool (imported but never actually used in this version)
import time
header={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/98.0.1108.56'
}
def demand(url):
    # Fetch the table-of-contents page and hand the HTML to the parser.
    r=requests.get(url,headers=header)
    code=r.status_code
    if code==200:
        jiexin(r.text)
    else:
        return "request failed"
def jiexin(html):
    # Parse the chapter list and download the first 100 chapters one by one.
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")

    for href in hrefs[:100]:
        url='https://www.biquge.biz'+href
        wenben(url)
        # time.sleep(1.5)
def wenben(url):
    # Download one chapter, extract its title and body, and append it to a file.
    r=requests.get(url,headers=header).text
    tree=etree.HTML(r)
    title=tree.xpath("//div[@class='bookname']/h1/text()")[0]
    print(title)
    soup=BeautifulSoup(r,'lxml')
    content=soup.find(id="content").text
    contents=content.split()
    # A raw string may not end with a backslash, so the separator is added separately.
    with open(r'D:\图片爬虫练习\笔趣阁\测试1'+'\\'+title+'.txt','a',encoding='utf-8') as fp:
        for line in contents:
            fp.write(line+'\n')


if __name__ == '__main__':
    t1=time.time()
    url=""#就不带url
    demand(url)
    t2=time.time()
    print(t2-t1)
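Note that ThreadPoolExecutor is imported above but never used. Before reaching for coroutines, the same downloads could also have been parallelized with that thread pool. A minimal sketch, assuming the jiexin_threaded name and the worker count of 16 (neither is in the original post):

def jiexin_threaded(html):
    # Same parsing as jiexin(), but the blocking wenben() calls run in threads.
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")
    urls=['https://www.biquge.biz'+href for href in hrefs[:100]]
    with ThreadPoolExecutor(max_workers=16) as pool:
        # list() drains the iterator so any exception raised in a worker surfaces here.
        list(pool.map(wenben, urls))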
After using coroutines:
import requests
from lxml import etree
import asyncio
import aiofiles
import aiohttp
from bs4 import BeautifulSoup
# from concurrent.futures import ThreadPoolExecutor
# thread pool (no longer needed in this version)
import time
header={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/98.0.1108.56'
}
def demand(url):
    # Fetch the table-of-contents page synchronously, then run the async parser.
    r=requests.get(url,headers=header)
    code=r.status_code
    if code==200:
        asyncio.run(jiexin(r.text))
    else:
        return "request failed"

async def jiexin(html):
    # Parse the chapter list and schedule all 100 downloads concurrently.
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")
    tasks=[]
    for href in hrefs[:100]:
        url='https://www.biquge.biz'+href
        tasks.append(asyncio.create_task(wenben(url)))
    # asyncio.wait() no longer accepts bare coroutines (removed in Python 3.11),
    # so the coroutines are wrapped in tasks and gathered instead.
    await asyncio.gather(*tasks)
async def wenben(url):
    # Download one chapter with aiohttp and append it to a file with aiofiles.
    async with aiohttp.ClientSession() as session:
        async with session.get(url,headers=header) as resp:
            r=await resp.text()
            tree=etree.HTML(r)
            title=tree.xpath("//div[@class='bookname']/h1/text()")[0]
            print(title)
            soup=BeautifulSoup(r,'lxml')
            content=soup.find(id="content").text
            contents=content.split()
            async with aiofiles.open(r'D:\图片爬虫练习\笔趣阁\测试1'+'\\'+title+'.txt','a',encoding='utf-8') as f:
                for line in contents:
                    await f.write(line+'\n')


if __name__ == '__main__':
    t1=time.time()
    url=""
    demand(url)
    t2=time.time()
    print(t2-t1)
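One refinement left on the table: the version above opens a new aiohttp.ClientSession for every chapter, while aiohttp recommends sharing a single session so TCP connections are pooled and reused. A minimal sketch of that variant (the two-argument wenben signature is an assumption, not the original code):

async def jiexin(html):
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")
    # One shared session: connections are reused across all 100 requests.
    async with aiohttp.ClientSession(headers=header) as session:
        tasks=[asyncio.create_task(wenben(session,'https://www.biquge.biz'+href))
               for href in hrefs[:100]]
        await asyncio.gather(*tasks)

async def wenben(session,url):
    # The session is passed in instead of being created per chapter.
    async with session.get(url) as resp:
        r=await resp.text()
        # ...parsing and file writing unchanged from the version above...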

The coroutine version is many times faster: the original run took over 100 seconds, while the new one finishes in under 10.
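One caveat on that speedup: the coroutine version fires up to 100 requests at the site at once, which some servers will throttle or block. An asyncio.Semaphore can cap how many run concurrently; a minimal sketch, with the limit of 10 being an assumed value:

async def jiexin(html):
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")
    sem=asyncio.Semaphore(10)  # at most 10 downloads in flight (assumed limit)

    async def bounded(url):
        # Each download waits for a free slot before starting.
        async with sem:
            await wenben(url)

    tasks=[asyncio.create_task(bounded('https://www.biquge.biz'+href))
           for href in hrefs[:100]]
    await asyncio.gather(*tasks)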
