I'm a Python beginner and not very familiar with Scrapy. Today I wanted to pull URLs from a database and crawl them one by one, but after searching Baidu for most of the day I couldn't find a working example, so I'm leaving a record of my solution here.
import scrapy
from scrapy import signals


class scrapy_xiangmuSpider(scrapy.Spider):
    name = "scrapy_xiangmu"

    def start_requests(self):
        # Initial batch: pull URLs from the database and schedule them
        for url in self.get_url_list():
            try:
                yield scrapy.Request(url=url, callback=self.parse, errback=self.http_error)
            except Exception:
                print("request error: " + url)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(scrapy_xiangmuSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        crawler.signals.connect(spider.spider_idle, signals.spider_idle)
        return spider

    # When the spider goes idle, fetch another batch of URLs and schedule them
    def spider_idle(self, spider):
        url_list = self.get_url_list()  # pull URLs from the database
        for url in url_list:
            try:
                # note: on newer Scrapy versions engine.crawl() takes only the request
                self.crawler.engine.crawl(
                    scrapy.Request(url, callback=self.parse, errback=self.http_error),
                    spider,
                )
            except Exception:
                print("request error: " + url)

    def spider_closed(self, spider):
        self.logger.info('spider closed')

    # Extract content from the response
    def parse(self, response):
        print(response.url)

    def http_error(self, failure):
        self.logger.info('request error')
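The code above calls self.get_url_list() but never shows it. Below is a minimal sketch of such a method, assuming a MySQL database accessed through pymysql; the connection settings, the url_table table, and its url/status columns are hypothetical and should be replaced with your own schema. Add it as a method of the spider class.

import pymysql

    def get_url_list(self):
        # Sketch only: connection details, table and column names are assumptions
        conn = pymysql.connect(host='localhost', user='root', password='password',
                               database='spider_db', charset='utf8mb4')
        try:
            with conn.cursor() as cursor:
                cursor.execute("SELECT url FROM url_table WHERE status = 0")
                rows = cursor.fetchall()
        finally:
            conn.close()
        return [row[0] for row in rows]

Run the spider as usual with scrapy crawl scrapy_xiangmu; as long as get_url_list keeps returning URLs, the spider_idle handler keeps feeding new requests back into the engine.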