Copy the code below:
# Scrapy + Selenium RC example: a CrawlSpider follows article links, then
# re-opens each page in a real browser so JavaScript-generated content is available.
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from selenium import selenium  # Selenium RC client (legacy API)

class MySpider(CrawlSpider):
    name = 'cnbeta'
    allowed_domains = ['cnbeta.com']
    start_urls = ['http://www.jb51.net']

    rules = (
        # Follow links matching '/articles/*.htm' and hand each page to parse_page;
        # follow=True keeps the crawler walking further links found on those pages.
        Rule(SgmlLinkExtractor(allow=(r'/articles/.*\.htm',)),
             callback='parse_page', follow=True),
    )

    def __init__(self):
        CrawlSpider.__init__(self)
        self.verificationErrors = []
        # Requires a Selenium RC server already listening on localhost:4444.
        self.selenium = selenium("localhost", 4444, "*firefox", "http://www.jb51.net")
        self.selenium.start()

    def __del__(self):
        # Shut down the Selenium browser session when the spider goes away.
        self.selenium.stop()
        print(self.verificationErrors)

    def parse_page(self, response):
        self.log('Hi, this is an item page! %s' % response.url)
        from webproxy.items import WebproxyItem  # project item class, populated later

        # Re-open the URL in the Selenium-controlled browser so JavaScript runs,
        # then wait for the page load plus a short grace period for async content.
        sel = self.selenium
        sel.open(response.url)
        sel.wait_for_page_to_load("30000")
        import time
        time.sleep(2.5)
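The listing stops right after the rendered page settles. Below is a minimal sketch of how parse_page might continue, using the Selenium RC get_html_source() call to pull the JS-rendered HTML back into a Scrapy Selector; the 'url' and 'html' fields on WebproxyItem are assumptions for illustration, since the original article does not show the item definition:

        # Sketch only: continues parse_page above. Item field names are assumptions.
        html = sel.get_html_source()        # full HTML after JavaScript has run
        rendered = Selector(text=html)      # Scrapy selector over the rendered markup
        item = WebproxyItem()
        item['url'] = response.url                         # 'url' field is an assumption
        item['html'] = rendered.xpath('//body').extract()  # 'html' field is an assumption
        return item

To try this, start a Selenium RC server (the standalone selenium-server JAR) on port 4444 first, then launch the spider as usual with scrapy crawl cnbeta.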