urllib 基础模块的应用示例:通过下面的 ProxyScrapy 类获取 url 对应的 HTML 文档内容;该类内部可以重写代理的获取方法,以支持自定义代理来源。
复制代码 代码如下:
class ProxyScrapy(object):
def __init__(self):
self.proxy_robot = ProxyRobot()
self.current_proxy = None
self.cookie = cookielib.cookieJar()
def __builder_proxy_cookie_opener(self):
cookie_handler = urllib2.httpcookieProcessor(self.cookie)
handlers = [cookie_handler]
if PROXY_ENABLE:
self.current_proxy = ip_port = self.proxy_robot.get_random_proxy()
proxy_handler = urllib2.ProxyHandler({'http': ip_port[7:]})
handlers.append(proxy_handler)
opener = urllib2.build_opener(*handlers)
urllib2.install_opener(opener)
return opener
def get_HTML_body(self,url):
opener = self.__builder_proxy_cookie_opener()
request=urllib2.Request(url)
#request.add_header("Accept-EnCoding","gzip,deflate,sdch")
#request.add_header("Accept","text/HTML,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
#request.add_header("Cache-Control","no-cache")
#request.add_header("Connection","keep-alive")
try:
response = opener.open(request,timeout=2)
http_code = response.getcode()
if http_code == 200:
if PROXY_ENABLE:
self.proxy_robot.handle_success_proxy(self.current_proxy)
HTML = response.read()
return HTML
else:
if PROXY_ENABLE:
self.proxy_robot.handle_double_proxy(self.current_proxy)
return self.get_HTML_body(url)
except Exception as inst:
print inst,self.current_proxy
self.proxy_robot.handle_double_proxy(self.current_proxy)
return self.get_HTML_body(url)
以上是内存溢出为你收集整理的python使用urllib模块和pyquery实现阿里巴巴排名查询全部内容,希望文章能够帮你解决python使用urllib模块和pyquery实现阿里巴巴排名查询所遇到的程序开发问题。
如果觉得内存溢出网站内容还不错,欢迎将内存溢出网站推荐给程序员好友。
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)