1. Scraping images from a single page
Extracting the image data with a regular expression
A partial screenshot of the page source:
Resetting the encoding to GBK solves the garbled-text problem.
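If you are unsure which encoding a page uses, requests can guess it from the response body; a minimal sketch using the apparent_encoding attribute (the printed values are illustrative):

import requests

response = requests.get("http://pic.netbian.com/4kmeinv/index.html")
print(response.encoding)           # encoding taken from the response headers
print(response.apparent_encoding)  # encoding guessed from the body, e.g. 'GB2312'
# Adopt the guess so response.text decodes correctly
response.encoding = response.apparent_encoding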
Code implementation:
import requests
import re

# Save path for the downloaded images
path = 'D:/test/picture_1/'
# Target URL
url = "http://pic.netbian.com/4kmeinv/index.html"
# Spoofed request headers to avoid basic anti-scraping checks
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Referer": "http://pic.netbian.com/4kmeinv/index.html"
}
# Send the request and get the response
response = requests.get(url, headers=headers)
# Printing the page source shows garbled text; resetting the encoding
# fixes it so the content displays correctly and is easy to parse
response.encoding = 'GBK'
# Regex to extract the image links and names
img_info = re.findall('img src="(.*?)" alt="(.*?)" /', response.text)
for src, name in img_info:
    # Prepending 'http://pic.netbian.com' gives the real image URL
    img_url = 'http://pic.netbian.com' + src
    img_content = requests.get(img_url, headers=headers).content
    img_name = name + '.jpg'
    with open(path + img_name, 'wb') as f:  # save the image locally
        print(f"Downloading image: {img_name}")
        f.write(img_content)
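As a quick sanity check, the pattern can be run against a made-up fragment of page source; the src and alt values below are hypothetical:

import re

sample = '<img src="/uploads/a.jpg" alt="Sample name" />'
print(re.findall('img src="(.*?)" alt="(.*?)" /', sample))
# [('/uploads/a.jpg', 'Sample name')]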
Locating and extracting the image data with XPath
Code implementation:
import requests
from lxml import etree

# Save path for the downloaded images
path = 'D:/test/picture_1/'
# Target URL
url = "http://pic.netbian.com/4kmeinv/index.html"
# Spoofed request headers to avoid basic anti-scraping checks
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Referer": "http://pic.netbian.com/4kmeinv/index.html"
}
# Send the request and get the response
response = requests.get(url, headers=headers)
# Printing the page source shows garbled text; resetting the encoding
# fixes it so the content displays correctly and is easy to parse
response.encoding = 'GBK'
html = etree.HTML(response.text)
# XPath to extract the image links and names
# (the <ul> class is assumed to be "clearfix" here)
img_src = html.xpath('//ul[@class="clearfix"]/li/a/img/@src')
# List comprehension to build the real image URLs
img_src = ['http://pic.netbian.com' + x for x in img_src]
img_alt = html.xpath('//ul[@class="clearfix"]/li/a/img/@alt')
for src, name in zip(img_src, img_alt):
    img_content = requests.get(src, headers=headers).content
    img_name = name + '.jpg'
    with open(path + img_name, 'wb') as f:  # save the image locally
        print(f"Downloading image: {img_name}")
        f.write(img_content)
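String concatenation works here because every src on the page is site-relative; urllib.parse.urljoin from the standard library is a more robust way to build absolute URLs (the sample path below is hypothetical):

from urllib.parse import urljoin

base = 'http://pic.netbian.com/4kmeinv/index.html'
print(urljoin(base, '/uploads/allimg/a.jpg'))
# http://pic.netbian.com/uploads/allimg/a.jpg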
2. Crawling multiple pages for batch downloading
Single-threaded version
import requests
from lxml import etree
import datetime
import time

# Save path for the downloaded images
path = 'D:/test/picture_1/'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Referer": "http://pic.netbian.com/4kmeinv/index.html"
}
start = datetime.datetime.now()

def get_img(urls):
    for url in urls:
        # Send the request and get the response
        response = requests.get(url, headers=headers)
        # Reset the encoding so the page source is not garbled
        # and the content is easy to parse
        response.encoding = 'GBK'
        html = etree.HTML(response.text)
        # XPath to extract the image links and names
        # (the <ul> class is assumed to be "clearfix" here)
        img_src = html.xpath('//ul[@class="clearfix"]/li/a/img/@src')
        # List comprehension to build the real image URLs
        img_src = ['http://pic.netbian.com' + x for x in img_src]
        img_alt = html.xpath('//ul[@class="clearfix"]/li/a/img/@alt')
        for src, name in zip(img_src, img_alt):
            img_content = requests.get(src, headers=headers).content
            img_name = name + '.jpg'
            with open(path + img_name, 'wb') as f:  # save the image locally
                # print(f"Downloading image: {img_name}")
                f.write(img_content)
        time.sleep(1)

def main():
    # List of page URLs to request
    url_list = ['http://pic.netbian.com/4kmeinv/index.html'] + [
        f'http://pic.netbian.com/4kmeinv/index_{i}.html' for i in range(2, 11)
    ]
    get_img(url_list)
    delta = (datetime.datetime.now() - start).total_seconds()
    print(f"Scraping 10 pages of images took {delta}s")

if __name__ == '__main__':
    main()
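One guard worth adding, not present in the script above: create the save directory before writing, otherwise open() raises FileNotFoundError on the first download. A minimal sketch:

import os

path = 'D:/test/picture_1/'
os.makedirs(path, exist_ok=True)  # create the folder (and any parents) if missing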
The program ran successfully, scraping 10 pages of images (210 in total) in 63.682837s.
Multi-threaded version
import requests
from lxml import etree
import datetime
import time
import random
from concurrent.futures import ThreadPoolExecutor

# Save path for the downloaded images
path = 'D:/test/picture_1/'
# Pool of user agents to rotate between requests
user_agent = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
start = datetime.datetime.now()

def get_img(url):
    headers = {
        "User-Agent": random.choice(user_agent),
        "Referer": "http://pic.netbian.com/4kmeinv/index.html"
    }
    # Send the request and get the response
    response = requests.get(url, headers=headers)
    # Reset the encoding so the page source is not garbled
    # and the content is easy to parse
    response.encoding = 'GBK'
    html = etree.HTML(response.text)
    # XPath to extract the image links and names
    # (the <ul> class is assumed to be "clearfix" here)
    img_src = html.xpath('//ul[@class="clearfix"]/li/a/img/@src')
    # List comprehension to build the real image URLs
    img_src = ['http://pic.netbian.com' + x for x in img_src]
    img_alt = html.xpath('//ul[@class="clearfix"]/li/a/img/@alt')
    for src, name in zip(img_src, img_alt):
        img_content = requests.get(src, headers=headers).content
        img_name = name + '.jpg'
        with open(path + img_name, 'wb') as f:  # save the image locally
            # print(f"Downloading image: {img_name}")
            f.write(img_content)
    time.sleep(random.randint(1, 2))

def main():
    # List of page URLs to request
    url_list = ['http://pic.netbian.com/4kmeinv/index.html'] + [
        f'http://pic.netbian.com/4kmeinv/index_{i}.html' for i in range(2, 51)
    ]
    with ThreadPoolExecutor(max_workers=6) as executor:
        executor.map(get_img, url_list)
    delta = (datetime.datetime.now() - start).total_seconds()
    print(f"Scraping 50 pages of images took {delta}s")

if __name__ == '__main__':
    main()
The program ran successfully, scraping 50 pages of images (1,047 in total) in 56.71979s. Multithreading greatly improves the efficiency of data scraping.
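One caveat with executor.map: exceptions raised inside get_img are only re-raised when the returned iterator is consumed, and the script above never consumes it, so a failed page fails silently. A sketch of a submit/as_completed variant that surfaces per-page errors, reusing the same get_img:

from concurrent.futures import ThreadPoolExecutor, as_completed

def crawl_all(url_list):
    with ThreadPoolExecutor(max_workers=6) as executor:
        # One Future per page; the dict maps each Future back to its URL
        futures = {executor.submit(get_img, url): url for url in url_list}
        for future in as_completed(futures):
            try:
                future.result()  # re-raises any exception from the worker thread
            except Exception as exc:
                print(f"Failed to crawl {futures[future]}: {exc}")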
The final result (screenshot of the downloaded images):