Python Scraper Example: Batch-Downloading Bing Wallpapers


Overview

This example batch-downloads Bing daily wallpapers from https://bing.ioliu.cn using requests and lxml: you pick the pages to scrape, and each wallpaper is fetched at 1920x1080 and saved to a local folder.

Complete code

import requests
from lxml import etree
import os


def get_user_input():
    print('Which pages do you want to download? Enter them like "4 6 8", separated by spaces, or use a dash for a range, like "4-7"')
    user_input = input()
    if len(user_input) == 1:
        start_end_ = user_input
        print('Page to download: ' + str(start_end_))
    else:
        if '-' in user_input:
            test = list(user_input.replace('-', ' ').split())
            start_end_ = list(range(int(test[0]), int(test[1]) + 1))
            print('Pages to download: ' + str(start_end_))
        else:
            start_end_ = [int(n) for n in user_input.split()]
            print('Pages to download: ' + str(start_end_))
    return start_end_


def get_page_urls(start_end_):
    all_page_urls = []
    for num in start_end_:
        all_page_urls.append('https://bing.ioliu.cn/?p={}'.format(str(num)))
    print('These are the pages the images will be downloaded from:')
    print(all_page_urls)
    return all_page_urls


if __name__ == '__main__':
    header = {'User-Agent': 'w'}
    page_number = 0
    start_end = get_user_input()
    for page_url in get_page_urls(start_end):
        img_number = 1
        res = requests.get(page_url, headers=header).text
        html = etree.HTML(res)
        img_url = html.xpath('//img/@src')
        if not os.path.exists('D:/Downloads/bing_wallpaper'):
            os.mkdir('D:/Downloads/bing_wallpaper')
        print('Downloading images from page {}'.format(start_end[page_number]))
        for img_list in img_url:
            img_list = img_list.replace('640x480', '1920x1080')
            img = requests.get(img_list, headers=header).content
            html_text = html.xpath('/html/body/div[3]/div[' + str(img_number) + ']/div/div[1]/h3/text()')[0]
            html_text_format = str(html_text).replace(',', '_').replace('/', '_')
            img_name = str(page_number * 12 + img_number) + '_' + str(html_text_format) + '.jpg'
            with open('D:\\Downloads\\bing_wallpaper\\' + img_name, 'wb') as save_img:
                # write the image bytes to disk
                save_img.write(img)
                img_number += 1
        page_number += 1
Key points

Imports

import requests
from lxml import etree
import os

Define a function that reads the pages from the user's input (purely for convenience); start_end_ records the page numbers to be scraped.

def get_user_input():
    print('Which pages do you want to download? Enter them like "4 6 8", separated by spaces, or use a dash for a range, like "4-7"')
    user_input = input()
    if len(user_input) == 1:
        start_end_ = user_input
        print('Page to download: ' + str(start_end_))
    else:
        if '-' in user_input:
            test = list(user_input.replace('-', ' ').split())
            start_end_ = list(range(int(test[0]), int(test[1]) + 1))
            print('Pages to download: ' + str(start_end_))
        else:
            start_end_ = [int(n) for n in user_input.split()]
            print('Pages to download: ' + str(start_end_))
    return start_end_
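
For example, entering "4-7" at the prompt expands the range into a list of consecutive pages:

# Sample session:
#   Which pages do you want to download? ...
#   4-7
#   Pages to download: [4, 5, 6, 7]
pages = get_user_input()   # pages == [4, 5, 6, 7]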

Define a function that builds the URL of every page to be scraped (this keeps the main loop readable).

def get_page_urls(start_end_):
    all_page_urls = []
    for num in start_end_:
        all_page_urls.append('https://bing.ioliu.cn/?p={}'.format(str(num)))
    print('These are the pages the images will be downloaded from:')
    print(all_page_urls)
    return all_page_urls
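
Feeding it the list from the sample session above yields one listing URL per page:

urls = get_page_urls([4, 5, 6, 7])
# ['https://bing.ioliu.cn/?p=4', 'https://bing.ioliu.cn/?p=5',
#  'https://bing.ioliu.cn/?p=6', 'https://bing.ioliu.cn/?p=7']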

Main program

if __name__ == '__main__':

Give the request a header; a minimal User-Agent string is enough for this site.

    header = {'User-Agent': 'w'}

page_number counts the pages downloaded so far; it is used below as the index in start_end[page_number].

    page_number = 0
    start_end = get_user_input()

Now fetch and parse each listing page:

    for page_url in get_page_urls(start_end):
        img_number = 1
        res = requests.get(page_url, headers=header).text
        html = etree.HTML(res)
        img_url = html.xpath('//img/@src')

Create the download folder if it doesn't exist yet, and report which page is being downloaded:

        if not os.path.exists('D:/Downloads/bing_wallpaper'):
            os.mkdir('D:/Downloads/bing_wallpaper')
        print('Downloading images from page {}'.format(start_end[page_number]))
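
As an aside, the existence check plus os.mkdir pair can be collapsed into a single call with os.makedirs, which also creates any missing parent directories:

        os.makedirs('D:/Downloads/bing_wallpaper', exist_ok=True)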

Adjust the image resolution. Inside a loop over all the image URLs on the page, swap the 640x480 token in each thumbnail URL for 1920x1080 to request the full-size wallpaper:

        for img_list in img_url:
            img_list = img_list.replace('640x480', '1920x1080')

Fetch the image bytes:

            img = requests.get(img_list, headers=header).content
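
If you want to guard against a failed download, a slightly more defensive sketch checks the HTTP status before keeping the bytes (raise_for_status is standard requests API and raises on 4xx/5xx responses):

            resp = requests.get(img_list, headers=header)
            resp.raise_for_status()   # fail loudly instead of saving an error page
            img = resp.content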

Pick whatever file-naming rule you like here; for example, mine produces names like "1_The New River Gorge Bridge in New River Gorge National Park_West Virginia (© Entropy Workshop_iStock_Getty Images Plus)".

            html_text = html.xpath('/html/body/div[3]/div[' + str(img_number) + ']/div/div[1]/h3/text()')[0]
            html_text_format = str(html_text).replace(',', '_').replace('/', '_')
            img_name = str(page_number * 12 + img_number) + '_' + str(html_text_format) + '.jpg'
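
The two replace calls matter because a slash would be read as a path separator when the file is opened. For a caption like the one above (a hypothetical title, shown only to illustrate the rule):

            title = 'New River Gorge Bridge, West Virginia (© Entropy Workshop/iStock/Getty Images Plus)'
            safe = title.replace(',', '_').replace('/', '_')
            # -> 'New River Gorge Bridge_ West Virginia (© Entropy Workshop_iStock_Getty Images Plus)'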

Save the image into the folder:

            with open('D:\\Downloads\\bing_wallpaper\\' + img_name, 'wb') as save_img:
                # write the image bytes to disk
                save_img.write(img)
                img_number += 1

Remember to increment page_number here, once per page:

        page_number += 1

Summary

That is the complete Bing wallpaper batch-scraping example; hopefully it helps you solve similar scraping problems of your own.
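
One possible refinement, a sketch rather than part of the original code: the absolute /html/body/div[3]/... XPath breaks as soon as the page layout shifts. Collecting all the <h3> captions once per page and pairing them with the image URLs via zip degrades more gracefully; note that the '//h3/text()' selector is an assumption about the page markup, not something verified here.

        titles = html.xpath('//h3/text()')   # assumed selector; check against the live page
        for img_src, title in zip(img_url, titles):
            img_src = img_src.replace('640x480', '1920x1080')
            safe = str(title).replace(',', '_').replace('/', '_')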
