An audiobook scraper written in Python


Overview: the scraper is split across two files. querybook.py walks the category listings on ting89.com, printing each book's cover URL and metadata, and hands every title it finds to splider.py; splider.py then searches the site for that title, resolves the mp3 URL of every chapter, and downloads the files into a folder named after the book.

querybook.py

from bs4 import BeautifulSoup
import requests

import splider


class QuName:
    def __init__(self, number):
        self.number = number

    def getPageNum(self, url):
        f = requests.get(url)  # GET the page to obtain its HTML
        soup = BeautifulSoup(f.content, "lxml")
        try:
            pageNum = soup.find('div', class_="pagesnums").find('span').text
            print('getPageNum succeeded')
            return int(pageNum[3:5])  # slice the total page count out of the label
        except Exception:
            print('getPageNum failed')
        finally:
            print('___________________________')

    def getBookList(self):
        for num in range(1, self.number):
            pageNum = self.getPageNum('http://www.ting89.com/booklist/' + str(num) + '.html')
            self.getBookInfo('http://www.ting89.com/booklist/' + str(num) + '.html')
            print('http://www.ting89.com/booklist/' + str(num) + '.html')
            for num1 in range(2, pageNum):
                self.getBookInfo('http://www.ting89.com/booklist/' + str(num) + '_' + str(num1) + '.html')
                print('http://www.ting89.com/booklist/' + str(num) + '_' + str(num1) + '.html')

    def getBookInfo(self, url):
        # The original signature was garbled in extraction; it mirrors getPageNum
        f = requests.get(url)
        soup = BeautifulSoup(f.content, "lxml")
        try:
            bookList = soup.find('div', class_="clist").findAll('li')
            for i in bookList:
                imgurl = i.find('img')
                print('Cover:', imgurl['src'])
                # print('Title:', i.find('b').text)
                pList = i.findAll('p')
                for j in pList:
                    print(j.text)
                # hand the title to the downloader
                splider.Ysspider(i.find('b').text).download_files()
        except Exception:
            print('getBookInfo failed')
        finally:
            print('___________________________')


qn = QuName(13)  # number of category pages on the site (lazily hard-coded)
qn.getBookList()
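A fragile spot worth noting: getPageNum extracts the page count with a fixed slice, int(pageNum[3:5]), which breaks as soon as the site changes its pagination label or the count is not two digits. Below is a minimal, more tolerant sketch; the exact span format is an assumption (something like "1/13页"), so the regex is illustrative, not the site's confirmed markup.

import re

def parse_page_count(span_text):
    # Hypothetical helper: assumes the pagination span embeds the total
    # after a slash, e.g. "1/13页"; returns None if the text is unexpected.
    m = re.search(r'/(\d+)', span_text)
    return int(m.group(1)) if m else None

print(parse_page_count('1/13页'))  # -> 13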

splider.py

import os
import re
import time
import urllib.parse

import requests


class Ysspider:
    def __init__(self, name):
        self.search_name = name
        self.search_url = "http://www.ting89.com/search.asp?searchword="
        self.home_url = "http://www.ting89.com/books/"
        self.index_pattern = r"""<a href="/books/([0-9]+).html" title="(.+?)" target='_blank'>"""
        self.chapter_pattern = r"""<a href='(/down/\?[^-]+-\d+.html)' target="_blank">(.+?)</a>"""
        self.down_pattern = r"""url=(.*)/(.+?)\.mp3"""
        self.book_id = ''
        self.book_name = ''
        self.chapter_list = []

    def searchbook(self):
        # Search the site and return the id of the chosen book
        file = requests.get(self.search_url + urllib.parse.quote(self.search_name, encoding='gb2312'))
        data = file.content.decode('gbk')
        result = re.findall(self.index_pattern, data)
        if len(result):
            for index, i in enumerate(result):
                print('%d.%s' % (index + 1, i[1]))
            # choice = input("Enter the number of the book to download: ")
            choice = '1'  # always take the first hit
            self.book_name = result[int(choice) - 1][1]
            self.book_id = result[int(choice) - 1][0]
            return self.book_id
        else:
            print('*******No matching book found; change the keyword and rerun*******')
            exit()

    def get_chapter_list(self):
        # Fetch the book page and pull out (chapter url, chapter name) pairs
        data = requests.get(self.home_url + self.searchbook() + '.html').content.decode('gbk')
        result = re.findall(self.chapter_pattern, data)
        return result

    def _getAllUrl(self):
        # Build the download URL for every chapter
        chapter_list = self.get_chapter_list()
        chapter = [x[0] for x in chapter_list]
        self.chapter_list = [x[1] for x in chapter_list]
        _list = [x[1] for x in chapter_list]
        data = requests.get("http://www.ting89.com" + chapter[0]).content.decode('gbk')
        result = re.findall(self.down_pattern, data)
        # if the first file name is zero-padded ("0...1"), URLs are numbered sequentially
        return self.sub_get_url(result[0][0], _list, re.search("^0.*1$", result[0][1]))

    def sub_get_url(self, down_url, _list, down_url_flag):
        # the original signature dropped the _list parameter; restored here
        url = []
        if down_url_flag:
            xulie = list(range(len(_list)))
            weishu = len(str(xulie[-1]))  # digit width used for zero padding
            for i in xulie:
                i1 = i + 1
                tmp_url = down_url + '/' + str(i1).zfill(weishu) + '.mp3'
                url.append(urllib.parse.quote(tmp_url, safe='/:?='))
        else:
            for item in _list:
                tmp_url = down_url + '/' + item + ".mp3"
                url.append(urllib.parse.quote(tmp_url, safe='/:?='))
        return url

    def save_a_file(self, url, path, chapter):
        # Save one chapter, skipping files that already exist
        try:
            print('Trying to download', chapter)
            if not os.path.exists(path):
                response = requests.get(url)
                with open(path, 'wb') as f:
                    f.write(response.content)
                print(chapter, 'saved')
                response.close()
                time.sleep(1)
            else:
                print('File already exists')
        except Exception:
            print('Download of', chapter, 'failed; retrying')
            self.save_a_file(url, path, chapter)  # the original call dropped the path argument

    def download_files(self):
        result = self._getAllUrl()  # download URL for every chapter
        root = os.path.join(os.getcwd(), self.book_name)
        if not os.path.exists(root):
            os.mkdir(root)
        for index, i in enumerate(result):
            path = os.path.join(root, self.chapter_list[index]) + '.mp3'
            self.save_a_file(i, path, self.chapter_list[index])
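Since splider.Ysspider needs nothing more than a search keyword, the downloader can also be run on its own, without crawling the category pages through querybook.py. A minimal usage sketch; the title below is only a placeholder:

import splider

# Search ting89.com for the title and download every chapter
# into ./<book name>/<chapter>.mp3, using the class defined above.
splider.Ysspider('三国演义').download_files()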
Summary

The above is the complete code of this Python audiobook scraper, collected and organized by 内存溢出. Hopefully the article helps you solve the development problems you've run into with a scraper like this.


Sharing is welcome; when reposting, please credit the source. Original article: http://outofmemory.cn/langs/1191959.html
