Python Code Example: Scraping Book Information from DangDang, JD, and Amazon


Overview

Notes:

1. The program stores its results in a MySQL database (the code below connects through pymysql); edit the database connection settings at the top of the script before running it.

2. It requires the bs4, requests, and pymysql libraries (a minimal setup check is sketched after these notes).

3. The three sites are crawled in parallel, one thread per site.
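Before running the crawler, the dependencies must be installed (for example, pip install beautifulsoup4 requests pymysql) and the target database must exist. The snippet below is a minimal setup sketch, not part of the original program: it assumes a local MySQL server reachable with the same root/root credentials used in the script, checks the connection, and creates the book database if it is missing.

import pymysql

# Sketch: verify the MySQL connection and create the `book` database if it does not exist.
# Host, port, and credentials are assumptions and must match your own server.
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', charset='utf8')
cursor = conn.cursor()
cursor.execute('CREATE DATABASE IF NOT EXISTS book DEFAULT CHARACTER SET utf8')
cursor.close()
conn.close()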

from bs4 import BeautifulSoup
import re, requests, pymysql, threading, os, traceback

try:
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', db='book', charset='utf8')
    cursor = conn.cursor()
except:
    print('\nError: could not connect to the database')

# Return the HTML of the given page
def getHTMLText(url):
    try:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ''

# Return a BeautifulSoup object for the given url
def getSoupObject(url):
    try:
        html = getHTMLText(url)
        soup = BeautifulSoup(html, 'html.parser')
        return soup
    except:
        return ''

# Get the total number of result pages for the keyword on the given site
def getPageLength(webSiteName, url):
    try:
        soup = getSoupObject(url)
        if webSiteName == 'DangDang':
            a = soup('a', {'name': 'bottom-page-turn'})
            return a[-1].string
        elif webSiteName == 'Amazon':
            a = soup('span', {'class': 'pagnDisabled'})
            return a[-1].string
    except:
        print('\nError: failed to get the page count for {}...'.format(webSiteName))
        return -1

class DangDangThread(threading.Thread):
    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\nInfo: start crawling DangDang...')
        count = 1
        length = getPageLength('DangDang', 'http://search.dangdang.com/?key={}'.format(self.keyword))  # total pages
        tablename = 'db_{}_dangdang'.format(self.keyword)
        try:
            print('\nInfo: creating the DangDang table...')
            cursor.execute('create table {} (id int, title text, prNow text, prPre text, link text)'.format(tablename))
            print('\nInfo: start crawling DangDang pages...')
            for i in range(1, int(length)):
                url = 'http://search.dangdang.com/?key={}&page_index={}'.format(self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'class': re.compile(r'line'), 'id': re.compile(r'p')})
                for li in lis:
                    # dd_name='单品标题' is DangDang's own attribute value ("item title"); keep it as-is
                    a = li.find_all('a', {'name': 'itemlist-title', 'dd_name': '单品标题'})
                    pn = li.find_all('span', {'class': 'search_now_price'})
                    pp = li.find_all('span', {'class': 'search_pre_price'})

                    if not len(a) == 0:
                        link = a[0].attrs['href']
                        title = a[0].attrs['title'].strip()
                    else:
                        link = 'NULL'
                        title = 'NULL'

                    if not len(pn) == 0:
                        prNow = pn[0].string    # current price
                    else:
                        prNow = 'NULL'

                    if not len(pp) == 0:
                        prPre = pp[0].string    # original (pre-discount) price
                    else:
                        prPre = 'NULL'

                    sql = "insert into {} (id, title, prNow, prPre, link) values ({}, '{}', '{}', '{}', '{}')".format(tablename, count, title, prNow, prPre, link)
                    cursor.execute(sql)
                    print('\rInfo: saving DangDang data, current id: {}'.format(count), end='')
                    count += 1
                    conn.commit()
        except:
            pass

class AmazonThread(threading.Thread):
    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\nInfo: start crawling Amazon...')
        count = 1
        length = getPageLength('Amazon', 'https://www.amazon.cn/s/keywords={}'.format(self.keyword))  # total pages
        tablename = 'db_{}_amazon'.format(self.keyword)
        try:
            print('\nInfo: creating the Amazon table...')
            cursor.execute('create table {} (id int, title text, prNow text, link text)'.format(tablename))
            print('\nInfo: start crawling Amazon pages...')
            for i in range(1, int(length)):
                url = 'https://www.amazon.cn/s/keywords={}&page={}'.format(self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'id': re.compile(r'result_')})
                for li in lis:
                    a = li.find_all('a', {'class': 'a-link-normal s-access-detail-page a-text-normal'})
                    pn = li.find_all('span', {'class': 'a-size-base a-color-price s-price a-text-bold'})

                    if not len(a) == 0:
                        link = a[0].attrs['href']
                        title = a[0].attrs['title'].strip()
                    else:
                        link = 'NULL'
                        title = 'NULL'

                    if not len(pn) == 0:
                        prNow = pn[0].string
                    else:
                        prNow = 'NULL'

                    sql = "insert into {} (id, title, prNow, link) values ({}, '{}', '{}', '{}')".format(tablename, count, title, prNow, link)
                    cursor.execute(sql)
                    print('\rInfo: saving Amazon data, current id: {}'.format(count), end='')
                    count += 1
                    conn.commit()
        except:
            pass

class JDThread(threading.Thread):
    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\nInfo: start crawling JD...')
        count = 1
        tablename = 'db_{}_jd'.format(self.keyword)
        try:
            print('\nInfo: creating the JD table...')
            cursor.execute('create table {} (id int, title text, prNow text, link text)'.format(tablename))
            print('\nInfo: start crawling JD pages...')
            for i in range(1, 100):
                url = 'https://search.jd.com/Search?keyword={}&page={}'.format(self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'class': 'gl-item'})
                for li in lis:
                    a = li.find_all('div', {'class': 'p-name'})
                    pn = li.find_all('div', {'class': 'p-price'})[0].find_all('i')

                    if not len(a) == 0:
                        link = 'http:' + a[0].find_all('a')[0].attrs['href']
                        title = a[0].find_all('em')[0].get_text()
                    else:
                        link = 'NULL'
                        title = 'NULL'

                    if len(link) > 128:
                        link = 'TooLong'

                    if not len(pn) == 0:
                        prNow = '¥' + pn[0].string
                    else:
                        prNow = 'NULL'

                    sql = "insert into {} (id, title, prNow, link) values ({}, '{}', '{}', '{}')".format(tablename, count, title, prNow, link)
                    cursor.execute(sql)
                    print('\rInfo: saving JD data, current id: {}'.format(count), end='')
                    count += 1
                    conn.commit()
        except:
            pass

def closeDB():
    global conn, cursor
    cursor.close()
    conn.close()

def main():
    print('Info: before using this program, create an empty database named book and edit the connection settings at the top of the script')
    keyword = input('\nInfo: enter the keyword to crawl: ')

    dangdangThread = DangDangThread(keyword)
    amazonThread = AmazonThread(keyword)
    jdThread = JDThread(keyword)

    dangdangThread.start()
    amazonThread.start()
    jdThread.start()
    dangdangThread.join()
    amazonThread.join()
    jdThread.join()

    closeDB()

    print('\nCrawling finished, closing....')
    os.system('pause')

main()
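The inserts above build SQL by string formatting, so a book title containing a quote character will break the statement. A safer variant is sketched below with a hypothetical save_row helper (not part of the original script); it uses pymysql's parameterized queries, and the table and column names mirror the DangDang table above.

# Sketch of a parameterized insert; save_row is a hypothetical helper, not from the original code.
def save_row(cursor, tablename, row_id, title, prNow, prPre, link):
    # Only the table name is interpolated; all values are passed as query parameters,
    # so quotes inside titles cannot break the SQL statement.
    sql = 'insert into {} (id, title, prNow, prPre, link) values (%s, %s, %s, %s, %s)'.format(tablename)
    cursor.execute(sql, (row_id, title, prNow, prPre, link))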

Sample screenshot:

Partial results for the keyword "android" (exported to Excel)
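The export step shown in the screenshot can be reproduced with pandas. The sketch below is an assumption about how such an export could be done, not part of the original program; it presumes pandas and openpyxl are installed and that a finished crawl for the keyword "android" produced a db_android_jd table (following the db_<keyword>_<site> naming pattern used by the crawler).

import pandas as pd
import pymysql

# Sketch: dump one result table to an Excel file. The table name is an assumption
# based on the crawler's naming pattern; adjust it to your own keyword.
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', db='book', charset='utf8')
cursor = conn.cursor()
cursor.execute('select id, title, prNow, link from db_android_jd')
rows = cursor.fetchall()
df = pd.DataFrame(rows, columns=['id', 'title', 'prNow', 'link'])
df.to_excel('jd_android_books.xlsx', index=False)  # requires openpyxl
cursor.close()
conn.close()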

Summary

That is all of this article's code example for scraping book information from DangDang, JD, and Amazon with Python; we hope it is helpful. Interested readers can also refer to these related articles on this site:

Python code for scraping Amazon book information

Python crawler example: scraping jokes from a website

Python exploration: sample code for scraping e-commerce listing information

If anything here falls short, feel free to point it out in a comment. Thanks for your support!
