python爬取福布斯富人榜并进行数据可视化
一、选题背景
如今,人类社会已经进入了大数据时代,数据已经成为必不可少的部分,可见数据的获取非常重要,而数据的获取的方式大概有下面几种。
1、企业生产的数据,大型互联网公司有海量的用户,所以他们积累数据有天然的优势
2、数据管理资讯公司
3、政府/机构提供的公开数据
4、第三方数据平台购买数据或用爬虫爬取数据
二、爬虫设计方案
1、项目名称:python爬取福布斯富人榜并进行数据可视化
2、数据的获取与数据特点的分析
三、结构特征分析
1、页面解析:
四、程序设计
1、读取爬取页面链接结构
def loadalldata():
    """Scrape all 15 pages of the Forbes real-time rich list.

    Returns:
        list[dict]: every person record from pages 1-15, in page order,
        as produced by loaddata().
    """
    alldata = []
    for page in range(1, 16):
        # Pagination URLs look like shishi_1.html ... shishi_15.html.
        # The original had ".HTML"; URL paths are case-sensitive on most
        # servers, so the lowercase extension is required.
        url = "https://www.phb123.com/renwu/fuhao/shishi_" + str(page) + ".html"
        alldata = alldata + loaddata(url)
    return alldata
2、读取一页的数据
def loaddata(url):
    """Scrape one page of the ranking table.

    Args:
        url: URL of one paginated ranking page.

    Returns:
        list[dict]: one dict per person with keys
        'num' (rank), 'name', 'money', 'company', 'country'.
    """
    from bs4 import BeautifulSoup
    import requests
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/72.0.3626.121 Safari/537.36'
    }
    resp = requests.get(url, headers=headers)
    soup = BeautifulSoup(resp.content, "lxml")
    # The page carries exactly one ranking table with class "rank-table".
    ranktable = soup.find_all('table', class_="rank-table")[0]
    # Skip the first <tr> (table header) via slicing instead of pop(0).
    rows = ranktable.find_all('tr')[1:]
    persionList = []
    for tr in rows:
        # Hoist the cell lookup: the original called tr.find_all('td')
        # five times per row.
        tds = tr.find_all('td')
        persion = {}
        persion['num'] = tds[0].string        # rank number
        persion['name'] = tds[1].p.string     # person name (inside <p>)
        persion['money'] = tds[2].string      # fortune, e.g. "1139亿美元"
        persion['company'] = tds[3].string    # company
        persion['country'] = tds[4].a.string  # country (inside <a>)
        persionList.append(persion)
    print("页面" + url + "爬取成功")
    return persionList
3、将爬取到的数据保存到本地Excel文件
def savedata(path, persionList):
    """Save the scraped records to a local Excel (.xls) file.

    Args:
        path: output file path, e.g. "rank.xls".
        persionList: list of dicts with keys
            'num', 'name', 'money', 'company', 'country'.
    """
    import xlwt
    workbook = xlwt.Workbook()
    worksheet = workbook.add_sheet('test')
    # Header row (row 0).
    for col, title in enumerate(('排名', '姓名', '财富', '企业', '国家')):
        worksheet.write(0, col, title)
    # Data rows start at row 1; enumerate replaces the original
    # 1-based index arithmetic over range(1, len+1).
    keys = ('num', 'name', 'money', 'company', 'country')
    for row, persion in enumerate(persionList, start=1):
        for col, key in enumerate(keys):
            worksheet.write(row, col, persion[key])
    workbook.save(path)
    print("数据保存成功:" + path)
4、取出排行榜前十的姓名和财富数据 以两个List返回
def loadtop10(path):
    """Read the saved Excel file and return the top-10 names and fortunes.

    Args:
        path: path of the Excel file written by savedata().

    Returns:
        (nameList, moneyList): top-10 names (str) and fortunes as int,
        in units of 亿美元 (hundred million USD).
    """
    import xlrd
    book = xlrd.open_workbook(path)
    sheet1 = book.sheets()[0]
    nameList = sheet1.col_values(1)[1:11]   # rows 1..10, skipping the header
    moneyList = sheet1.col_values(2)[1:11]
    # Cells look like "1139亿美元": strip the 3-char unit suffix, keep the number.
    moneyList2 = [int(m[0:-3]) for m in moneyList]
    print("取出排行榜前十的姓名和财富数据")
    print(nameList)
    print(moneyList2)
    return nameList, moneyList2


def countcountrynum(path):
    """Count how many listed people each country has.

    Args:
        path: path of the Excel file written by savedata().

    Returns:
        list[dict]: the five countries with the most entries plus an
        "其他" (other) bucket, each as {"name": ..., "count": ...},
        sorted by count descending.
    """
    import xlrd
    from collections import Counter
    book = xlrd.open_workbook(path)
    sheet1 = book.sheets()[0]
    # BUG FIX: the original sliced [1:-1], silently dropping the last data
    # row; only the header row (index 0) must be skipped.
    countryList = sheet1.col_values(4)[1:]
    print(countryList)
    # BUG FIX: the original called List(...) (NameError; should be list) and
    # then counted with a double loop plus a hand-written bubble sort.
    # Counter.most_common() does both in one step.
    counts = Counter(countryList)
    dictList = [{"name": c, "count": n} for c, n in counts.most_common()]
    print(dictList)
    dictList2 = dictList[0:5]
    top5 = {d["name"] for d in dictList2}
    # BUG FIX: the original added 1 per *country* outside the top five, but
    # the pie chart shows numbers of *people*; sum their counts instead.
    othercount = sum(d["count"] for d in dictList if d["name"] not in top5)
    dictList2.append({"name": "其他", "count": othercount})
    print('获取排行榜中每个国家的上榜人数')
    print(dictList2)
    return dictList2


def drow():
    """Draw a bar chart of the top-10 fortunes and a pie chart of countries.

    Reads "rank.xls" from the working directory (written by savedata()).
    """
    import matplotlib.pyplot as plt
    # BUG FIX: matplotlib names are lower-case. The original's
    # 'Font.sans-serif', plt.Title, Fontsize, grID, get_wIDth and plt.pIE
    # all raise KeyError/AttributeError/TypeError at runtime.
    plt.rcParams['font.sans-serif'] = ['SimHei']  # enable Chinese glyphs
    plt.figure('福布斯前十榜', figsize=(15, 5))
    listx, listy = loadtop10('rank.xls')
    plt.title('福布斯前十榜', fontsize=16)
    plt.xlabel('人物', fontsize=14)
    plt.ylabel('金额/亿美元', fontsize=14)
    plt.tick_params(labelsize=10)
    plt.grid(linestyle=':', axis='y')
    bars = plt.bar(listx, listy, color='dodgerblue', label='Apple',
                   align='center')
    # Annotate each bar with its value.
    for bar in bars:
        h = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2, h, '%d' % int(h),
                 ha='center', va='bottom')
    dictList = countcountrynum("rank.xls")
    plt.figure('各国家上榜人数所占比例')
    labels = [d['name'] for d in dictList]
    sizes = [d['count'] for d in dictList]
    explode = (0.1, 0, 0, 0, 0, 0)  # pull the largest slice out slightly
    plt.pie(sizes, explode=explode, labels=labels,
            autopct='%1.1f%%', shadow=False, startangle=150)
    plt.title("各国家上榜人数所占比例", fontsize=16)
    plt.axis('equal')  # equal aspect ratio so the pie is a circle
    plt.show()
5、完整代码
## Complete script: scrape the Forbes rich list, save it to Excel, visualize it.


def loaddata(url):
    """Scrape one page of the ranking table; see loadalldata() for paging."""
    from bs4 import BeautifulSoup
    import requests
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/72.0.3626.121 Safari/537.36'
    }
    resp = requests.get(url, headers=headers)
    soup = BeautifulSoup(resp.content, "lxml")
    ranktable = soup.find_all('table', class_="rank-table")[0]
    rows = ranktable.find_all('tr')[1:]  # drop the header row
    persionList = []
    for tr in rows:
        tds = tr.find_all('td')  # look cells up once per row
        persion = {}
        persion['num'] = tds[0].string        # rank
        persion['name'] = tds[1].p.string     # name
        persion['money'] = tds[2].string      # fortune, e.g. "1139亿美元"
        persion['company'] = tds[3].string    # company
        persion['country'] = tds[4].a.string  # country
        persionList.append(persion)
    print("页面" + url + "爬取成功")
    return persionList


def loadalldata():
    """Scrape all 15 pages and return the combined person list."""
    alldata = []
    for page in range(1, 16):
        # lowercase ".html" — URL paths are case-sensitive.
        url = "https://www.phb123.com/renwu/fuhao/shishi_" + str(page) + ".html"
        alldata = alldata + loaddata(url)
    return alldata


def savedata(path, persionList):
    """Save the scraped records to a local Excel (.xls) file."""
    import xlwt
    workbook = xlwt.Workbook()
    worksheet = workbook.add_sheet('test')
    for col, title in enumerate(('排名', '姓名', '财富', '企业', '国家')):
        worksheet.write(0, col, title)
    keys = ('num', 'name', 'money', 'company', 'country')
    for row, persion in enumerate(persionList, start=1):
        for col, key in enumerate(keys):
            worksheet.write(row, col, persion[key])
    workbook.save(path)
    print("数据保存成功:" + path)


def loadtop10(path):
    """Return the top-10 names and fortunes (int, 亿美元) from the file."""
    import xlrd
    book = xlrd.open_workbook(path)
    sheet1 = book.sheets()[0]
    nameList = sheet1.col_values(1)[1:11]
    moneyList = sheet1.col_values(2)[1:11]
    # Strip the 3-char "亿美元" unit suffix and convert to int.
    moneyList2 = [int(m[0:-3]) for m in moneyList]
    print("取出排行榜前十的姓名和财富数据")
    print(nameList)
    print(moneyList2)
    return nameList, moneyList2


def countcountrynum(path):
    """Return the top-5 countries by number of listed people plus '其他'."""
    import xlrd
    from collections import Counter
    book = xlrd.open_workbook(path)
    sheet1 = book.sheets()[0]
    # Fix: skip only the header row; [1:-1] lost the last data row.
    countryList = sheet1.col_values(4)[1:]
    print(countryList)
    # Fix: List() was a NameError; Counter replaces the manual count loop
    # and the hand-written bubble sort.
    counts = Counter(countryList)
    dictList = [{"name": c, "count": n} for c, n in counts.most_common()]
    print(dictList)
    dictList2 = dictList[0:5]
    top5 = {d["name"] for d in dictList2}
    # Fix: sum people (counts), not the number of remaining countries.
    othercount = sum(d["count"] for d in dictList if d["name"] not in top5)
    dictList2.append({"name": "其他", "count": othercount})
    print('获取排行榜中每个国家的上榜人数')
    print(dictList2)
    return dictList2


def drow():
    """Draw the top-10 bar chart and the per-country pie chart."""
    import matplotlib.pyplot as plt
    # Fix: matplotlib names are lower-case (font.sans-serif, title,
    # fontsize, grid, get_width, pie).
    plt.rcParams['font.sans-serif'] = ['SimHei']  # Chinese font support
    plt.figure('福布斯前十榜', figsize=(15, 5))
    listx, listy = loadtop10('rank.xls')
    plt.title('福布斯前十榜', fontsize=16)
    plt.xlabel('人物', fontsize=14)
    plt.ylabel('金额/亿美元', fontsize=14)
    plt.tick_params(labelsize=10)
    plt.grid(linestyle=':', axis='y')
    bars = plt.bar(listx, listy, color='dodgerblue', label='Apple',
                   align='center')
    for bar in bars:
        h = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2, h, '%d' % int(h),
                 ha='center', va='bottom')
    dictList = countcountrynum("rank.xls")
    plt.figure('各国家上榜人数所占比例')
    labels = [d['name'] for d in dictList]
    sizes = [d['count'] for d in dictList]
    explode = (0.1, 0, 0, 0, 0, 0)
    plt.pie(sizes, explode=explode, labels=labels,
            autopct='%1.1f%%', shadow=False, startangle=150)
    plt.title("各国家上榜人数所占比例", fontsize=16)
    plt.axis('equal')  # keep the pie circular
    plt.show()


if __name__ == '__main__':
    # Scrape, save next to this script as rank.xls, then visualize.
    data = loadalldata()
    savedata("rank.xls", data)
    drow()
五、效果实现
1、获取到数据的本地Excel文件
(此处为截图,略)
2、福布斯排行榜前十的人物数据可视化效果
(此处为截图,略)
3、各个国家上榜人数与整体相比所占比例的统计与可视化
六、项目总结
1、得到的结论
根据饼图发现,美国上榜人数最多,然后中国紧随其后,其次是德国、俄罗斯、印度……等等国家。根据柱状图发现,福布斯排行榜的大部分都所属美国,只有第三位与第十位是法国人。
2、收获
这次爬虫项目的设计与实施让我学会了爬取网站的信息与获取的数据分析。因为这次的项目就很不熟练导致到处碰壁,所以需要学会的知识点还有很多,还需要不断的学习与改进。
总结
以上是内存溢出为你收集整理的python爬取福布斯富人榜并进行数据可视化全部内容,希望文章能够帮你解决python爬取福布斯富人榜并进行数据可视化所遇到的程序开发问题。
如果觉得内存溢出网站内容还不错,欢迎将内存溢出网站推荐给程序员好友。
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)