Python web scraping | Scraping listed-company prospectuses from cninfo (巨潮资讯)


It's been a while since the last update, so here's a quick footprint~~

This note scrapes listed companies' prospectuses (招股说明书) from cninfo (巨潮资讯). To scrape other filing types, just swap in a different search keyword.
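A minimal sketch of that swap, where '年度报告' (annual report) is just an illustrative alternative keyword and 600000 a placeholder stock code; the endpoint is the same one the full script below uses:

import urllib.parse

keyword = urllib.parse.quote('年度报告')  # illustrative alternative keyword: annual reports
url = ('http://www.cninfo.com.cn/new/fulltextSearch/full?searchkey=600000+' + keyword
       + '&sdate=&edate=&isfulltext=false&sortName=pubdate&sortType=desc&pageNum=1')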

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021/11/2 22:53
# @Author  : Claire
# @File    : 爬取上市公司招股说明书-巨潮.py
# @Software: PyCharm


import requests
import json
import os
import urllib.parse
import pprint
import urllib.request


# Create the directory the prospectuses are saved to (./data)
os.makedirs('./data', exist_ok=True)


# Open the output files and the dictionary that collects the urls
f_succ = open('./data/urls.csv', 'w', encoding="utf-8")      # urls successfully located
f_PDF = open('./data/stk_PDF.csv', 'w', encoding="utf-8")    # codes with no PDF-format filing
f_lose = open('./data/stk_lose.csv', 'w', encoding="utf-8")  # codes whose query failed, for a second pass
urldict = {}    # maps 'stkcd-secName-date' to the pdf url


# Read the listed companies' stock codes, one per line
stks = open('stkcd.csv', 'r', encoding="utf-8")


# Query the search endpoint for each code and collect the prospectus urls
for stk in stks:
    # Strip the newline/carriage-return/BOM characters, then left-pad to the 6-digit stock code
    stkcd = str(stk).replace('\n', '').replace('\r', '').replace('\ufeff', '').zfill(6)
    print(stkcd)

    # Send the search request and read the response
    # Search page: http://www.cninfo.com.cn/new/fulltextSearch?notautosubmit=&keyWord=603213%20%E6%8B%9B%E8%82%A1%E8%AF%B4%E6%98%8E%E4%B9%A6
    # JSON endpoint behind it: http://www.cninfo.com.cn/new/fulltextSearch/full?searchkey=603213+招股说明书&sdate=&edate=&isfulltext=false&sortName=pubdate&sortType=desc&pageNum=1
    # The cookie below is session-specific; replace it with one from your own browser session if requests start failing
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36','cookie':'JSESSIonID=BC620348EF0877559E75F78A04B3E3B9; cninfo_user_browse=301040,gfbj0833879,%E4%B8%AD%E7%8E%AF%E6%B5%B7%E9%99%86; _sp_ses.2141=*; insert_cookie=45380249; routeId=.uc2; SID=d8b4d86a-a298-4a59-8651-6bc86e886302; _sp_id.2141=2932aeb7-c4b3-4c5c-9618-aeaf7c07a1b9.1635838728.2.1635853694.1635840276.2dca209a-da35-42a1-967f-477bfe068f00'}
    keyword = urllib.parse.quote('招股说明书')  # search keyword (prospectus); swap in another keyword to scrape other filings

    url = ('http://www.cninfo.com.cn/new/fulltextSearch/full?searchkey=' + stkcd + '+' + keyword
           + '&sdate=&edate=&isfulltext=false&sortName=pubdate&sortType=desc&pageNum=1')
    # print(url)
    response = requests.get(url, headers=headers)
    content = response.content.decode(encoding='utf-8')
    data = json.loads(content)  # renamed from `dict`, which shadows the builtin
    # print(data)
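    # Rough shape of the response, inferred from the fields used below (an assumption, not the full schema):
    # {'announcements': [{'announcementTitle': '...招股说明书',
    #                     'secName': '公司简称',
    #                     'adjunctUrl': 'finalpage/2021-10-29/1211425539.PDF',
    #                     ...}, ...]}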

    try:
        # Skip abstracts, roadshow notices, appendices and other non-prospectus filings
        excluded = ['摘要', '和路演公告', '附录', '事宜', '确认意见', 'H股', '确认函', '增发']
        for i in data['announcements']:
            if all(word not in i['announcementTitle'] for word in excluded):
                print(i['announcementTitle'])

                # Build the full url of the filing,
                # e.g. http://static.cninfo.com.cn/finalpage/2021-10-29/1211425539.PDF
                url = 'http://static.cninfo.com.cn/' + i['adjunctUrl']
                # print(url)
                secname = i['secName'].replace('*', '')  # drop '*' so the name is a valid filename
                date = i['adjunctUrl'][10:20]            # date segment of adjunctUrl, e.g. '2021-10-29'

                # Record the url if the filing is a PDF
                if 'PDF' in url:
                    urldict.update({stkcd + '-' + secname + '-' + date: url})
                    csv_write = stkcd + ',' + secname + ',' + date + ',' + url + '\n'
                    f_succ.write(csv_write)

                else:
                    stk_PDF = stkcd + '\n'
                    print(stkcd + ' no PDF-format filing found')
                    f_PDF.write(stk_PDF)

    # Record the codes whose query failed, so they can be re-run later
    except Exception:
        stk_lose = stkcd + '\n'
        print(stkcd + ' query failed')
        f_lose.write(stk_lose)

pprint.pprint(urldict)
f_succ.close()
f_PDF.close()
f_lose.close()



# Download each parsed pdf url into ./data, named with the regular pattern 'stkcd-secName-date.pdf'
for name in urldict:
    url = urldict[name]
    response = urllib.request.urlopen(url)
    with open('./data/' + name + '.pdf', 'wb') as file:
        file.write(response.read())
    print(name)
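Since stk_lose.csv exists precisely so the failed codes can be fed back through the same loop, a second pass is straightforward. A minimal sketch; fetch_prospectus is a hypothetical wrapper around the per-code query/filter/download steps above, not a function this script defines:

# Second pass over the codes whose first query failed
retry = open('./data/stk_lose.csv', 'r', encoding='utf-8')
for stk in retry:
    stkcd = stk.strip()
    if stkcd:
        fetch_prospectus(stkcd)  # hypothetical: the per-code query/filter/download logic above
retry.close()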




Finally, I hope November comes with diligent vocabulary study and, most importantly, happiness, smooth sailing, and progress.

Feel free to share; when reposting, please credit the source: 内存溢出

Original article: http://outofmemory.cn/zaji/5073652.html
