Because different vendors interpreted the service provider's business-interface fields inconsistently, tens of millions of rows of production data ended up wrong. To repair them, the data was patched with a Python script.
Purpose: to record and share the approach.
Topics: Python 3, thread pools, pymysql, CSV file operations, requests.
Background: before a program uses threads, processes, or coroutines, it is worth having a basic grasp of the following three concepts (look them up as needed):
CPU-bound workloads, I/O-bound workloads, and the GIL (Global Interpreter Lock).
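As a quick, indicative illustration (the two task functions below are made up for the demo): threads overlap while waiting on I/O because blocking calls release the GIL, but pure-Python computation holds it, so CPU-bound threads gain almost nothing:

import time
from concurrent.futures import ThreadPoolExecutor

def io_task():
    time.sleep(1)  # blocking I/O releases the GIL, so the four sleeps overlap

def cpu_task():
    sum(i * i for i in range(10 ** 7))  # pure bytecode holds the GIL

for task in (io_task, cpu_task):
    start = time.time()
    with ThreadPoolExecutor(max_workers=4) as pool:
        for _ in range(4):
            pool.submit(task)
    print(f"{task.__name__}: {time.time() - start:.2f}s")

On a typical machine io_task finishes in about one second (the waits overlap), while cpu_task takes roughly as long as running the four computations back to back.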
Dependencies: pip3 install requests
pip3 install pymysql
Implementation:
# -*- coding:utf-8 -*-
# @FileName: grade_update.py
# @Author  : YYQ
# @Desc    : Badass Python code that once ran on a supercomputer
import csv
import queue
from concurrent.futures import ThreadPoolExecutor

import pymysql
import requests  # used by the (commented-out) API call below

from projectPath import path  # project-local helper holding the base path


class BoundedThreadPoolExecutor(ThreadPoolExecutor):
    """ThreadPoolExecutor with a bounded work queue, so submit() blocks
    instead of letting the queue (and memory) grow without limit."""

    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers=max_workers,
                         thread_name_prefix=thread_name_prefix)
        # Cap the queue at twice the maximum number of worker threads
        self._work_queue = queue.Queue(self._max_workers * 2)


gradeId = [4303, 4304, 1000926, 1000927]


def writ_mysql():
    """Create and return a new MySQL connection."""
    return pymysql.connect(host="localhost",
                           port=3306,
                           user="admin",
                           password="admin",
                           database="test")


def oprationdb(grade_id, member_id):
    """Update one member's grade (parameterized to avoid SQL injection)."""
    db = writ_mysql()
    try:
        cursor = db.cursor()
        sql = ("UPDATE `t_m_member_grade` "
               "SET `current_grade_id`=%s, `modified`=now() "
               "WHERE `member_id`=%s;")
        cursor.execute(sql, (grade_id, member_id))
        db.commit()
        print(f"Executed SQL -> {sql} with ({grade_id}, {member_id})")
    except pymysql.Error as e:
        db.rollback()
        print("Database error:", e)
    finally:
        db.close()
    return True


def interface(rows, thead):
    """Process one CSV row; thead is the row number being handled."""
    print(f"Row number ---> {thead} ---- data ---> {rows}")
    try:
        # url = "http://localhost:8080/api/elc-data/Tmall/bindQuery"
        # body = {
        #     "nickname": str(rows[0]),
        #     "seller_name": "xxxxx官方旗舰店",
        #     "mobile": "111"
        # }
        # headers = {"Content-Type": "application/x-www-form-urlencoded"}
        # res = requests.post(url=url, data=body, headers=headers)
        # result = res.json()
        # if result["data"]["status"] in [1, 2]:
        #     grade = result["data"]["member"]["level"]
        #     grade_id = gradeId[grade]
        #     oprationdb(grade_id=grade_id, member_id=rows[1])
        #     return True
        return True
    except Exception as e:
        print(f"Request failed: {e}")


def update_data():
    MAX_WORKERS = 5
    with BoundedThreadPoolExecutor(MAX_WORKERS) as pool:
        with open(path + '/file/result2_colu.csv', 'r', newline='', encoding='utf-8') as f:
            # csv.reader() yields each row of the file as a list of strings
            reader = csv.reader(f)
            n = 0
            for row in reader:
                n += 1
                try:
                    pool.submit(interface, rows=row, thead=n)
                except Exception as e:
                    print(f"Failed to submit task: {e}")
    # exiting the with-block calls pool.shutdown(wait=True) automatically


if __name__ == '__main__':
    update_data()
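For reference: judging from the commented-out body of interface(), each row of result2_colu.csv is expected to carry the Tmall nickname in column 0 and the member_id in column 1. The sample rows below are made up:

nickname_001,10001
nickname_002,10002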
Explanation:
Import the thread pool class:
from concurrent.futures import ThreadPoolExecutor
BoundedThreadPoolExecutor overrides ThreadPoolExecutor to give it a bounded work queue. The stock executor queues pending tasks without limit, so on a multi-million-row job the producer can outrun the workers and memory keeps climbing until the server runs out of it; with the bounded queue, submit() simply blocks once the queue is full.
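To see the bound in action, here is a minimal, self-contained sketch (slow_task and the timings are illustrative): with max_workers=2 the queue holds at most 4 pending tasks, so later submissions block until a worker frees a slot:

import queue
import time
from concurrent.futures import ThreadPoolExecutor

class BoundedThreadPoolExecutor(ThreadPoolExecutor):
    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers=max_workers, thread_name_prefix=thread_name_prefix)
        self._work_queue = queue.Queue(self._max_workers * 2)

def slow_task(i):
    time.sleep(0.5)  # stand-in for a slow HTTP + DB round trip
    return i

t0 = time.time()
with BoundedThreadPoolExecutor(max_workers=2) as pool:
    for i in range(10):
        pool.submit(slow_task, i)  # blocks once the queue is full
        print(f"task {i} submitted at +{time.time() - t0:.2f}s")

With the stock ThreadPoolExecutor all ten submissions would print at +0.00s; here the later ones are paced by the workers, which is exactly what keeps memory flat on a huge job.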
pool.submit(interface, rows=row, thead=n)
Submits one task: interface is the callable to run, and rows and thead are passed through as its keyword arguments.
Tasks are submitted continuously, and the pool consumes them continuously with the MAX_WORKERS worker threads.
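One caveat worth knowing: submit() returns a Future, and an exception raised inside the task is stored on that Future rather than printed, so the try/except around pool.submit() only catches submission-time errors. A sketch of how worker errors could be surfaced, reusing interface(), reader, and BoundedThreadPoolExecutor from the listing above (keeping all the Future objects costs memory, so on very large jobs you would do this in batches):

from concurrent.futures import as_completed

futures = []
with BoundedThreadPoolExecutor(MAX_WORKERS) as pool:
    for n, row in enumerate(reader, start=1):
        futures.append(pool.submit(interface, rows=row, thead=n))

for fut in as_completed(futures):
    try:
        fut.result()  # re-raises any exception that escaped the task
    except Exception as e:
        print(f"Worker task failed: {e}")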
Note: an I/O-bound script like this one is a good fit for multithreading; for CPU-bound work, use multiple processes instead, sized according to the machine's core count.
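For the CPU-bound case, the standard library's ProcessPoolExecutor offers the same submit/map interface; a minimal sketch (heavy_compute is a placeholder for real work):

import os
from concurrent.futures import ProcessPoolExecutor

def heavy_compute(x):
    # CPU-bound work; separate processes sidestep the GIL entirely
    return sum(i * i for i in range(x))

if __name__ == '__main__':
    # size the pool to the machine's core count
    with ProcessPoolExecutor(max_workers=os.cpu_count()) as pool:
        results = list(pool.map(heavy_compute, [10 ** 6] * 8))
        print(results[:2])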