postgresql 定时收集表和索引统计信息

postgresql 定时收集表和索引统计信息(配图)

概述:由于 PostgreSQL 中表和索引的统计信息(pg_stat_all_* / pg_statio_all_* 视图)都是基于当前时间点的累计值,以往的信息无法与现在的信息进行对比,故编写此工具定期采集统计信息快照。下文依次给出:快照表的建表 DDL、快照处理函数、定时采集的 shell 脚本以及常用分析查询。
--由于pg中表和索引的信息收集都是基于时间点的,对于以往的信息无法与现在的信息进行对比,故写下此工具进行统计信息收集--创建数据信息的schemacreate schema db_stat;--创建收集信息的基础表create table db_stat.snapshot_pg_stat_all_indexes(relID int,indexrelID int,schemaname varchar(200),relname varchar(550),indexrelname varchar(550),IDx_scan bigint,IDx_tup_read bigint,IDx_tup_fetch bigint,snAPId int,snap_create_time timestamp,host_ip cIDr,host_port int,host_type varchar(20),comment varchar(500));create index IDx_stat_indexe_relname_indexrelname_createtime on db_stat.snapshot_pg_stat_all_indexes(relname varchar_pattern_ops,indexrelname varchar_pattern_ops,snap_create_time);create index IDx_stat_indexe_snAPId on db_stat.snapshot_pg_stat_all_indexes(snAPId);create table db_stat.snapshot_pg_stat_all_tables(relID int,seq_scan bigint,seq_tup_read bigint,n_tup_ins bigint,n_tup_upd bigint,n_tup_del bigint,n_tup_hot_upd bigint,n_live_tup bigint,n_dead_tup bigint,last_vacuum timestamp,last_autovacuum timestamp,last_analyze timestamp,last_autoanalyze timestamp,vacuum_count bigint,autovacuum_count bigint,analyze_count bigint,autoanalyze_count bigint,comment varchar(500));create index IDx_stat_table_relname_createtime on db_stat.snapshot_pg_stat_all_tables(relname varchar_pattern_ops,snap_create_time);create index IDx_stat_table_snAPId on db_stat.snapshot_pg_stat_all_tables(snAPId);create table db_stat.snapshot_pg_statio_all_indexes(relID int,IDx_blks_read bigint,IDx_blks_hit bigint,comment varchar(500));create index IDx_statio_indexe_relname_indexrelname_createtime on db_stat.snapshot_pg_statio_all_indexes(relname varchar_pattern_ops,snap_create_time);create index IDx_statio_indexe_snAPId on db_stat.snapshot_pg_statio_all_indexes(snAPId);create table db_stat.snapshot_pg_statio_all_tables(relID int,heap_blks_read bigint,heap_blks_hit bigint,toast_blks_read bigint,toast_blks_hit bigint,tIDx_blks_read bigint,tIDx_blks_hit bigint,comment varchar(500));create index IDx_statio_table_relname_createtime on db_stat.snapshot_pg_statio_all_tables(relname 
varchar_pattern_ops,snap_create_time);create index IDx_statio_table_snAPId on db_stat.snapshot_pg_statio_all_tables(snAPId);--创建快照的序列create sequence db_stat.seq_snapshot minvalue 1 maxvalue 99999999999999;--每收集完信息之后,对时间,主机列等进行填充create or replace function db_stat.process_snapshot_table(in i_host_ip cIDr,in i_host_port int,in i_host_type varchar,in i_comment varchar default '') returns int as $$declarev_snAPId int;_detail text;_hint text;_message text;beginselect nextval('db_stat.seq_snapshot') into v_snAPId;update db_stat.snapshot_pg_stat_all_indexes set snAPId=v_snAPId,snap_create_time=Now(),host_ip=i_host_ip,host_port=i_host_port,host_type=i_host_type,comment=i_comment where snAPId is null;update db_stat.snapshot_pg_stat_all_tables set snAPId=v_snAPId,comment=i_comment where snAPId is null;update db_stat.snapshot_pg_statio_all_indexes set snAPId=v_snAPId,comment=i_comment where snAPId is null;update db_stat.snapshot_pg_statio_all_tables set snAPId=v_snAPId,comment=i_comment where snAPId is null;-- 返回值 1 代表成功,0 代表失败return 1;EXCEPTION WHEN others thenGET STACKED DIAGNOSTICS_message = message_text,_detail = pg_exception_detail,_hint = pg_exception_hint;raise notice 'message: %,detail: %,hint: %',_message,_detail,_hint;return 0;end;  $$ language plpgsql;--收动进行信息采集,测试用INSERT INTO db_stat.snapshot_pg_stat_all_indexes(relID,indexrelID,schemaname,relname,indexrelname,IDx_scan,IDx_tup_read,IDx_tup_fetch)SELECT relID,IDx_tup_fetchFROM pg_stat_all_indexes;INSERT INTO db_stat.snapshot_pg_stat_all_tables(relID,seq_scan,seq_tup_read,IDx_tup_fetch,n_tup_ins,n_tup_upd,n_tup_del,n_tup_hot_upd,n_live_tup,n_dead_tup,last_vacuum,last_autovacuum,last_analyze,last_autoanalyze,vacuum_count,autovacuum_count,analyze_count,autoanalyze_count)SELECT relID,autoanalyze_countFROM pg_stat_all_tables;INSERT INTO db_stat.snapshot_pg_statio_all_indexes(relID,IDx_blks_read,IDx_blks_hit)SELECT relID,IDx_blks_hitFROM pg_statio_all_indexes;INSERT INTO 
db_stat.snapshot_pg_statio_all_tables(relID,heap_blks_read,heap_blks_hit,IDx_blks_hit,toast_blks_read,toast_blks_hit,tIDx_blks_read,tIDx_blks_hit)SELECT relID,tIDx_blks_hitFROM pg_statio_all_tables;--select db_stat.process_snapshot_table('192.168.174.10',5432,'MASTER','');--创建一个shell脚本,每天通过定时任务进行信息采集cat snap_stat.sh#!/bin/shsource ~/.bash_profilesource /etc/profilePsql="psql"help_msg (){        echo ""        echo "Usage:"        echo "  -f              要输出结果的文件,如果为null,则默认为/tmp/snapshot_pg_stat.log"        echo "  -u              数据库连接用户名,如果为null,则为postgresql默认"        echo "  -d              连接的数据库名,则为postgresql默认"        echo "  -H              数据库的主机ip,则为postgresql默认"        echo "  -p              数据库的端口,则为postgresql默认"        echo "  -m              数据库的类型,MASTER为主,SLAVE为从"        echo ""        exit 0}# end functionswhile getopts "f:u:d:H:p:m:" flagdo        case $flag in                f) filename=$OPTARG                        ;;                u) USERname=$OPTARG                        ;;                d) DATABASE=$OPTARG                        ;;                H) HOST=$OPTARG                        ;;                p) PORT=$OPTARG                        ;;                m) DATABASE_TYPE=$OPTARG                        ;;                        \?|h) help_msg                        ;;        esacdoneif [ $USERname"x" == "x" ]thenUSERname=postgresfiif [ $DATABASE"x" == "x" ]thenDATABASE=postgresfiif [ $HOST"x" == "x" ]thenhelp_msgfiif [ $PORT"x" == "x" ]thenPORT=5432fiif [ $DATABASE_TYPE"x" == "x" ]thenDATABASE_TYPE=MASTERfiif [ $filename"x" == "x" ]thenfilename=/tmp/snapshot_pg_stat.logfiOUTPUT_filename=/tmp/snapshot_pg_stat.csvecho "" > $filenameif [ ! 
-f $filename ]        then        touch $filenameelse    printf "" | tee -a $filenamefiecho "脚本于时间 `date "+%Y-%m-%d %H:%M:%s"` 开始执行" >> $filenameecho "脚本开始于`date "+%Y-%m-%d %H:%M:%s"` 处理pg_stat_all_indexes表" >> $filename$Psql -p $PORT -U $USERname -d $DATABASE -c "copy (select relID,IDx_tup_fetch from pg_stat_all_indexes) to '$OUTPUT_filename' with csv"$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_stat_all_indexes(relID,IDx_tup_fetch) from '$OUTPUT_filename' with csv"echo "脚本开始于`date "+%Y-%m-%d %H:%M:%s"` 处理pg_stat_all_tables表" >> $filename$Psql -p $PORT -U $USERname -d $DATABASE -c "copy (select relID,autoanalyze_count from pg_stat_all_tables) to '$OUTPUT_filename' with csv"$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_stat_all_tables(relID,autoanalyze_count) from '$OUTPUT_filename' with csv"echo "脚本开始于`date "+%Y-%m-%d %H:%M:%s"` 处理pg_statio_all_indexes表" >> $filename$Psql -p $PORT -U $USERname -d $DATABASE -c "copy (select relID,IDx_blks_hit from pg_statio_all_indexes) to '$OUTPUT_filename' with csv"$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_statio_all_indexes(relID,IDx_blks_hit) from '$OUTPUT_filename' with csv"echo "脚本开始于`date "+%Y-%m-%d %H:%M:%s"` 处理pg_statio_all_tables表" >> $filename$Psql -p $PORT -U $USERname -d $DATABASE -c "copy (select relID,tIDx_blks_hit from pg_statio_all_tables) to '$OUTPUT_filename' with csv"$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_statio_all_tables(relID,tIDx_blks_hit) from '$OUTPUT_filename' with csv"$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "select db_stat.process_snapshot_table('$HOST',$PORT,'$DATABASE_TYPE','database stat snapshot');"echo "############################################################################################" >> $filenameecho "脚本于时间 `date "+%Y-%m-%d %H:%M:%s"` 结束执行" >> $filename--清空数据表truncate table 
db_stat.snapshot_pg_stat_all_indexes ;truncate table db_stat.snapshot_pg_stat_all_tables ;truncate table db_stat.snapshot_pg_statio_all_indexes ;truncate table db_stat.snapshot_pg_statio_all_tables ;--手动执行shell脚本./snap_stat.sh -d mydb -p 5432 -m SLAVE -u postgres -H 192.168.174.10--定时任务,每天8点开始执行8 8 * * * /db/pgsql/snap_stat.sh -d mydb -p 5435 -m SLAVE -u postgres -H 192.168.174.10--查看使用比较少的索引select *       from (           SELECT t.relname,t.indexrelname,max(IDx_scan)-min(IDx_scan) AS diff_IDx_scan,max(IDx_tup_read)-min(IDx_tup_read) AS diff_IDx_tup_read           FROM db_stat.snapshot_pg_stat_all_indexes t           --WHERE snap_create_time BETWEEN '2015-12-11' AND '2016-03-11'           GROUP BY t.relname,t.indexrelname) t1order by diff_IDx_scan,indexrelname ;--查看索引使用率趋势图select relname,snap_day,diff_IDx_scan,case when sum(diff_IDx_scan) over w1 >0 then  diff_IDx_scan*100/sum(diff_IDx_scan) over w1 else 0 end as  diff_IDx_scan_percent,diff_IDx_tup_read,case when sum(diff_IDx_tup_read) over w1 >0 then  diff_IDx_tup_read*100/sum(diff_IDx_tup_read) over w1  else 0 end as diff_IDx_tup_read_percent from (      SELECT t.relname,date_trunc('hour',snap_create_time) snap_day,t.IDx_scan-lag(t.IDx_scan,1) over w AS diff_IDx_scan,t.IDx_tup_read - lag(t.IDx_tup_read,1) over w AS diff_IDx_tup_read      from db_stat.snapshot_pg_stat_all_indexes t       --where indexrelname in ('','')      WINDOW w AS (PARTITION BY t.relname,t.indexrelname ORDER BY date_trunc('hour',t.snap_create_time))) t1 where diff_IDx_scan is not nullWINDOW w1 as (PARTITION BY t1.relname,t1.indexrelname)order by relname,snap_day;
总结

以上是内存溢出为你收集整理的postgresql 定时收集表和索引统计信息全部内容,希望文章能够帮你解决postgresql 定时收集表和索引统计信息所遇到的程序开发问题。

如果觉得内存溢出网站内容还不错,欢迎将内存溢出网站推荐给程序员好友。

欢迎分享,转载请注明来源:内存溢出

原文地址: http://outofmemory.cn/sjk/1174719.html

(0)
打赏 微信扫一扫 微信扫一扫 支付宝扫一扫 支付宝扫一扫
上一篇 2022-06-02
下一篇 2022-06-02

发表评论

登录后才能评论

评论列表(0条)

保存