
Forum Posts

Brian Pace
Jan 06, 2023
In Postgres Scripts
Activity

-- pgMonitor Exporter (ccp_wal_activity)
SELECT last_5_min_size_bytes,
       (SELECT COALESCE(sum(size),0) FROM pg_catalog.pg_ls_waldir()) AS total_size_bytes
FROM (SELECT COALESCE(sum(size),0) AS last_5_min_size_bytes
      FROM pg_catalog.pg_ls_waldir()
      WHERE modification > CURRENT_TIMESTAMP - '5 minutes'::interval) x;

Current WAL LSN

SELECT pg_current_wal_lsn();

Last WAL Received/Replayed (Replication Target)

SELECT pg_last_wal_receive_lsn() AS last_received,
       pg_last_wal_replay_lsn() AS last_replayed;
Brian Pace
Jan 06, 2023
In Postgres Scripts
General Size

SELECT *,
       pg_size_pretty(total_bytes) AS total,
       pg_size_pretty(index_bytes) AS index,
       pg_size_pretty(toast_bytes) AS toast,
       pg_size_pretty(table_bytes) AS table
FROM (
    SELECT *, total_bytes - index_bytes - coalesce(toast_bytes,0) AS table_bytes
    FROM (
        SELECT c.oid, nspname AS table_schema, relname AS table_name,
               c.reltuples AS row_estimate,
               pg_total_relation_size(c.oid) AS total_bytes,
               pg_indexes_size(c.oid) AS index_bytes,
               pg_total_relation_size(reltoastrelid) AS toast_bytes
        FROM pg_class c
        LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
        WHERE relkind = 'r'
    ) a
) a;

Check for Bloat

SELECT objectname,
       pg_size_pretty(size_bytes) AS object_size,
       pg_size_pretty(free_space_bytes) AS reusable_space,
       pg_size_pretty(dead_tuple_size_bytes) AS dead_tuple_space,
       free_percent
FROM bloat_stats;

Column Stats

select * from pg_stats where tablename = 'mytable';

Size (unpartitioned)

WITH RECURSIVE pg_inherit(inhrelid, inhparent) AS (
    SELECT inhrelid, inhparent FROM pg_inherits
    UNION
    SELECT child.inhrelid, parent.inhparent
    FROM pg_inherit child, pg_inherits parent
    WHERE child.inhparent = parent.inhrelid),
pg_inherit_short AS (
    SELECT * FROM pg_inherit WHERE inhparent NOT IN (SELECT inhrelid FROM pg_inherit))
SELECT table_schema,
       table_name,
       row_estimate,
       pg_size_pretty(total_bytes) AS total,
       pg_size_pretty(index_bytes) AS index,
       pg_size_pretty(toast_bytes) AS toast,
       pg_size_pretty(table_bytes) AS table,
       round(total_bytes::float8 / sum(total_bytes) OVER ()) AS total_size_share
FROM (
    SELECT *, total_bytes - index_bytes - COALESCE(toast_bytes,0) AS table_bytes
    FROM (
        SELECT c.oid,
               nspname AS table_schema,
               relname AS table_name,
               SUM(c.reltuples) OVER (PARTITION BY parent) AS row_estimate,
               SUM(pg_total_relation_size(c.oid)) OVER (PARTITION BY parent) AS total_bytes,
               SUM(pg_indexes_size(c.oid)) OVER (PARTITION BY parent) AS index_bytes,
               SUM(pg_total_relation_size(reltoastrelid)) OVER (PARTITION BY parent) AS toast_bytes,
               parent
        FROM (
            SELECT pg_class.oid, reltuples, relname, relnamespace,
                   pg_class.reltoastrelid,
                   COALESCE(inhparent, pg_class.oid) parent
            FROM pg_class
            LEFT JOIN pg_inherit_short ON inhrelid = oid
            WHERE relkind IN ('r', 'p')
        ) c
        LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
    ) a
    WHERE oid = parent
) a
ORDER BY total_bytes DESC;

Size (partitioned)

WITH RECURSIVE pg_inherit(inhrelid, inhparent) AS (
    SELECT inhrelid, inhparent FROM pg_inherits
    UNION
    SELECT child.inhrelid, parent.inhparent
    FROM pg_inherit child, pg_inherits parent
    WHERE child.inhparent = parent.inhrelid),
pg_inherit_short AS (
    SELECT * FROM pg_inherit WHERE inhparent NOT IN (SELECT inhrelid FROM pg_inherit))
SELECT parent::regclass,
       coalesce(spcname, 'default') AS pg_tablespace_name,
       row_estimate,
       pg_size_pretty(total_bytes) AS total,
       pg_size_pretty(index_bytes) AS index,
       pg_size_pretty(toast_bytes) AS toast,
       pg_size_pretty(table_bytes) AS table,
       round(100 * total_bytes::float8 / sum(total_bytes) OVER ()) AS percent
FROM (
    SELECT *, total_bytes - index_bytes - COALESCE(toast_bytes,0) AS table_bytes
    FROM (
        SELECT parent,
               reltablespace,
               SUM(c.reltuples) AS row_estimate,
               SUM(pg_total_relation_size(c.oid)) AS total_bytes,
               SUM(pg_indexes_size(c.oid)) AS index_bytes,
               SUM(pg_total_relation_size(reltoastrelid)) AS toast_bytes
        FROM (
            SELECT pg_class.oid, reltuples, relname, relnamespace, reltablespace,
                   pg_class.reltoastrelid,
                   COALESCE(inhparent, pg_class.oid) parent
            FROM pg_class
            LEFT JOIN pg_inherit_short ON inhrelid = oid
            WHERE relkind IN ('r', 'p')
        ) c
        GROUP BY parent, reltablespace
    ) a
) a
LEFT JOIN pg_tablespace ON (pg_tablespace.oid = reltablespace)
ORDER BY total_bytes DESC;

Table Size

-- pgMonitor Exporter (ccp_table_size)
SELECT current_database() as dbname, n.nspname as schemaname, c.relname,
       pg_total_relation_size(c.oid) as size_bytes
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
WHERE NOT pg_is_other_temp_schema(n.oid)
  AND relkind IN ('r', 'm', 'f');

Table Stats

-- pgMonitor Exporter (ccp_stat_user_tables)
SELECT current_database() as dbname, schemaname, relname, seq_scan, seq_tup_read,
       idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd,
       n_live_tup, n_dead_tup, vacuum_count, autovacuum_count, analyze_count,
       autoanalyze_count
FROM pg_catalog.pg_stat_user_tables;

Percent to Emergency Vacuum & Wraparound

WITH table_option (oid, val) AS (
    SELECT oid, unnest(reloptions)
    FROM pg_class
    WHERE reloptions::text ~ 'autovacuum_freeze_max_age'
),
autovac_option AS (
    SELECT oid, val FROM table_option WHERE val LIKE 'autovacuum_freeze_max_age%'
)
SELECT n.nspname, c.relname, c.relpages,
       CASE WHEN c.reloptions::text ~ 'autovacuum_enabled=false' THEN true ELSE false END AS autovacuum_disabled,
       coalesce(CASE WHEN ao.val IS NOT NULL THEN substring(ao.val, position('=' in ao.val)+1) ELSE null END,
                current_setting('autovacuum_freeze_max_age'))::numeric AS autovacuum_freeze_max_age,
       round(((greatest(age(c.relfrozenxid), age(t.relfrozenxid))::numeric) /
              coalesce(CASE WHEN ao.val IS NOT NULL THEN substring(ao.val, position('=' in ao.val)+1) ELSE null END,
                       current_setting('autovacuum_freeze_max_age'))::numeric) * 100) AS pct_emergency_vac,
       round(((greatest(age(c.relfrozenxid), age(t.relfrozenxid))::numeric) / 2146483647) * 100) AS pct_wraparound,
       c.relfrozenxid AS table_relfrozenxid,
       age(c.relfrozenxid) AS table_relfrozenxid_age,
       t.relfrozenxid AS toast_relfrozenxid,
       age(t.relfrozenxid) AS toast_relfrozenxid_age
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT OUTER JOIN pg_class t ON c.reltoastrelid = t.oid
LEFT OUTER JOIN autovac_option ao ON ao.oid = c.oid
WHERE c.relkind in ('r', 'm', 'f')
ORDER BY pct_emergency_vac DESC;

SELECT c.oid::regclass,
       age(c.relfrozenxid),
       pg_size_pretty(pg_total_relation_size(c.oid)),
       current_setting('autovacuum_freeze_max_age'),
       round((age(c.relfrozenxid)::numeric / current_setting('autovacuum_freeze_max_age')::numeric) * 100) AS pct_to_emer_vacuum,
       round((age(c.relfrozenxid)::numeric / 2146483647) * 100) AS pct_to_wraparound
FROM pg_class c
JOIN pg_namespace n on c.relnamespace = n.oid
WHERE relkind IN ('r', 't', 'm')
  and c.oid in (76759, 76756)
  --AND n.nspname NOT IN ('pg_toast')
ORDER BY 2 DESC
LIMIT 100;

Vacuum Progress

select a.pid, pc.datname, relid, heap_blks_scanned, heap_blks_total, phase,
       round((heap_blks_scanned::numeric / heap_blks_total::numeric) * 100) AS pct_done,
       extract(epoch from current_timestamp - xact_start) AS elapsed_time_seconds,
       round(heap_blks_scanned / extract(epoch from current_timestamp - xact_start)) AS blks_per_second,
       round(((heap_blks_total - heap_blks_scanned) / (heap_blks_scanned / extract(epoch from current_timestamp - xact_start)))) AS est_time_remaining
from pg_catalog.pg_stat_progress_vacuum pc
join pg_stat_activity a on (a.pid = pc.pid);

Percent Toward Exceeding Maximum for Auto Incrementing Columns

SELECT seqs.relname AS sequence,
       format_type(s.seqtypid, NULL::integer) AS sequence_datatype,
       CONCAT(tbls.relname, '.', attrs.attname) AS owned_by,
       format_type(attrs.atttypid, atttypmod) AS column_datatype,
       pg_sequence_last_value(seqs.oid::regclass) AS last_sequence_value,
       TO_CHAR((CASE WHEN format_type(s.seqtypid, NULL::integer) = 'smallint'
                     THEN (pg_sequence_last_value(seqs.relname::regclass) / 32767::float)
                     WHEN format_type(s.seqtypid, NULL::integer) = 'integer'
                     THEN (pg_sequence_last_value(seqs.relname::regclass) / 2147483647::float)
                     WHEN format_type(s.seqtypid, NULL::integer) = 'bigint'
                     THEN (pg_sequence_last_value(seqs.relname::regclass) / 9223372036854775807::float)
                END) * 100, 'fm9999999999999999999990D00%') AS sequence_percent,
       TO_CHAR((CASE WHEN format_type(attrs.atttypid, NULL::integer) = 'smallint'
                     THEN (pg_sequence_last_value(seqs.relname::regclass) / 32767::float)
                     WHEN format_type(attrs.atttypid, NULL::integer) = 'integer'
                     THEN (pg_sequence_last_value(seqs.relname::regclass) / 2147483647::float)
                     WHEN format_type(attrs.atttypid, NULL::integer) = 'bigint'
                     THEN (pg_sequence_last_value(seqs.relname::regclass) / 9223372036854775807::float)
                END) * 100, 'fm9999999999999999999990D00%') AS column_percent
FROM pg_depend d
JOIN pg_class AS seqs ON seqs.relkind = 'S' AND seqs.oid = d.objid
JOIN pg_class AS tbls ON tbls.relkind = 'r' AND tbls.oid = d.refobjid
JOIN pg_attribute AS attrs ON attrs.attrelid = d.refobjid AND attrs.attnum = d.refobjsubid
JOIN pg_sequence s ON s.seqrelid = seqs.oid
WHERE d.deptype = 'a' AND d.classid = 1259;
Brian Pace
Jan 06, 2023
In Postgres Scripts
pg_stat_statements

-- pgMonitor Exporter (ccp_pg_stat_statements)
SELECT pg_get_userbyid(s.userid) as role, d.datname AS dbname, s.queryid,
       btrim(replace(left(s.query, 40), '\n', '')) AS query,
       s.plans, s.total_plan_time AS total_plan_time_ms, s.min_plan_time AS min_plan_time_ms,
       s.max_plan_time AS max_plan_time_ms, s.mean_plan_time AS mean_plan_time_ms,
       s.stddev_plan_time AS stddev_plan_time_ms,
       s.calls, s.total_exec_time AS total_exec_time_ms, s.min_exec_time AS min_exec_time_ms,
       s.max_exec_time AS max_exec_time_ms, s.mean_exec_time AS mean_exec_time_ms,
       s.stddev_exec_time AS stddev_exec_time_ms,
       s.rows, s.shared_blks_hit, s.shared_blks_read, s.shared_blks_dirtied, s.shared_blks_written,
       s.local_blks_hit, s.local_blks_read, s.local_blks_dirtied, s.local_blks_written,
       s.temp_blks_read, s.temp_blks_written,
       s.blk_read_time AS blk_read_time_ms, s.blk_write_time AS blk_write_time_ms,
       s.wal_records, s.wal_fpi, s.wal_bytes
FROM public.pg_stat_statements s
JOIN pg_catalog.pg_database d ON d.oid = s.dbid;

-- Top 100 by total time (older pg_stat_statements versions expose total_time;
-- on PostgreSQL 13+ use total_exec_time instead)
Select (total_time / 1000 / 60) as total,
       (total_time / calls) as avg,
       query
from pg_stat_statements
order by 1 desc
limit 100;

pg_stat_statements Reset

SELECT pg_stat_statements_reset();

Stat Statements Total

-- pgMonitor Exporter (ccp_pg_stat_statements_total)
SELECT pg_get_userbyid(s.userid) as role, d.datname AS dbname,
       sum(s.calls) AS calls_count,
       sum(s.total_exec_time) AS exec_time_ms,
       avg(s.mean_exec_time) AS mean_exec_time_ms,
       sum(s.rows) AS row_count
FROM public.pg_stat_statements s
JOIN pg_catalog.pg_database d ON d.oid = s.dbid
GROUP BY 1,2;

Top Mean/Avg

-- pgMonitor Exporter (ccp_pg_stat_statements_top_mean)
SELECT pg_get_userbyid(s.userid) as role, d.datname AS dbname, s.queryid,
       btrim(replace(left(s.query, 40), '\n', '')) AS query,
       max(s.mean_exec_time) AS exec_time_ms
FROM public.pg_stat_statements s
JOIN pg_catalog.pg_database d ON d.oid = s.dbid
GROUP BY 1,2,3,4
ORDER BY 5 DESC
LIMIT #PG_STAT_STATEMENTS_LIMIT#

Top Max

-- pgMonitor Exporter (ccp_pg_stat_statements_top_max)
SELECT pg_get_userbyid(s.userid) as role, d.datname AS dbname, s.queryid,
       btrim(replace(left(s.query, 40), '\n', '')) AS query,
       s.max_exec_time AS exec_time_ms
FROM public.pg_stat_statements s
JOIN pg_catalog.pg_database d ON d.oid = s.dbid
ORDER BY 5 DESC
LIMIT #PG_STAT_STATEMENTS_LIMIT#

Top Total

-- pgMonitor Exporter (ccp_pg_stat_statements_top_total)
SELECT pg_get_userbyid(s.userid) as role, d.datname AS dbname, s.queryid,
       btrim(replace(left(s.query, 40), '\n', '')) AS query,
       s.total_exec_time AS exec_time_ms
FROM public.pg_stat_statements s
JOIN pg_catalog.pg_database d ON d.oid = s.dbid
ORDER BY 5 DESC
LIMIT #PG_STAT_STATEMENTS_LIMIT#
Brian Pace
Jan 06, 2023
In Postgres Scripts
For the best security check SQL, check out the Crunchy Data git repo for Crunchy Check Access (https://github.com/CrunchyData/crunchy_check_access).

Table Grants

SELECT grantee,
       table_catalog,
       table_schema,
       table_name,
       string_agg(privilege_type, ', ' ORDER BY privilege_type) AS privileges
FROM information_schema.role_table_grants
WHERE grantee != 'postgres'   -- or: grantee NOT IN ('postgres','PUBLIC')
GROUP BY grantee, table_catalog, table_schema, table_name;

Who Can Connect to What Databases?

-- By User:
select pgu.usename as user_name,
       (select string_agg(pgd.datname, ',' order by pgd.datname)
        from pg_database pgd
        where has_database_privilege(pgu.usename, pgd.datname, 'CONNECT')) as database_name
from pg_user pgu
order by pgu.usename;

-- By Database:
select pgd.datname as database_name,
       (select string_agg(pgu.usename, ',' order by pgu.usename)
        from pg_user pgu
        where has_database_privilege(pgu.usename, pgd.datname, 'CONNECT')) as user_name
from pg_database pgd
order by pgd.datname;
Brian Pace
Jan 06, 2023
In Postgres Scripts
Publications

select * from pg_publication;

Publication Tables

select * from pg_publication_tables;

Subscriptions

select * from pg_catalog.pg_subscription;

Subscription Stats

select * from pg_catalog.pg_stat_subscription_stats;

Replication Lag/Performance Summary

select r.pid, r.usename, r.application_name,
       pg_current_wal_lsn(),
       pg_current_wal_lsn() - r.replay_lsn AS replay_lag_bytes,
       s.total_txns, s.total_bytes
from pg_stat_replication r
left outer join pg_stat_replication_slots s on (r.application_name = s.slot_name);
Brian Pace
Jan 06, 2023
In Postgres Scripts
Note that queries may need to be executed on the source or target instance.

Source: Current WAL LSN

SELECT pg_current_wal_lsn();

Target: Last WAL LSN Received and Applied

SELECT pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn();

Pause WAL Replay/Apply

SELECT pg_wal_replay_pause();

Resume WAL Replay/Apply

SELECT pg_wal_replay_resume();

Replication Lag (in bytes)

-- pgMonitor Exporter (ccp_replication_lag_size)
SELECT client_addr as replica,
       client_hostname as replica_hostname,
       client_port as replica_port,
       pg_wal_lsn_diff(sent_lsn, replay_lsn) as bytes
FROM pg_catalog.pg_stat_replication;

Replication Lag (in seconds)

-- pgMonitor Exporter (ccp_replication_lag)
SELECT CASE
         WHEN pg_last_wal_receive_lsn() = pg_last_wal_replay_lsn() THEN 0
         ELSE EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())::INTEGER
       END AS replay_time,
       EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())::INTEGER AS received_time;

Replication Slots

-- pgMonitor Exporter (ccp_replication_slots)
SELECT slot_name, active::int,
       pg_wal_lsn_diff(pg_current_wal_insert_lsn(), restart_lsn) AS retained_bytes
FROM pg_catalog.pg_replication_slots;

Replication Status

A gap between pg_current_wal_lsn and sent_lsn may indicate heavy load on the source system. A gap between sent_lsn and pg_last_wal_receive_lsn on the standby may indicate network delay or a standby under heavy load.

SELECT pg_is_in_recovery(), pg_is_wal_replay_paused(),
       pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn(),
       pg_last_xact_replay_timestamp();

-- Source:
select pid, usesysid, usename, application_name, client_addr, client_hostname, client_port,
       backend_start, backend_xmin, state,
       pg_current_wal_lsn() as current_wal_lsn,
       sent_lsn, write_lsn, flush_lsn, replay_lsn,
       write_lag, flush_lag, replay_lag,
       sync_priority, sync_state, reply_time
from pg_stat_replication;

-- Target:
select * from pg_stat_wal_receiver;

Replication Lag

Execute on the replication target.

SELECT now() - pg_last_xact_replay_timestamp();
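Putting the Replication Status note above into numbers, here is a minimal sketch (assuming PostgreSQL 10+ function and column names) that surfaces both gaps as byte counts; treat it as illustrative rather than a pgMonitor query.

-- Run on the source: WAL generated but not yet sent, and sent but not yet replayed, per standby.
SELECT application_name,
       pg_wal_lsn_diff(pg_current_wal_lsn(), sent_lsn) AS not_yet_sent_bytes,
       pg_wal_lsn_diff(sent_lsn, replay_lsn)           AS sent_not_replayed_bytes
FROM pg_stat_replication;

-- Run on the standby: WAL received from the source but not yet replayed.
SELECT pg_wal_lsn_diff(pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn()) AS received_not_replayed_bytes;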
Brian Pace
Jan 06, 2023
In Postgres Scripts
NOTE: All of the queries below use views from the pgMonitor project. The views execute SHOW commands over a foreign data wrapper to pgBouncer. For details on these views and the setup, see the Crunchy Data git repo for pgMonitor (https://github.com/CrunchyData/pgmonitor). For a more detailed look at pgBouncer metrics, see the Missing Guide to pgBouncer Metrics blog post (https://www.dbaprogress.com/post/the-missing-guide-for-pgbouncer-metrics).

Databases

-- pgMonitor Exporter (ccp_pgbouncer_databases)
SELECT name AS pool_db,
       CASE WHEN max_connections != 0
            THEN ((current_connections::float / max_connections::float) * 100)::int
            ELSE ((current_connections::float / pool_size::float) * 100)::int
       END AS db_conn_perc_used,
       paused, disabled
FROM pgbouncer_databases;

Clients

-- pgMonitor Exporter (ccp_pgbouncer_clients)
SELECT database || '.' || user as conn_pool,
       state AS client_state,
       count(*) AS client_state_count
FROM pgbouncer_clients
GROUP BY 1,2;

Lists

-- pgMonitor Exporter (ccp_pgbouncer_lists)
SELECT list AS item, items AS item_count
FROM pgbouncer_lists
WHERE list IN ('databases', 'pools', 'free_clients', 'free_servers', 'used_servers');

Pools

-- pgMonitor Exporter (ccp_pgbouncer_pools)
SELECT database || '.' || user as conn_pool,
       sum(cl_active) AS client_active,
       sum(cl_waiting) AS client_waiting,
       sum(sv_active) AS server_active,
       sum(sv_idle) AS server_idle,
       sum(sv_used) AS server_used
FROM pgbouncer_pools
GROUP BY 1;

Servers

-- pgMonitor Exporter (ccp_pgbouncer_servers)
SELECT database || '.' || user as conn_pool,
       state as server_state,
       count(*) AS server_state_count
FROM pgbouncer_servers
GROUP BY 1,2;
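If the pgMonitor views are not installed, a quick way to see where they get their data is to run a SHOW command against the pgBouncer admin console over dblink. The sketch below is only an assumption-laden illustration: the connection string is hypothetical, and the column list must match what your pgBouncer version returns for SHOW LISTS (here just list and items). It is not the pgMonitor pgbouncer_fdw setup itself.

-- Minimal sketch: query pgBouncer's admin console directly (connection string is illustrative).
CREATE EXTENSION IF NOT EXISTS dblink;

SELECT list, items
FROM dblink('host=127.0.0.1 port=6432 dbname=pgbouncer user=pgbouncer',
            'SHOW LISTS')
     AS t(list text, items int);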
Brian Pace
Jan 06, 2023
In Postgres Scripts
Activity Session Summary

SELECT pid, datname, usename, application_name, state,
       wait_event_type || ': ' || wait_event AS wait_event,
       pg_blocking_pids(pid) AS blocking_pids,
       current_timestamp - state_change AS time_in_state,
       current_timestamp - xact_start AS time_in_xact,
       to_char(state_change, 'YYYY-MM-DD HH24:MI:SS TZ') AS state_change,
       to_char(query_start, 'YYYY-MM-DD HH24:MI:SS TZ') AS query_start,
       to_char(xact_start, 'YYYY-MM-DD HH24:MI:SS TZ') AS xact_start,
       to_char(backend_start, 'YYYY-MM-DD HH24:MI:SS TZ') AS backend_start,
       backend_type, query
FROM pg_stat_activity
WHERE usename is not null
ORDER BY state, wait_event;

State Summary

SELECT state,
       wait_event_type || ': ' || wait_event AS wait_event,
       count(1) AS cnt,
       min(current_timestamp - state_change) AS min_time_in_state,
       avg(current_timestamp - state_change) AS avg_time_in_state,
       max(current_timestamp - state_change) AS max_time_in_state
FROM pg_stat_activity
GROUP BY state, wait_event_type || ': ' || wait_event
ORDER BY 1,2;

User/State Summary

SELECT usename, state, count(1) AS cnt
FROM pg_stat_activity
WHERE usename is not null
GROUP BY usename, state
ORDER BY usename, state;

bgWriter Stats

-- pgMonitor Exporter (ccp_stat_bgwriter)
SELECT checkpoints_timed, checkpoints_req, checkpoint_write_time, checkpoint_sync_time,
       buffers_checkpoint, buffers_clean, maxwritten_clean, buffers_backend,
       buffers_backend_fsync, buffers_alloc, stats_reset
FROM pg_catalog.pg_stat_bgwriter;

Block IO

SELECT (SELECT sum(blks_read) as "Read" FROM pg_stat_database),
       (SELECT sum(blks_hit) as "Hits" FROM pg_stat_database);

Blocking Locks

WITH sos AS (
    SELECT array_cat(array_agg(pid),
                     array_agg((pg_blocking_pids(pid))[array_length(pg_blocking_pids(pid),1)])) AS pids
    FROM pg_locks
    WHERE NOT granted
)
SELECT a.pid, a.usename, a.datname, a.state,
       a.wait_event_type || ': ' || a.wait_event AS wait_event,
       current_timestamp - a.state_change AS time_in_state,
       current_timestamp - a.xact_start AS time_in_xact,
       l.relation::regclass AS relname,
       l.locktype, l.mode, l.page, l.tuple,
       pg_blocking_pids(l.pid) AS blocking_pids,
       (pg_blocking_pids(l.pid))[array_length(pg_blocking_pids(l.pid),1)] AS last_session,
       coalesce((pg_blocking_pids(l.pid))[1] || '.' ||
                coalesce(case when locktype = 'transactionid' then 1
                              else array_length(pg_blocking_pids(l.pid),1) + 1 end, 0),
                a.pid || '.0') AS lock_depth,
       a.query
FROM pg_stat_activity a
JOIN sos s on (a.pid = any(s.pids))
LEFT OUTER JOIN pg_locks l on (a.pid = l.pid and not l.granted)
ORDER BY lock_depth;

Blocking Locks Tree

WITH sos AS (
    SELECT array_cat(array_agg(pid),
                     array_agg((pg_blocking_pids(pid))[array_length(pg_blocking_pids(pid),1)])) AS pids
    FROM pg_locks
    WHERE NOT granted
)
SELECT lpad('', (coalesce(case when locktype = 'transactionid' then 1
                               else array_length(pg_blocking_pids(l.pid),1) + 1 end, 0) + 1) * 5) || a.pid::text AS pid,
       l.relation::regclass AS relname,
       l.locktype, l.mode, l.page, l.tuple,
       coalesce((pg_blocking_pids(l.pid))[1] || '.' ||
                to_char(coalesce(case when locktype = 'transactionid' then 1
                                      else array_length(pg_blocking_pids(l.pid),1) + 1 end, 0), 'FM000'),
                a.pid || '.000') AS lock_depth,
       pg_blocking_pids(a.pid) AS blocking_pids
FROM pg_stat_activity a
JOIN sos s on (a.pid = any(s.pids))
LEFT OUTER JOIN pg_locks l on (a.pid = l.pid and not l.granted)
ORDER BY lock_depth;

Database Stats

-- pgMonitor Exporter (ccp_stat_database)
SELECT s.datname as dbname, xact_commit, xact_rollback, blks_read, blks_hit,
       tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted,
       conflicts, temp_files, temp_bytes, deadlocks
FROM pg_catalog.pg_stat_database s
JOIN pg_catalog.pg_database d on d.datname = s.datname
WHERE d.datistemplate = false;

Index Stats

-- Hit Ratio
SELECT 'index hit rate' as name,
       (sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit + idx_blks_read) as ratio
FROM pg_statio_user_indexes
UNION ALL
SELECT 'cache hit rate' as name,
       CASE sum(idx_blks_hit)
            WHEN 0 THEN 'NaN'::numeric
            ELSE to_char((sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit + idx_blks_read), '99.99')::numeric
       END as ratio
FROM pg_statio_user_indexes;

-- Index Hit Rate
SELECT relname,
       100 * idx_scan / (seq_scan + idx_scan),
       n_live_tup
FROM pg_stat_user_tables
ORDER BY n_live_tup desc;

Locks

-- pgMonitor Exporter (ccp_locks)
SELECT pg_database.datname as dbname, tmp.mode, COALESCE(count,0) as count
FROM (
    VALUES ('accesssharelock'), ('rowsharelock'), ('rowexclusivelock'),
           ('shareupdateexclusivelock'), ('sharelock'), ('sharerowexclusivelock'),
           ('exclusivelock'), ('accessexclusivelock')
) AS tmp(mode)
CROSS JOIN pg_catalog.pg_database
LEFT JOIN (SELECT database, lower(mode) AS mode, count(*) AS count
           FROM pg_catalog.pg_locks
           WHERE database IS NOT NULL
           GROUP BY database, lower(mode)) AS tmp2
       ON tmp.mode = tmp2.mode and pg_database.oid = tmp2.database;

Session Info (General)

select datname, pid, leader_pid, backend_type, usename, application_name, state,
       wait_event_type, wait_event,
       trunc(extract(epoch from current_timestamp - backend_start)) AS session_age,
       trunc(extract(epoch from current_timestamp - xact_start)) AS transaction_age,
       trunc(extract(epoch from current_timestamp - query_start)) AS query_age,
       trunc(extract(epoch from current_timestamp - state_change)) AS state_age,
       query, client_addr, client_hostname
from pg_stat_activity
order by state, usename;

Table Stats

-- pgMonitor Exporter (ccp_stat_user_tables)
SELECT current_database() as dbname, schemaname, relname, seq_scan, seq_tup_read,
       idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd,
       n_live_tup, n_dead_tup, vacuum_count, autovacuum_count, analyze_count,
       autoanalyze_count
FROM pg_catalog.pg_stat_user_tables;

Transactions

SELECT (SELECT sum(xact_commit) + sum(xact_rollback) AS "Total" FROM pg_stat_database),
       (SELECT sum(xact_commit) AS "Commit" FROM pg_stat_database),
       (SELECT sum(xact_rollback) AS "Rollback" FROM pg_stat_database);
Brian Pace
Jan 06, 2023
In Postgres Scripts
CPU

-- pgMonitor Exporter (ccp_nodemx_cpu)
SELECT monitor.kdapi_scalar_bigint('cpu_request') as request,
       monitor.kdapi_scalar_bigint('cpu_limit') as limit;

-- pgMonitor Exporter (ccp_nodemx_cpucfs)
SELECT monitor.cgroup_scalar_bigint('cpu.cfs_period_us') as period_us,
       case when monitor.cgroup_scalar_bigint('cpu.cfs_quota_us') < 0 then 0
            else monitor.cgroup_scalar_bigint('cpu.cfs_quota_us') end as quota_us;

-- pgMonitor Exporter (ccp_nodemx_cpuacct)
SELECT monitor.cgroup_scalar_bigint('cpuacct.usage') as usage,
       clock_timestamp() as usage_ts;

-- pgMonitor Exporter (ccp_nodemx_cpustat)
WITH d(key, val) AS (SELECT key, val FROM monitor.cgroup_setof_kv('cpu.stat'))
SELECT (SELECT val FROM d WHERE key='nr_periods') AS nr_periods,
       (SELECT val FROM d WHERE key='nr_throttled') AS nr_throttled,
       (SELECT val FROM d WHERE key='throttled_time') AS throttled_time,
       clock_timestamp() as snap_ts;

Disk Activity

-- pgMonitor Exporter (ccp_nodemx_disk_activity)
SELECT mount_point, sectors_read, sectors_written
FROM monitor.proc_mountinfo() m
JOIN monitor.proc_diskstats() d USING (major_number, minor_number)
WHERE m.mount_point IN ('/pgdata', '/pgwal')
   OR m.mount_point like '/tablespaces/%';

Disk Utilization

-- pgMonitor Exporter (ccp_nodemx_data_disk)
SELECT mount_point,
       round(((total_bytes - available_bytes) / total_bytes) * 100) AS pct_used,
       fs_type, total_bytes, available_bytes, total_file_nodes, free_file_nodes
FROM monitor.proc_mountinfo() m
JOIN monitor.fsinfo(m.mount_point) f USING (major_number, minor_number)
WHERE m.mount_point IN ('/pgdata', '/pgwal')
   OR m.mount_point like '/tablespaces/%';

Memory

-- pgMonitor Exporter (ccp_nodemx_mem)
WITH d(key, val) as (SELECT key, val FROM monitor.cgroup_setof_kv('memory.stat'))
SELECT monitor.kdapi_scalar_bigint('mem_request') as request,
       case when monitor.cgroup_scalar_bigint('memory.limit_in_bytes') = 9223372036854771712 then 0
            else monitor.cgroup_scalar_bigint('memory.limit_in_bytes') end as limit,
       (SELECT val FROM d WHERE key='cache') as cache,
       (SELECT val FROM d WHERE key='rss') as rss,
       (SELECT val FROM d WHERE key='shmem') as shmem,
       (SELECT val FROM d WHERE key='mapped_file') as mapped_file,
       (SELECT val FROM d WHERE key='dirty') as dirty,
       (SELECT val FROM d WHERE key='active_anon') as active_anon,
       (SELECT val FROM d WHERE key='inactive_anon') as inactive_anon,
       (SELECT val FROM d WHERE key='active_file') as active_file,
       (SELECT val FROM d WHERE key='inactive_file') as inactive_file,
       monitor.cgroup_scalar_bigint('memory.usage_in_bytes') as usage_in_bytes,
       monitor.cgroup_scalar_bigint('memory.kmem.usage_in_bytes') as kmem_usage_in_bytes;

Network

-- pgMonitor Exporter (ccp_nodemx_network)
SELECT interface, tx_bytes, tx_packets, rx_bytes, rx_packets
FROM monitor.proc_network_stats();

Process Count

-- pgMonitor Exporter (ccp_nodemx_process)
SELECT monitor.cgroup_process_count() as count;
Brian Pace
Jan 06, 2023
In Postgres Scripts
Creation Progress Monitoring

select t.relname, index_relid, command, phase, blocks_done, blocks_total,
       case when blocks_total > 0
            then round(100 * (blocks_done::numeric / blocks_total::numeric))
            else 0 end AS pct_done
from pg_class t
join pg_catalog.pg_stat_progress_create_index i on (t.oid = i.relid);

Duplicate

SELECT pg_size_pretty(sum(pg_relation_size(idx))::bigint) as size,
       (array_agg(idx))[1] as idx1,
       (array_agg(idx))[2] as idx2,
       (array_agg(idx))[3] as idx3,
       (array_agg(idx))[4] as idx4
FROM (
    SELECT indexrelid::regclass as idx,
           (indrelid::text || E'\n' || indclass::text || E'\n' || indkey::text || E'\n' ||
            coalesce(indexprs::text,'') || E'\n' || coalesce(indpred::text,'')) as key
    FROM pg_index) sub
GROUP BY key
HAVING count(*) > 1
ORDER BY sum(pg_relation_size(idx)) DESC;

Size

SELECT pg_size_pretty(pg_indexes_size('<table name>'));

Stats

-- Hit Ratio
SELECT 'index hit rate' as name,
       (sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit + idx_blks_read) as ratio
FROM pg_statio_user_indexes
UNION ALL
SELECT 'cache hit rate' as name,
       CASE sum(idx_blks_hit)
            WHEN 0 THEN 'NaN'::numeric
            ELSE to_char((sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit + idx_blks_read), '99.99')::numeric
       END as ratio
FROM pg_statio_user_indexes;

-- Index Hit Rate
SELECT relname,
       100 * idx_scan / (seq_scan + idx_scan),
       n_live_tup
FROM pg_stat_user_tables
ORDER BY n_live_tup desc;

Summary

SELECT pg_class.relname,
       pg_size_pretty(pg_class.reltuples::bigint) AS rows_in_bytes,
       pg_class.reltuples AS num_rows,
       COUNT(*) AS total_indexes,
       COUNT(*) FILTER ( WHERE indisunique ) AS unique_indexes,
       COUNT(*) FILTER ( WHERE indnatts = 1 ) AS single_column_indexes,
       COUNT(*) FILTER ( WHERE indnatts IS DISTINCT FROM 1 ) AS multi_column_indexes
FROM pg_namespace
LEFT JOIN pg_class ON pg_namespace.oid = pg_class.relnamespace
LEFT JOIN pg_index ON pg_class.oid = pg_index.indrelid
WHERE pg_namespace.nspname = 'public'
  AND pg_class.relkind = 'r'
GROUP BY pg_class.relname, pg_class.reltuples
ORDER BY pg_class.reltuples DESC;

Unused

SELECT *, pg_size_pretty(pg_relation_size(indexrelname))
FROM pg_stat_all_indexes
WHERE schemaname = 'public'
ORDER BY pg_relation_size(indexrelname) DESC, idx_scan ASC;

Usage

SELECT t.schemaname, t.tablename,
       c.reltuples::bigint AS num_rows,
       pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
       psai.indexrelname AS index_name,
       pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size,
       CASE WHEN i.indisunique THEN 'Y' ELSE 'N' END AS "unique",
       psai.idx_scan AS number_of_scans,
       psai.idx_tup_read AS tuples_read,
       psai.idx_tup_fetch AS tuples_fetched
FROM pg_tables t
LEFT JOIN pg_class c ON t.tablename = c.relname
LEFT JOIN pg_index i ON c.oid = i.indrelid
LEFT JOIN pg_stat_all_indexes psai ON i.indexrelid = psai.indexrelid
WHERE t.schemaname NOT IN ('pg_catalog', 'information_schema')
ORDER BY 1, 2;

SELECT t.schemaname, t.tablename, indexname,
       c.reltuples AS num_rows,
       pg_size_pretty(pg_relation_size(quote_ident(t.schemaname)::text || '.' || quote_ident(t.tablename)::text)) AS table_size,
       pg_size_pretty(pg_relation_size(quote_ident(t.schemaname)::text || '.' || quote_ident(indexrelname)::text)) AS index_size,
       CASE WHEN indisunique THEN 'Y' ELSE 'N' END AS UNIQUE,
       number_of_scans, tuples_read, tuples_fetched
FROM pg_tables t
LEFT OUTER JOIN pg_class c ON t.tablename = c.relname
LEFT OUTER JOIN (
    SELECT c.relname AS ctablename,
           ipg.relname AS indexname,
           x.indnatts AS number_of_columns,
           idx_scan AS number_of_scans,
           idx_tup_read AS tuples_read,
           idx_tup_fetch AS tuples_fetched,
           indexrelname, indisunique, schemaname
    FROM pg_index x
    JOIN pg_class c ON c.oid = x.indrelid
    JOIN pg_class ipg ON ipg.oid = x.indexrelid
    JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid
) AS foo ON t.tablename = foo.ctablename AND t.schemaname = foo.schemaname
WHERE t.schemaname NOT IN ('pg_catalog', 'information_schema')
ORDER BY 1,2;
Brian Pace
Jan 06, 2023
In Postgres Scripts
Installed

select * from pg_catalog.pg_extension;

Upgrade Installed Extensions

do $$
declare
    l_sql text;
    l_rec record;
begin
    for l_rec in select extname from pg_extension
    loop
        l_sql := format('alter extension %I update', l_rec.extname);
        execute l_sql;
    end loop;
end;
$$;
Brian Pace
Jan 06, 2023
In Postgres Scripts
Archive Command Status

-- pgMonitor Exporter (ccp_archive_command_status)
SELECT CASE
         WHEN EXTRACT(epoch from (last_failed_time - last_archived_time)) IS NULL THEN 0
         WHEN EXTRACT(epoch from (last_failed_time - last_archived_time)) < 0 THEN 0
         ELSE EXTRACT(epoch from (last_failed_time - last_archived_time))
       END AS seconds_since_last_fail,
       EXTRACT(epoch from (CURRENT_TIMESTAMP - last_archived_time)) AS seconds_since_last_archive,
       archived_count, failed_count
FROM pg_catalog.pg_stat_archiver;

Bloat Check

Note: requires custom objects per database.

-- pgMonitor Exporter (ccp_bloat_check)
SELECT current_database() AS dbname, schemaname, objectname, size_bytes,
       (dead_tuple_size_bytes +
        (free_space_bytes - (relpages - (fillfactor/100) * relpages) * current_setting('block_size')::bigint))::bigint AS total_wasted_space_bytes
FROM bloat_stats;

Checkpoint Settings

-- pgMonitor Exporter (ccp_settings_gauge)
SELECT (SELECT setting::int FROM pg_catalog.pg_settings WHERE name = 'checkpoint_timeout') as checkpoint_timeout,
       (SELECT setting::float FROM pg_catalog.pg_settings WHERE name = 'checkpoint_completion_target') as checkpoint_completion_target,
       (SELECT 8192*setting::bigint as bytes FROM pg_catalog.pg_settings WHERE name = 'shared_buffers') as shared_buffers;

Checksum Failure

-- pgMonitor Exporter (ccp_data_checksum_failure)
SELECT datname AS dbname,
       checksum_failures AS count,
       coalesce(extract(epoch from (now() - checksum_last_failure)), 0) AS time_since_last_failure_seconds
FROM pg_catalog.pg_stat_database;

Checksum Settings

-- pgMonitor Exporter (ccp_pg_settings_checksum)
SELECT monitor.pg_settings_checksum() AS status;

Configuration Settings (database level/cluster level)

SELECT coalesce(role.rolname, 'database wide') as role,
       coalesce(db.datname, 'cluster wide') as database,
       setconfig as what_changed
FROM pg_db_role_setting role_setting
LEFT JOIN pg_roles role ON role.oid = role_setting.setrole
LEFT JOIN pg_database db ON db.oid = role_setting.setdatabase;

Database Info

SELECT pg_current_logfile(), pg_conf_load_time(), pg_postmaster_start_time(),
       pg_current_snapshot(), version();

Database Settings

-- Non-Default Settings
SELECT name, current_setting(name)
FROM pg_settings
WHERE source <> 'default'
  AND setting is distinct from boot_val;

-- All Settings
SELECT name, setting FROM pg_settings;

Database Size

-- pgMonitor Exporter (ccp_database_size)
SELECT datname as dbname, pg_database_size(datname) as bytes
FROM pg_catalog.pg_database
WHERE datistemplate = false;

In Recovery?

-- pgMonitor Exporter (ccp_is_in_recovery)
SELECT CASE WHEN pg_is_in_recovery = true THEN 1 ELSE 2 END AS status
FROM pg_is_in_recovery();

Postgres Version

-- pgMonitor Exporter (ccp_postgresql_version)
SELECT current_setting('server_version_num')::int AS current;

Postmaster Runtime

-- pgMonitor Exporter (ccp_postmaster_runtime)
SELECT extract('epoch' from pg_postmaster_start_time) as start_time_seconds
FROM pg_catalog.pg_postmaster_start_time();

Postmaster Uptime

-- pgMonitor Exporter (ccp_postmaster_uptime)
SELECT extract(epoch FROM (now() - pg_postmaster_start_time())) AS seconds;

Sequence Exhaustion

-- pgMonitor Exporter (ccp_sequence_exhaustion) *Uses ccp_monitoring view*
SELECT count FROM monitor.sequence_exhaustion(75);

Settings Pending Restart

-- pgMonitor Exporter (ccp_settings_pending_restart)
SELECT count(*) AS count
FROM pg_catalog.pg_settings
WHERE pending_restart = true;

Shared Buffer Usage

SELECT c.relname,
       pg_size_pretty(count(*) * 8192) as buffered,
       round(100.0 * count(*) / (SELECT setting FROM pg_settings WHERE name='shared_buffers')::integer, 1) AS buffers_percent,
       round(100.0 * count(*) * 8192 / pg_relation_size(c.oid), 1) AS percent_of_relation
FROM pg_class c
INNER JOIN pg_buffercache b ON b.relfilenode = c.relfilenode
INNER JOIN pg_database d ON (b.reldatabase = d.oid AND d.datname = current_database())
WHERE pg_relation_size(c.oid) > 0
GROUP BY c.oid, c.relname
ORDER BY 3 DESC
LIMIT 10;

SELECT pg_size_pretty(count(*) * 8192) as ideal_shared_buffers
FROM pg_buffercache b
WHERE usagecount >= 3;

Percent to Emergency Vacuum

SELECT datname,
       age(datfrozenxid),
       current_setting('autovacuum_freeze_max_age'),
       round((age(datfrozenxid)::numeric / current_setting('autovacuum_freeze_max_age')::numeric) * 100) AS pct_to_emer_vacuum
FROM pg_database
ORDER BY 2 DESC;

Transaction Wraparound

-- pgMonitor Exporter (ccp_transaction_wraparound)
WITH max_age AS (
    SELECT 2146483647 as max_old_xid,
           setting AS autovacuum_freeze_max_age
    FROM pg_catalog.pg_settings
    WHERE name = 'autovacuum_freeze_max_age'),
per_database_stats AS (
    SELECT datname,
           m.max_old_xid::int,
           m.autovacuum_freeze_max_age::int,
           age(d.datfrozenxid) AS oldest_current_xid
    FROM pg_catalog.pg_database d
    JOIN max_age m ON (true)
    WHERE d.datallowconn)
SELECT max(oldest_current_xid) AS oldest_current_xid,
       max(ROUND(100 * (oldest_current_xid / max_old_xid::float))) AS percent_towards_wraparound,
       max(ROUND(100 * (oldest_current_xid / autovacuum_freeze_max_age::float))) AS percent_towards_emergency_autovac
FROM per_database_stats;
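The Shared Buffer Usage queries above read from pg_buffercache, which is a contrib extension rather than a built-in view. As a minimal setup sketch (assuming the contrib packages are available on the server), it only needs to be created once in the database you query from:

-- Create the extension that exposes the pg_buffercache view used above.
CREATE EXTENSION IF NOT EXISTS pg_buffercache;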
Brian Pace
Oct 14, 2022
In Postgres Functions
Description

Generates a series from start to stop. Step can be used to control the direction and increments of the series (positive counts up, negative counts down).

Syntax

generate_series ( start, stop [, step ] )

Example

postgres=# SELECT * FROM generate_series(1,10);
 generate_series
-----------------
               1
               2
               3
               4
               5
               6
               7
               8
               9
              10
(10 rows)

Use Cases

generate_series is used a lot to populate a data set with a fixed number of rows. In the example below, it is used in a forecasting query to project out performance metrics for 1 year in 30 day increments. (The query appears to be excerpted from a larger function, so identifiers such as vtid, vlookbackdays, and cm resolve in that outer context.)

INSERT INTO target_metric_forecast (tid, metric_category, metric_name, metric_key, avg_val, day_count, forecast_dt)
(SELECT tid, cm.metric_category, cm.metric_name, cm.metric_key,
        ROUND((t.slope * extract(julian from t.max_timestamp + make_interval(days=>r.n)) + t.intercept)::numeric, 2) AS forecast_avg,
        r.n AS daycount,
        t.max_timestamp + make_interval(days=>r.n) AS forecast_date
 FROM (SELECT s.daynbr n FROM generate_series(1,360) s (daynbr) ORDER BY s.daynbr) r,
      (SELECT tid,
              regr_slope(avg_val, jdt) AS slope,
              regr_intercept(avg_val, jdt) AS intercept,
              count(1) AS cnt,
              max(rollup_dt) AS max_timestamp
       FROM (SELECT o.tid, d.rollup_dt, extract(julian from d.rollup_dt) AS jdt, d.avg_val
             FROM target o
             JOIN target_metric_summary_day d on (o.tid = d.tid)
             JOIN (SELECT tid, metric_category, metric_name,
                          min(rollup_dt) AS RollupDateMin,
                          max(rollup_dt) AS RollupDateMax
                   FROM target_metric_summary_day
                   GROUP BY tid, metric_category, metric_name) as r
                  on (d.tid = r.tid AND d.metric_category = r.metric_category and d.metric_name = r.metric_name)
             WHERE o.tid = vtid
               AND d.metric_category = cm.metric_category
               AND d.metric_name = cm.metric_name
               AND coalesce(d.metric_key,'null') = coalesce(cm.metric_key,'null')
               AND d.rollup_dt >= case when date_trunc('day', current_timestamp - make_interval(days=>vlookbackdays)) < r.RollupDateMin
                                       then r.RollupDateMin
                                       else date_trunc('day', current_timestamp - make_interval(days=>90)) end
             ORDER BY o.tid, d.rollup_dt, extract(julian from d.rollup_dt)) as tt
       GROUP BY tid) t
 WHERE t.max_timestamp + make_interval(days=>r.n) > date_trunc('day', current_timestamp)
   AND mod(r.n, 30) = 0
);
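Since step controls both direction and increment, two more quick illustrations may help (the values here are arbitrary examples, not from the original post):

-- Step down by 3: with start > stop, step must be negative.
SELECT * FROM generate_series(10, 1, -3);   -- returns 10, 7, 4, 1

-- Timestamp flavor: one row per week across a month.
SELECT * FROM generate_series('2023-01-01'::timestamp,
                              '2023-01-29'::timestamp,
                              interval '7 days');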
Brian Pace
Oct 14, 2022
In Postgres Scripts
Session Info

SELECT current_database(), current_role, current_schema(), current_schemas(true),
       pg_backend_pid(), pg_current_xact_id(), inet_client_addr(), inet_server_addr();

Session Status

SELECT datname, usename,
       sum(case when state = 'active' then 1 else 0 end) as active,
       sum(case when state = 'idle' then 1 else 0 end) as idle,
       sum(case when state is null then 1 else 0 end) as background,
       count(1) as total
FROM pg_catalog.pg_stat_activity
GROUP BY datname, usename;

SELECT sum(case when state = 'active' then 1 else 0 end) as active,
       sum(case when state = 'idle' then 1 else 0 end) as idle,
       sum(case when state is null then 1 else 0 end) as background,
       count(1) as total
FROM pg_catalog.pg_stat_activity;

-- pgMonitor Exporter (ccp_connection_stats)
SELECT ((total - idle) - idle_in_txn) as active,
       total, idle, idle_in_txn,
       (SELECT coalesce(extract(epoch from (max(now() - state_change))),0)
        FROM pg_catalog.pg_stat_activity
        WHERE state = 'idle in transaction') as max_idle_in_txn_time,
       (SELECT coalesce(extract(epoch from (max(now() - query_start))),0)
        FROM pg_catalog.pg_stat_activity
        WHERE backend_type = 'client backend' and state <> 'idle') as max_query_time,
       (SELECT coalesce(extract(epoch from (max(now() - query_start))),0)
        FROM pg_catalog.pg_stat_activity
        WHERE backend_type = 'client backend' and wait_event_type = 'Lock') as max_blocked_query_time,
       max_connections
FROM (SELECT count(*) as total,
             coalesce(sum(case when state = 'idle' then 1 else 0 end),0) as idle,
             coalesce(sum(case when state = 'idle in transaction' then 1 else 0 end),0) as idle_in_txn
      FROM pg_catalog.pg_stat_activity) x
JOIN (SELECT setting::float AS max_connections FROM pg_settings WHERE name = 'max_connections') xx ON (true);

select datname, pid, leader_pid, backend_type, usename, application_name, state,
       wait_event_type, wait_event,
       trunc(extract(epoch from current_timestamp - backend_start)) AS session_age,
       trunc(extract(epoch from current_timestamp - xact_start)) AS transaction_age,
       trunc(extract(epoch from current_timestamp - query_start)) AS query_age,
       trunc(extract(epoch from current_timestamp - state_change)) AS state_age,
       query, client_addr, client_hostname
from pg_stat_activity
order by state, usename;

Cancel Session

SELECT pg_cancel_backend(<pid>);

Disconnect Session

SELECT pg_terminate_backend(<pid>);

SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE usename = '<role>';

Cancel All Long Running SQL

SELECT pid,
       now() - pg_stat_activity.query_start AS duration,
       query, state,
       pg_cancel_backend(pid) as cancelled
FROM pg_stat_activity
WHERE (now() - pg_stat_activity.query_start) > interval '5 minutes'
  and backend_type = 'client backend'
  and state = 'active';

Session Info with Client Details

SELECT datname, a.pid as pid, usename, client_addr, client_port,
       round(extract(epoch from (now() - xact_start))) as age,
       wait_event_type IS NOT DISTINCT FROM 'Lock' AS waiting,
       NULLIF(array_to_string(ARRAY(SELECT unnest(pg_blocking_pids(a.pid)) ORDER BY 1), ','), '') as locked_by,
       CASE WHEN state = 'idle in transaction' THEN
                 CASE WHEN xact_start != state_change
                      THEN 'idle in transaction ' || CAST(abs(round(extract(epoch from (now() - state_change)))) AS text)
                      ELSE 'idle in transaction'
                 END
            WHEN state = 'active' THEN query
            ELSE state
       END AS query
FROM pg_stat_activity a
WHERE a.pid != pg_backend_pid()
  AND a.datname IS NOT NULL
GROUP BY 1,2,3,4,5,6,7,9;
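As a companion to the Cancel All Long Running SQL query, the same pattern can clear sessions stuck idle in a transaction. The sketch below is only an illustration: the 30-minute threshold is an arbitrary example, and pg_terminate_backend drops the connection rather than just cancelling the current query.

-- Terminate sessions idle in a transaction for more than 30 minutes (illustrative threshold).
SELECT pid, usename, datname,
       now() - state_change AS idle_for,
       pg_terminate_backend(pid) AS terminated
FROM pg_stat_activity
WHERE state = 'idle in transaction'
  AND (now() - state_change) > interval '30 minutes'
  AND pid <> pg_backend_pid();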
