---
queries:
  # Metric names are set to the column names in the query results
  - query: >-
      SELECT
        BG.checkpoints_timed AS scheduled_checkpoints_performed,
        BG.checkpoints_req AS requested_checkpoints_performed,
        BG.buffers_checkpoint AS buffers_written_during_checkpoint,
        BG.buffers_clean AS buffers_written_by_background_writer,
        BG.maxwritten_clean AS background_writer_stops,
        BG.buffers_backend AS buffers_written_by_backend,
        BG.buffers_alloc AS buffers_allocated
      FROM pg_stat_bgwriter BG;
    # database defaults to the auth database in the main config
    database: postgres
    # If not set explicitly here, metric type will default to
    # 'gauge' for numbers and 'attribute' for strings
    metric_types:
      buffers_allocated: rate
    # If unset, sample_name defaults to PostgresCustomSample
    sample_name: MyCustomSample
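  ## Illustrative only (not part of the original sample): custom queries can be
  ## sanity-checked from psql before being added here, for example:
  ###   psql -d postgres -c "SELECT BG.buffers_alloc AS buffers_allocated FROM pg_stat_bgwriter BG;"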
  # Query to collect unused indexes. This query must be repeated for each user
  # database to collect data from all of them.
  - query: >-
      SELECT schemaname, CAST(relname AS varchar(100)), CAST(indexrelname AS varchar(100)),
        idx_scan, idx_tup_fetch, idx_tup_read,
        pg_size_pretty(pg_relation_size(indexrelid)) AS idx_size,
        pg_size_pretty(sum(pg_relation_size(indexrelid))
          OVER (ORDER BY idx_scan, indexrelid)) AS total
      FROM pg_stat_user_indexes
      WHERE idx_scan = 0
        AND idx_tup_fetch = 0
        AND idx_tup_read = 0
      LIMIT 25;
    # database defaults to the auth database in the main config
    # database: postgres
    # If unset, sample_name defaults to PostgresCustomSample
    sample_name: PostgresUnusedIndexesSample
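  ## Illustrative only: assuming the integration reports each result set under an
  ## event type named after sample_name, the data above could be inspected in
  ## New Relic with an NRQL query such as:
  ###   SELECT schemaname, relname, idx_size FROM PostgresUnusedIndexesSample LIMIT 25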
  # Query to collect missing indexes. This query must be repeated for each user
  # database to collect data from all of them.
  - query: >-
      SELECT schemaname, CAST(relname AS varchar(100)), seq_scan, seq_tup_read,
        seq_tup_read / seq_scan AS avg, idx_scan
      FROM pg_stat_user_tables
      WHERE seq_scan > 0
      LIMIT 25;
    # database defaults to the auth database in the main config
    # database: postgres
    # If unset, sample_name defaults to PostgresCustomSample
    sample_name: PostgresMissingIndexesSample
  # Query to collect the most expensive queries. This query must be repeated for
  # each user database to collect data from all of them.
  # Note that the pg_stat_statements extension may not be enabled on your server.
  # In the main postgres-config.yml file it is best to let the connection default
  # to the postgres database.
  ## In AWS RDS environments pg_stat_statements may be available by default,
  ## depending on your version.
  ## AWS link to check: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts.General.FeatureSupport.Extensions
  ## For AWS Aurora instances running Postgres 13+ it may be necessary to create the
  ## pg_stat_statements extension manually with the following command:
  ### CREATE EXTENSION pg_stat_statements;
  # For standalone instances the extension must also be added to shared_preload_libraries
  # in your postgresql.conf file.
  ## Link here: https://www.postgresql.org/docs/current/pgstatstatements.html
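  ## Illustrative example (not part of the original sample): to enable the extension
  ## on a standalone instance, add the line below to postgresql.conf and restart the
  ## server, then run CREATE EXTENSION pg_stat_statements; in each database to monitor.
  ###   shared_preload_libraries = 'pg_stat_statements'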
  # Uncomment this query once the pg_stat_statements extension has been added to
  # shared_preload_libraries (see the example above)
  #- query: >-
  #    SELECT CAST(d.datname AS varchar(100)) AS databasename,
  #      CAST(u.usename AS varchar(100)) AS username,
  #      round((100 * s.total_time / sum(s.total_time) OVER ())::smallint) AS percent,
  #      CAST(s.total_time AS int), s.calls AS total_calls, s.rows AS total_rows,
  #      round(s.mean_time::int) AS mean_time, substring(s.query, 1, 4000) AS query
  #    FROM pg_stat_statements s
  #    JOIN pg_database d ON (s.dbid = d.oid)
  #    JOIN pg_user u ON (s.userid = u.usesysid)
  #    ORDER BY s.total_time DESC
  #    LIMIT 50;
  # database defaults to the auth database in the main config
  # database: postgres
  # Use this version of the query if running Postgres 13+
  #- query: >-
  #    SELECT CAST(d.datname AS varchar(100)) AS databasename,
  #      CAST(u.usename AS varchar(100)) AS username,
  #      round((100 * s.total_exec_time / sum(s.total_exec_time) OVER ())::smallint) AS percent,
  #      CAST(s.total_exec_time AS int), s.calls AS total_calls, s.rows AS total_rows,
  #      round(s.mean_exec_time::int) AS mean_time, substring(s.query, 1, 4000) AS query
  #    FROM pg_stat_statements s
  #    JOIN pg_database d ON (s.dbid = d.oid)
  #    JOIN pg_user u ON (s.userid = u.usesysid)
  #    ORDER BY s.total_exec_time DESC
  #    LIMIT 50;
  # database defaults to the auth database in the main config
  # database: postgres
  # If unset, sample_name defaults to PostgresCustomSample
  # sample_name: PostgresExpensiveQueriesSample
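  ## Illustrative only: before uncommenting either query above, you can confirm from
  ## psql that the extension is loaded and collecting data, for example:
  ###   SHOW shared_preload_libraries;
  ###   SELECT count(*) FROM pg_stat_statements;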