Commit f069a2f

Added passwordless access to sink-postgres, removed unnecessary port mappings, fixed the index definition metric, added a trigger that prevents duplicate rows in the index definitions table, and made init.sql for sink-postgres lowercase.

1 parent af412d0

File tree

5 files changed (+157, -72 lines)

config/pgwatch-postgres/metrics.yml

Lines changed: 5 additions & 2 deletions

@@ -19,10 +19,13 @@ metrics:
   sqls:
     11: |-
       select /* pgwatch_generated */
-        indexname,
+        indexrelname,
+        schemaname,
+        relname,
         pg_get_indexdef(indexrelid) as index_definition
       from pg_stat_all_indexes
-      order by schemaname, tablename, indexname;
+      order by schemaname, relname, indexrelname
+      limit 10000;
   gauges:
     - '*'
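
The reworked metric reads from pg_stat_all_indexes, whose columns are schemaname, relname and indexrelname; the view has no indexname or tablename column, which is what the old query assumed. A quick way to sanity-check the updated query by hand — illustrative only, not part of the commit:

-- Illustrative sanity check: the columns used by the updated metric all exist
-- on pg_stat_all_indexes; limit 5 is only to keep the output short.
select schemaname,
       relname,
       indexrelname,
       pg_get_indexdef(indexrelid) as index_definition
from pg_stat_all_indexes
order by schemaname, relname, indexrelname
limit 5;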

config/sink-postgres/00-configure-pg-hba.sh

Lines changed: 34 additions & 0 deletions

@@ -0,0 +1,34 @@
+#!/bin/bash
+# Configure pg_hba.conf to allow trust authentication from Docker networks
+
+cat > ${PGDATA}/pg_hba.conf <<EOF
+# PostgreSQL Client Authentication Configuration File
+# Custom configuration for sink-postgres container
+# Allows passwordless (trust) authentication from Docker network
+
+# TYPE  DATABASE        USER            ADDRESS                 METHOD
+
+# "local" is for Unix domain socket connections only
+local   all             all                                     trust
+
+# IPv4 local connections:
+host    all             all             127.0.0.1/32            trust
+
+# IPv6 local connections:
+host    all             all             ::1/128                 trust
+
+# Allow replication connections from localhost
+local   replication     all                                     trust
+host    replication     all             127.0.0.1/32            trust
+host    replication     all             ::1/128                 trust
+
+# Allow all connections from Docker networks without password
+# This is safe because sink-postgres is not exposed externally
+host    all             all             172.16.0.0/12           trust
+host    all             all             192.168.0.0/16          trust
+host    all             all             10.0.0.0/8              trust
+EOF
+
+# Reload PostgreSQL configuration
+pg_ctl reload -D ${PGDATA}
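
Once sink-postgres has initialized, the effect of this script can be checked from a superuser session inside the container (pg_hba_file_rules is readable only by superusers by default). A minimal sketch, illustrative only and not part of the commit:

-- Illustrative: the trust rules written by 00-configure-pg-hba.sh should be
-- listed in pg_hba_file_rules after the pg_ctl reload.
select line_number, type, database, user_name, address, auth_method
from pg_hba_file_rules
where auth_method = 'trust'
order by line_number;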

config/sink-postgres/init.sql

Lines changed: 102 additions & 43 deletions

@@ -3,79 +3,138 @@
 -- Based on https://pgwat.ch/latest/howto/metrics_db_bootstrap.html

 -- Create the pgwatch role for measurements database
-CREATE ROLE pgwatch WITH LOGIN PASSWORD 'pgwatchadmin';
+create role pgwatch with login password 'pgwatchadmin';

 -- Create the measurements database owned by pgwatch
-CREATE DATABASE measurements OWNER pgwatch;
+create database measurements owner pgwatch;

 -- Switch to the measurements database context
 \c measurements;

 -- Create extensions that might be useful for metrics storage
-CREATE EXTENSION IF NOT EXISTS btree_gist;
-CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
+create extension if not exists btree_gist;
+create extension if not exists pg_stat_statements;

 -- Grant necessary permissions to pgwatch user
-GRANT ALL PRIVILEGES ON DATABASE measurements TO pgwatch;
-GRANT ALL PRIVILEGES ON SCHEMA public TO pgwatch;
+grant all privileges on database measurements to pgwatch;
+grant all privileges on schema public to pgwatch;


 -- Create a partitioned table for queryid-to-query mappings with LIST partitioning by dbname
-CREATE TABLE IF NOT EXISTS public.pgss_queryid_queries (
-    time TIMESTAMPTZ NOT NULL,
-    dbname TEXT NOT NULL,
-    data JSONB NOT NULL,
-    tag_data JSONB
-) PARTITION BY LIST (dbname);
+create table if not exists public.pgss_queryid_queries (
+    time timestamptz not null,
+    dbname text not null,
+    data jsonb not null,
+    tag_data jsonb
+) partition by list (dbname);

 -- Create indexes for efficient lookups
-CREATE INDEX IF NOT EXISTS pgss_queryid_queries_dbname_time_idx ON public.pgss_queryid_queries (dbname, time);
+create index if not exists pgss_queryid_queries_dbname_time_idx on public.pgss_queryid_queries (dbname, time);

 -- Use existing subpartitions schema


 -- Set ownership and grant permissions to pgwatch
-ALTER TABLE public.pgss_queryid_queries OWNER TO pgwatch;
-GRANT ALL PRIVILEGES ON TABLE public.pgss_queryid_queries TO pgwatch;
+alter table public.pgss_queryid_queries owner to pgwatch;
+grant all privileges on table public.pgss_queryid_queries to pgwatch;
 -- Ensure pgwatch can use sequences (if any are created)
-GRANT USAGE ON SCHEMA public TO pgwatch;
+grant usage on schema public to pgwatch;
 -- Grant permissions on all future tables in public schema
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO pgwatch;
+alter default privileges in schema public grant all on tables to pgwatch;

-CREATE OR REPLACE FUNCTION enforce_queryid_uniqueness()
-RETURNS TRIGGER AS $$
-DECLARE
-    queryid_value TEXT;
-BEGIN
+create or replace function enforce_queryid_uniqueness()
+returns trigger as $$
+declare
+    queryid_value text;
+begin
     -- Extract queryid from the data JSONB
-    queryid_value := NEW.data->>'queryid';
+    queryid_value := new.data->>'queryid';

     -- Allow NULL queryids through
-    IF queryid_value IS NULL THEN
-        RETURN NEW;
-    END IF;
+    if queryid_value is null then
+        return new;
+    end if;

     -- Silently skip if duplicate exists
-    IF EXISTS (
-        SELECT 1
-        FROM pgss_queryid_queries
-        WHERE dbname = NEW.dbname
-          AND data->>'queryid' = queryid_value
-        LIMIT 1
-    ) THEN
-        RETURN NULL; -- Cancels INSERT silently
-    END IF;
+    if exists (
+        select 1
+        from pgss_queryid_queries
+        where dbname = new.dbname
+          and data->>'queryid' = queryid_value
+        limit 1
+    ) then
+        return null; -- Cancels INSERT silently
+    end if;

-    RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
+    return new;
+end;
+$$ language plpgsql;


-CREATE OR REPLACE TRIGGER enforce_queryid_uniqueness_trigger
-BEFORE INSERT
-ON pgss_queryid_queries
-FOR EACH ROW
-EXECUTE FUNCTION enforce_queryid_uniqueness();
+create or replace trigger enforce_queryid_uniqueness_trigger
+before insert
+on pgss_queryid_queries
+for each row
+execute function enforce_queryid_uniqueness();
+
+-- Create a partitioned table for index definitions with LIST partitioning by dbname
+create table if not exists public.index_definitions (
+    time timestamptz not null,
+    dbname text not null,
+    data jsonb not null,
+    tag_data jsonb
+) partition by list (dbname);
+
+-- Create indexes for efficient lookups
+create index if not exists index_definitions_dbname_time_idx on public.index_definitions (dbname, time);
+
+-- Set ownership and grant permissions to pgwatch
+alter table public.index_definitions owner to pgwatch;
+grant all privileges on table public.index_definitions to pgwatch;
+
+-- Create function to enforce index definition uniqueness
+create or replace function enforce_index_definition_uniqueness()
+returns trigger as $$
+declare
+    index_name text;
+    schema_name text;
+    table_name text;
+    index_definition text;
+begin
+    -- Extract index information from the data JSONB
+    index_name := new.data->>'indexrelname';
+    schema_name := new.data->>'schemaname';
+    table_name := new.data->>'relname';
+    index_definition := new.data->>'index_definition';
+
+    -- Allow NULL index names through
+    if index_name is null then
+        return new;
+    end if;
+
+    -- Silently skip if duplicate exists
+    if exists (
+        select 1
+        from index_definitions
+        where dbname = new.dbname
+          and data->>'indexrelname' = index_name
+          and data->>'schemaname' = schema_name
+          and data->>'relname' = table_name
+          and data->>'index_definition' = index_definition
+        limit 1
+    ) then
+        return null; -- Cancels INSERT silently
+    end if;
+
+    return new;
+end;
+$$ language plpgsql;
+
+create or replace trigger enforce_index_definition_uniqueness_trigger
+before insert
+on index_definitions
+for each row
+execute function enforce_index_definition_uniqueness();

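
The new index_definitions table mirrors pgss_queryid_queries: pgwatch appends one JSONB row per index per scrape, and the BEFORE INSERT trigger cancels any row whose dbname, indexrelname, schemaname, relname and index_definition combination is already stored. A minimal sketch of that behaviour with made-up values; the 'demo' partition is created by hand purely for the example and is not part of the commit:

-- Hypothetical partition for dbname 'demo', created only for this illustration.
create table if not exists index_definitions_demo
    partition of public.index_definitions for values in ('demo');

-- First insert is stored normally.
insert into public.index_definitions (time, dbname, data)
values (now(), 'demo',
        '{"indexrelname": "users_pkey", "schemaname": "public", "relname": "users",
          "index_definition": "CREATE UNIQUE INDEX users_pkey ON public.users USING btree (id)"}');

-- An identical definition is silently skipped: the trigger returns NULL and
-- cancels the INSERT, so only one row remains for dbname 'demo'.
insert into public.index_definitions (time, dbname, data)
values (now(), 'demo',
        '{"indexrelname": "users_pkey", "schemaname": "public", "relname": "users",
          "index_definition": "CREATE UNIQUE INDEX users_pkey ON public.users USING btree (id)"}');

select count(*) from public.index_definitions where dbname = 'demo';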

docker-compose.yml

Lines changed: 7 additions & 18 deletions

@@ -34,24 +34,26 @@ services:
         "-c",
         "pg_stat_statements.track=all",
       ]
-    ports:
-      - "${BIND_HOST:-}55432:5432"
     volumes:
       - target_db_data:/var/lib/postgresql/data
       - ./config/target-db/init.sql:/docker-entrypoint-initdb.d/init.sql

   # Postgres Sink - Storage for metrics in PostgreSQL format
+  # Note: pg_hba.conf is configured to allow passwordless connections (trust)
+  # for local connections within the Docker network. This simplifies pgwatch
+  # and postgres-exporter connectivity without compromising security since
+  # the database is not exposed externally.
   sink-postgres:
     image: postgres:15
     container_name: sink-postgres
     environment:
       POSTGRES_DB: postgres
       POSTGRES_USER: postgres
       POSTGRES_PASSWORD: postgres
-    ports:
-      - "${BIND_HOST:-}55433:5432"
+      POSTGRES_HOST_AUTH_METHOD: trust
     volumes:
       - sink_postgres_data:/var/lib/postgresql/data
+      - ./config/sink-postgres/00-configure-pg-hba.sh:/docker-entrypoint-initdb.d/00-configure-pg-hba.sh
       - ./config/sink-postgres/init.sql:/docker-entrypoint-initdb.d/init.sql

   # VictoriaMetrics Sink - Storage for metrics in Prometheus format

@@ -79,11 +81,9 @@ services:
       [
         "--sources=/etc/pgwatch/sources.yml",
         "--metrics=/etc/pgwatch/metrics.yml",
-        "--sink=postgresql://pgwatch:pgwatchadmin@sink-postgres:5432/measurements",
+        "--sink=postgresql://pgwatch@sink-postgres:5432/measurements?sslmode=disable",
         "--web-addr=:8080",
       ]
-    ports:
-      - "${BIND_HOST:-}58080:8080"
     depends_on:
       - sources-generator
       - sink-postgres

@@ -103,9 +103,6 @@ services:
         "--sink=prometheus://0.0.0.0:9091/pgwatch",
         "--web-addr=:8089",
       ]
-    ports:
-      - "${BIND_HOST:-}58089:8089"
-      - "${BIND_HOST:-}59091:9091"
     depends_on:
       - sources-generator
       - sink-prometheus

@@ -143,8 +140,6 @@ services:
       - PROMETHEUS_URL=http://sink-prometheus:9090
     depends_on:
       - sink-prometheus
-    ports:
-      - "${BIND_HOST:-}55000:5000"
     restart: unless-stopped
   # PostgreSQL Reports Generator - Runs reports after 1 hour
   postgres-reports:

@@ -194,8 +189,6 @@ services:
     image: gcr.io/cadvisor/cadvisor:v0.51.0
     container_name: cadvisor
     privileged: true
-    ports:
-      - "58081:8080"
     volumes:
       - /:/rootfs:ro
       - /var/run:/var/run:ro

@@ -212,8 +205,6 @@ services:
   node-exporter:
     image: prom/node-exporter:v1.8.2
     container_name: node-exporter
-    ports:
-      - "59100:9100"
     command:
       - '--path.rootfs=/host'
       - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'

@@ -227,8 +218,6 @@ services:
     container_name: postgres-exporter-sink
     environment:
       DATA_SOURCE_NAME: "postgresql://postgres:postgres@sink-postgres:5432/measurements?sslmode=disable"
-    ports:
-      - "59187:9187"
     depends_on:
      - sink-postgres
     restart: unless-stopped

reporter/postgres_reports.py

Lines changed: 9 additions & 9 deletions

@@ -78,18 +78,18 @@ def get_index_definitions_from_sink(self) -> Dict[str, str]:
         with self.pg_conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor:
             # Query the index_definitions table for the most recent data
             query = """
-                SELECT DISTINCT ON (data->>'indexname')
-                    data->>'indexname' as indexname,
+                SELECT DISTINCT ON (data->>'indexrelname')
+                    data->>'indexrelname' as indexrelname,
                     data->>'index_definition' as index_definition
                 FROM public.index_definitions
-                WHERE data ? 'indexname' AND data ? 'index_definition'
-                ORDER BY data->>'indexname', time DESC
+                WHERE data ? 'indexrelname' AND data ? 'index_definition'
+                ORDER BY data->>'indexrelname', time DESC
             """
             cursor.execute(query)

             for row in cursor.fetchall():
-                if row['indexname']:
-                    index_definitions[row['indexname']] = row['index_definition']
+                if row['indexrelname']:
+                    index_definitions[row['indexrelname']] = row['index_definition']

     except Exception as e:
         print(f"Error fetching index definitions from Postgres sink: {e}")

@@ -1846,9 +1846,9 @@ def make_request(api_url, endpoint, request_data):
 def main():
     parser = argparse.ArgumentParser(description='Generate PostgreSQL reports using PromQL')
     parser.add_argument('--prometheus-url', default='http://sink-prometheus:9090',
-                        help='Prometheus URL (default: http://sink-prometheus:9090 for Docker, use http://localhost:59090 for external access)')
-    parser.add_argument('--postgres-sink-url', default='postgresql://pgwatch:pgwatchadmin@sink-postgres:5432/measurements',
-                        help='Postgres sink connection string (default: postgresql://pgwatch:pgwatchadmin@sink-postgres:5432/measurements for Docker, use postgresql://pgwatch:pgwatchadmin@localhost:55433/measurements for external access)')
+                        help='Prometheus URL (default: http://sink-prometheus:9090)')
+    parser.add_argument('--postgres-sink-url', default='postgresql://pgwatch@sink-postgres:5432/measurements',
+                        help='Postgres sink connection string (default: postgresql://pgwatch@sink-postgres:5432/measurements)')
     parser.add_argument('--cluster', default='local',
                         help='Cluster name (default: local)')
     parser.add_argument('--node-name', default='node-01',
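
The reporter's query leans on a DISTINCT ON idiom: rows are grouped by data->>'indexrelname', and because each group is ordered by time DESC, the single row kept per index is the most recent one. A self-contained illustration of the idiom with throwaway values, not part of the commit:

-- DISTINCT ON keeps the first row per group according to the ORDER BY,
-- so sorting each group by time DESC yields the latest definition per index.
with samples(indexrelname, index_definition, time) as (
    values ('users_pkey', 'CREATE UNIQUE INDEX users_pkey ON users (id)',       now() - interval '2 hours'),
           ('users_pkey', 'CREATE UNIQUE INDEX users_pkey ON users (id) -- v2', now()),
           ('orders_idx', 'CREATE INDEX orders_idx ON orders (created_at)',     now() - interval '1 hour')
)
select distinct on (indexrelname)
       indexrelname, index_definition, time
from samples
order by indexrelname, time desc;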
