You don’t need to be a DBA to work effectively with PostgreSQL. Here’s what developers need to know for day-to-day operations.

Connection Basics

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
# Connect
psql -h localhost -U myuser -d mydb

# With password prompt
psql -h localhost -U myuser -d mydb -W

# Connection string
psql "postgresql://user:pass@localhost:5432/mydb"

# Common options
psql -c "SELECT 1"           # Run single command
psql -f script.sql           # Run file
psql -A -t -c "SELECT 1"     # Unaligned, tuples only

psql Commands

  Command       Description
  \l            List databases
  \c dbname     Connect to a database
  \dt           List tables
  \d table      Describe a table
  \d+ table     Describe a table with details (sizes, storage)
  \di           List indexes
  \du           List users/roles
  \df           List functions
  \x            Toggle expanded output
  \timing       Toggle query timing
  \q            Quit

Essential Queries

Table Information

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
-- Table sizes
SELECT 
    tablename,
    pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS size
FROM pg_tables
WHERE schemaname = 'public'
ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC;

-- Row counts (estimate)
SELECT 
    relname AS "table",
    reltuples::bigint AS row_estimate
FROM pg_class
WHERE relkind = 'r' AND relnamespace = 'public'::regnamespace
ORDER BY reltuples DESC;

-- Exact row counts (slow on large tables)
SELECT 
    'SELECT ''' || tablename || ''' AS "table", COUNT(*) FROM ' || tablename || ' UNION ALL'
FROM pg_tables
WHERE schemaname = 'public';

Index Information

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
-- Index sizes
SELECT
    indexrelname AS index,
    pg_size_pretty(pg_relation_size(indexrelid)) AS size
FROM pg_stat_user_indexes
ORDER BY pg_relation_size(indexrelid) DESC;

-- Unused indexes
SELECT
    schemaname || '.' || relname AS "table",
    indexrelname AS index,
    idx_scan AS scans
FROM pg_stat_user_indexes
WHERE idx_scan = 0
ORDER BY pg_relation_size(indexrelid) DESC;

-- Index usage
SELECT
    relname AS "table",
    indexrelname AS index,
    idx_scan AS scans,
    idx_tup_read AS tuples_read,
    idx_tup_fetch AS tuples_fetched
FROM pg_stat_user_indexes
ORDER BY idx_scan DESC;

Active Queries

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
-- Running queries
SELECT 
    pid,
    now() - pg_stat_activity.query_start AS duration,
    query,
    state
FROM pg_stat_activity
WHERE state != 'idle'
ORDER BY duration DESC;

-- Long-running queries (> 5 minutes)
SELECT 
    pid,
    now() - pg_stat_activity.query_start AS duration,
    query
FROM pg_stat_activity
WHERE (now() - pg_stat_activity.query_start) > interval '5 minutes'
  AND state != 'idle';

-- Kill a query
SELECT pg_cancel_backend(pid);     -- Graceful
SELECT pg_terminate_backend(pid);  -- Force

Lock Investigation

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
-- Blocked queries
SELECT
    blocked_locks.pid AS blocked_pid,
    blocked_activity.usename AS blocked_user,
    blocking_locks.pid AS blocking_pid,
    blocking_activity.usename AS blocking_user,
    blocked_activity.query AS blocked_query
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity 
    ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks 
    ON blocking_locks.locktype = blocked_locks.locktype
    AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
    AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
    AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
    AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
    AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
    AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
    AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
    AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
    AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
    AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity 
    ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.granted;

EXPLAIN ANALYZE

1
2
3
4
5
6
7
8
-- Show query plan with actual timing
EXPLAIN ANALYZE SELECT * FROM users WHERE email = 'test@example.com';

-- With buffers (I/O stats)
EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM users WHERE email = 'test@example.com';

-- Format as JSON
EXPLAIN (ANALYZE, FORMAT JSON) SELECT * FROM users WHERE email = 'test@example.com';

Key things to look for:

  • Seq Scan on large tables (missing index?)
  • Nested Loop with high row counts
  • Sort operations (memory vs disk)
  • Actual vs estimated rows (statistics stale?)

Index Strategies

B-tree (Default)

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
-- Simple index
CREATE INDEX idx_users_email ON users(email);

-- Unique index
CREATE UNIQUE INDEX idx_users_email ON users(email);

-- Partial index
CREATE INDEX idx_orders_pending ON orders(created_at) 
WHERE status = 'pending';

-- Multi-column index
CREATE INDEX idx_orders_user_date ON orders(user_id, created_at DESC);

Expression Index

1
2
3
4
5
-- Index on lowercase email
CREATE INDEX idx_users_email_lower ON users(LOWER(email));

-- Query must match expression
SELECT * FROM users WHERE LOWER(email) = 'test@example.com';

GIN Index (Arrays, JSON)

1
2
3
4
5
6
7
-- JSONB containment
CREATE INDEX idx_users_metadata ON users USING gin(metadata);
SELECT * FROM users WHERE metadata @> '{"role": "admin"}';

-- Array contains
CREATE INDEX idx_posts_tags ON posts USING gin(tags);
SELECT * FROM posts WHERE tags @> ARRAY['postgresql'];

BRIN Index (Large Sequential Data)

1
2
-- Time-series data
CREATE INDEX idx_events_created ON events USING brin(created_at);

Maintenance

VACUUM and ANALYZE

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
-- Update statistics
ANALYZE users;
ANALYZE;  -- All tables

-- Reclaim space
VACUUM users;
VACUUM FULL users;  -- Locks table, reclaims more space

-- Check auto-vacuum status
SELECT 
    schemaname, relname, 
    last_vacuum, last_autovacuum,
    last_analyze, last_autoanalyze
FROM pg_stat_user_tables;

REINDEX

1
2
3
4
5
6
7
8
-- Rebuild index
REINDEX INDEX idx_users_email;

-- Rebuild all indexes on table
REINDEX TABLE users;

-- Concurrent (doesn't block writes; PostgreSQL 12+)
REINDEX INDEX CONCURRENTLY idx_users_email;

Backup and Restore

pg_dump

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
# Dump single database
pg_dump -h localhost -U myuser mydb > backup.sql

# Custom format (compressed, allows selective restore)
pg_dump -Fc -h localhost -U myuser mydb > backup.dump

# Dump specific tables
pg_dump -t users -t orders mydb > tables.sql

# Dump schema only
pg_dump -s mydb > schema.sql

# Dump data only
pg_dump -a mydb > data.sql

Restore

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
# Plain SQL
psql mydb < backup.sql

# Custom format
pg_restore -d mydb backup.dump

# Create database and restore
pg_restore -C -d postgres backup.dump

# Selective restore
pg_restore -t users -d mydb backup.dump

pg_dumpall

1
2
3
4
5
# All databases + globals (roles, etc.)
pg_dumpall > full_backup.sql

# Globals only
pg_dumpall --globals-only > globals.sql

Schema Changes

Add Column

1
2
3
4
5
6
7
8
-- Nullable column (instant)
ALTER TABLE users ADD COLUMN bio TEXT;

-- With default (scans table in older PostgreSQL)
ALTER TABLE users ADD COLUMN active BOOLEAN DEFAULT true;

-- PostgreSQL 11+: non-volatile defaults are instant
-- (NOW() is evaluated once at ALTER time, not per row)
ALTER TABLE users ADD COLUMN created_at TIMESTAMP DEFAULT NOW();

Safe Index Creation

1
2
3
4
-- Non-blocking (recommended for production)
CREATE INDEX CONCURRENTLY idx_users_email ON users(email);

-- Note: CONCURRENTLY can't be in transaction

Safe Migrations

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
-- Add column
ALTER TABLE users ADD COLUMN new_col TEXT;

-- Backfill in batches
UPDATE users SET new_col = 'value' WHERE id BETWEEN 1 AND 10000;
UPDATE users SET new_col = 'value' WHERE id BETWEEN 10001 AND 20000;

-- Add constraint
ALTER TABLE users ADD CONSTRAINT users_new_col_check CHECK (new_col IS NOT NULL) NOT VALID;
ALTER TABLE users VALIDATE CONSTRAINT users_new_col_check;

Connection Pooling

Check Connections

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
-- Current connections
SELECT count(*) FROM pg_stat_activity;

-- By state
SELECT state, count(*) 
FROM pg_stat_activity 
GROUP BY state;

-- By application
SELECT application_name, count(*)
FROM pg_stat_activity
GROUP BY application_name;

Connection Limits

1
2
3
4
5
-- View settings
SHOW max_connections;

-- Per-user limits
ALTER USER myuser CONNECTION LIMIT 10;

Use PgBouncer for connection pooling in production.

Useful Settings

For Development

1
2
3
4
5
6
7
-- In session
SET statement_timeout = '30s';
SET lock_timeout = '10s';

-- Show current values
SHOW statement_timeout;
SHOW work_mem;

Common Tuning

# shared_buffers: ~25% of system memory
shared_buffers = 256MB

# work_mem: per-sort / per-hash-table memory
work_mem = 16MB

# maintenance_work_mem: for VACUUM, CREATE INDEX
maintenance_work_mem = 256MB

# Log queries slower than 1s
log_min_duration_statement = 1000

Quick Diagnostics

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
-- Database size
SELECT pg_size_pretty(pg_database_size('mydb'));

-- Largest tables
SELECT relname, pg_size_pretty(pg_total_relation_size(relid))
FROM pg_stat_user_tables
ORDER BY pg_total_relation_size(relid) DESC
LIMIT 10;

-- Cache hit ratio (should be > 99%)
SELECT 
    sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) AS ratio
FROM pg_statio_user_tables;

-- Connection states
SELECT state, count(*) FROM pg_stat_activity GROUP BY state;

-- Replication lag (if applicable)
SELECT client_addr, state, 
       pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS lag_bytes
FROM pg_stat_replication;

PostgreSQL is remarkably capable once you know how to look under the hood. These queries solve 90% of operational questions.