Nginx is everywhere: reverse proxy, load balancer, static file server, SSL terminator. Its configuration syntax is powerful but has gotchas that catch everyone at least once.

These patterns cover common use cases done right.

Basic Reverse Proxy

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
# Pool of application servers; referenced below via proxy_pass http://backend.
upstream backend {
    server 127.0.0.1:3000;
}

server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://backend;
        # Preserve the client's Host header instead of the upstream group name.
        proxy_set_header Host $host;
        # Real client address — proxy_pass would otherwise hide it from the app.
        proxy_set_header X-Real-IP $remote_addr;
        # Appends $remote_addr to any X-Forwarded-For chain already present.
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # "http" or "https", so the backend can build correct absolute URLs.
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

Always set those headers — your backend needs to know the real client IP and protocol.

SSL/TLS Configuration

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
server {
    # NOTE: on nginx >= 1.25.1 the "http2" listen parameter is deprecated;
    # prefer "listen 443 ssl;" plus a separate "http2 on;" directive.
    listen 443 ssl http2;
    server_name example.com;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    # Modern SSL settings
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
    # With AEAD-only suites, letting the client choose is current best practice.
    ssl_prefer_server_ciphers off;

    # SSL session caching (shared across worker processes)
    ssl_session_timeout 1d;
    ssl_session_cache shared:SSL:50m;
    ssl_session_tickets off;

    # OCSP stapling
    ssl_stapling on;
    ssl_stapling_verify on;
    # ssl_stapling_verify requires the issuer chain via ssl_trusted_certificate;
    # without it, verification fails silently and no staple is sent.
    # Let's Encrypt provides this file as chain.pem.
    ssl_trusted_certificate /etc/letsencrypt/live/example.com/chain.pem;
    resolver 8.8.8.8 8.8.4.4 valid=300s;

    location / {
        proxy_pass http://backend;
    }
}

# Redirect HTTP to HTTPS
server {
    listen 80;
    server_name example.com;
    # Use $host (the name the client actually requested), not $server_name:
    # $server_name is always the first configured name and breaks redirects
    # when this block serves multiple names or a wildcard.
    return 301 https://$host$request_uri;
}

Load Balancing

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
upstream backend {
    # Round robin (default). Per-server parameters (weight, passive health
    # checks) belong on the single server line — listing the same address
    # more than once would multiply its share of traffic.
    server 10.0.1.1:3000 weight=3 max_fails=3 fail_timeout=30s;
    server 10.0.1.2:3000 weight=1 max_fails=3 fail_timeout=30s;
    server 10.0.1.3:3000 max_fails=3 fail_timeout=30s;

    # Backup server: receives traffic only when all primary servers are down.
    server 10.0.1.4:3000 backup;
}

# Or use different algorithms
upstream backend_leastconn {
    # Route each request to the server with the fewest active connections.
    least_conn;
    server 10.0.1.1:3000;
    server 10.0.1.2:3000;
}

upstream backend_iphash {
    # Session persistence: the same client IP always hits the same server.
    ip_hash;
    server 10.0.1.1:3000;
    server 10.0.1.2:3000;
}

Static Files with Caching

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
server {
    listen 80;
    server_name static.example.com;
    root /var/www/static;

    # Cache static assets aggressively. "immutable" is only safe when
    # filenames are content-hashed (asset.abc123.js), so a change produces
    # a new URL rather than a stale cache hit.
    location ~* \.(jpg|jpeg|png|gif|ico|svg|webp|css|js|woff|woff2|ttf)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        access_log off;
    }

    # Gzip compression. text/html is always compressed when gzip is on,
    # so it never appears in gzip_types. Already-compressed formats
    # (jpg, png, woff2) gain nothing and are deliberately excluded.
    gzip on;
    gzip_vary on;          # emit "Vary: Accept-Encoding" for shared caches
    gzip_min_length 1024;  # tiny responses aren't worth the CPU
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml image/svg+xml;
}

API Rate Limiting

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
# Define rate limit zones (http context). $binary_remote_addr is a compact
# fixed-size key; a 10m zone holds state for roughly 160k clients.
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=login_limit:10m rate=1r/s;

server {
    # Rejected requests get 429 Too Many Requests instead of the
    # default 503, which clients and monitoring can act on correctly.
    limit_req_status 429;

    location /api/ {
        # burst=20 absorbs short spikes; nodelay serves them immediately.
        limit_req zone=api_limit burst=20 nodelay;
        proxy_pass http://backend;
    }

    # Longest prefix match wins, so this stricter limit applies to /api/login
    # even though /api/ also matches.
    location /api/login {
        limit_req zone=login_limit burst=5;
        proxy_pass http://backend;
    }
}

The `burst` parameter allows temporary spikes above the configured rate; `nodelay` serves those burst requests immediately rather than queuing them out at the rate limit.

WebSocket Proxy

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
# Map the client's Upgrade header onto the Connection header we forward:
# "upgrade" when the client asked for a protocol upgrade, "close" otherwise
# (so plain HTTP requests through this location don't hold connections open).
map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}

server {
    location /ws/ {
        proxy_pass http://websocket_backend;
        # WebSocket needs HTTP/1.1; nginx proxies with HTTP/1.0 by default.
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $host;
        
        # Longer timeouts for WebSocket (24h) — idle sockets would otherwise
        # be closed after the default 60s proxy_read_timeout.
        proxy_read_timeout 86400;
        proxy_send_timeout 86400;
    }
}

Security Headers

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
server {
    # Security headers. GOTCHA: add_header is inherited from the server
    # level only if a location defines NO add_header of its own — a single
    # add_header inside a location silently drops every header set here.
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    # "0" disables the legacy browser XSS auditor; OWASP now recommends
    # disabling it because the filter itself caused information leaks.
    add_header X-XSS-Protection "0" always;
    add_header Referrer-Policy "strict-origin-when-cross-origin" always;
    add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline';" always;

    # HSTS (only if you're sure about HTTPS — browsers cache it for a year)
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    # Hide nginx version in the Server header and on error pages
    server_tokens off;
}

Request Size Limits

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
server {
    # Max upload size; larger requests are rejected with 413.
    client_max_body_size 100M;

    # Buffer sizes
    client_body_buffer_size 128k;   # bodies above this spill to a temp file
    proxy_buffer_size 128k;         # first chunk of upstream response (headers)
    proxy_buffers 4 256k;           # per-connection buffers for the body
    proxy_busy_buffers_size 256k;   # max buffered data still being sent to client

    location /upload {
        client_max_body_size 500M;  # Override for uploads
        proxy_pass http://backend;
    }
}

Timeouts

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
server {
    # Client timeouts — each applies between successive read/write
    # operations, not to the whole transfer.
    client_body_timeout 60s;
    client_header_timeout 60s;
    send_timeout 60s;

    # Proxy timeouts
    proxy_connect_timeout 60s;   # establishing the upstream connection
    proxy_send_timeout 60s;      # writing the request to the upstream
    proxy_read_timeout 60s;      # waiting for the upstream to respond

    location /long-running {
        proxy_read_timeout 300s;  # 5 minutes for slow endpoints
        proxy_pass http://backend;
    }
}

Logging

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
# Custom log format: standard combined fields plus per-request timings
# (rt = total request time; uct/uht/urt = upstream connect/header/response
# times — the gap between rt and urt reveals where latency is spent).
log_format detailed '$remote_addr - $remote_user [$time_local] '
                    '"$request" $status $body_bytes_sent '
                    '"$http_referer" "$http_user_agent" '
                    'rt=$request_time uct=$upstream_connect_time '
                    'uht=$upstream_header_time urt=$upstream_response_time';

server {
    access_log /var/log/nginx/access.log detailed;
    error_log /var/log/nginx/error.log warn;

    # Disable logging for health checks so probes don't flood the access log
    location /health {
        access_log off;
        return 200 "OK";
    }
}

Multiple Domains

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
# Default server (catch-all): handles any request whose Host header matches
# no other server_name — raw-IP traffic, scanners, typos.
server {
    listen 80 default_server;
    server_name _;   # "_" is just a conventional never-matching placeholder
    return 444;  # Close connection without response (nginx-specific code)
}

# Domain 1
server {
    listen 80;
    server_name app.example.com;
    location / {
        proxy_pass http://app_backend;
    }
}

# Domain 2
server {
    listen 80;
    server_name api.example.com;
    location / {
        proxy_pass http://api_backend;
    }
}

Maintenance Mode

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
server {
    # Check for maintenance file on every request; this server-level "if"
    # runs in the rewrite phase, before any location is selected.
    if (-f /var/www/maintenance.html) {
        return 503;
    }

    error_page 503 @maintenance;
    # Named locations are internal-only: clients can never request
    # @maintenance directly, only the error_page redirect reaches it.
    location @maintenance {
        root /var/www;
        # Serve the static page while preserving the 503 status code
        # (a plain redirect would report 200 and confuse monitoring).
        rewrite ^(.*)$ /maintenance.html break;
    }

    location / {
        proxy_pass http://backend;
    }
}

Enable maintenance mode with `touch /var/www/maintenance.html`; disable it with `rm /var/www/maintenance.html`.

Testing Configuration

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
# Syntax check (validates config without touching the running server)
nginx -t

# Test with specific config
nginx -t -c /etc/nginx/nginx.conf

# Reload without downtime (validates first; old workers finish
# their in-flight requests before exiting)
nginx -s reload

# Full restart (drops in-flight connections; rarely needed)
systemctl restart nginx

Common Gotchas

Trailing slashes matter:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
# These behave differently!
# No URI part in proxy_pass: the original path is forwarded unchanged.
location /api {
    proxy_pass http://backend;
}
# Request: /api/users → backend: /api/users

# A URI part ("/") in proxy_pass: the matched location prefix ("/api/")
# is replaced by that URI before forwarding.
location /api/ {
    proxy_pass http://backend/;
}
# Request: /api/users → backend: /users (stripped!)

If is evil (sometimes):

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
# Avoid if inside location when possible
# This can cause unexpected behavior — inside a location, only
# "return" and "rewrite ... last" are considered safe within "if".
location / {
    if ($request_uri ~* "^/old") {
        return 301 /new;
    }
}

# Better: use separate location
# NOTE(review): not byte-for-byte equivalent — the regex above is
# case-insensitive (~*) while a prefix location is case-sensitive.
location /old {
    return 301 /new;
}

Nginx configuration is declarative but subtle. Test every change with nginx -t. Use separate server blocks for different domains. Set appropriate timeouts for your workload.

The configuration that works under test traffic may fail under production load. Monitor your 502s and 504s — they’re often nginx telling you something is misconfigured.