Docker Compose turns “works on my machine” into “works everywhere.” Here’s how to structure it for real development workflows.

Basic Structure

# docker-compose.yml
services:
  app:
    build: .          # build the image from the Dockerfile in this directory
    ports:
      - "3000:3000"   # host:container
    volumes:
      - .:/app        # bind-mount source for live reload
    environment:
      - NODE_ENV=development

  db:
    image: postgres:15
    environment:
      POSTGRES_PASSWORD: devpass  # dev-only credential; never reuse in production

Start everything:

# Foreground: logs stream to the terminal, Ctrl-C stops everything
docker compose up
docker compose up -d  # Detached

Development vs Production

Use multiple files:

# docker-compose.yml (base)
# Shared definitions only; dev settings live in docker-compose.override.yml,
# production settings in docker-compose.prod.yml.
services:
  app:
    image: myapp:latest

  db:
    image: postgres:15
# docker-compose.override.yml (auto-loaded for dev)
# Merged on top of docker-compose.yml whenever no -f flags are given.
services:
  app:
    build: .        # build locally instead of pulling myapp:latest
    volumes:
      - .:/app
    environment:
      - DEBUG=true
# docker-compose.prod.yml
# Applied explicitly with -f; never auto-loaded.
services:
  app:
    restart: always   # restart on exit and on daemon restart
    environment:
      - DEBUG=false

Usage:

# Development (loads override automatically)
docker compose up

# Production
# File order matters: later -f files override earlier ones.
docker compose -f docker-compose.yml -f docker-compose.prod.yml up

Service Dependencies

services:
  app:
    build: .
    depends_on:
      db:
        condition: service_healthy   # wait until db's healthcheck passes
      redis:
        condition: service_started   # only waits for the container to start

  db:
    image: postgres:15
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 5s
      retries: 5

Volume Patterns

Named Volumes (Persistent)

services:
  db:
    image: postgres:15
    volumes:
      - postgres_data:/var/lib/postgresql/data   # survives `down`; removed only by `down -v`

volumes:
  postgres_data:   # declared at top level; managed by Docker

Bind Mounts (Development)

services:
  app:
    build: .
    volumes:
      # Source code
      - .:/app
      # Exclude node_modules (use container's version)
      - /app/node_modules
      # Config file
      - ./config/dev.json:/app/config/config.json:ro   # :ro = read-only in the container

Anonymous Volumes (Exclude from bind)

# Anonymous volumes shadow the matching path inside the bind mount
services:
  app:
    volumes:
      - .:/app
      - /app/node_modules    # Anonymous, not synced
      - /app/.next           # Build cache stays in container

Environment Variables

Inline

services:
  app:
    environment:
      - DATABASE_URL=postgres://user:pass@db/myapp   # "db" resolves via the compose network
      - REDIS_URL=redis://redis:6379

From File

services:
  app:
    env_file:
      - .env
      - .env.local   # later files win for duplicate keys

Variable Substitution

# Uses HOST_PORT from shell environment
services:
  app:
    ports:
      - "${HOST_PORT:-3000}:3000"   # :-3000 supplies a default when HOST_PORT is unset

Networking

Default Network

Services can reach each other by service name:

# Compose puts all services on one default network with built-in DNS
services:
  app:
    environment:
      - DB_HOST=db  # Just use service name
  db:
    image: postgres:15

Custom Networks

services:
  frontend:
    networks:
      - frontend

  api:
    networks:     # api bridges both networks
      - frontend
      - backend

  db:
    networks:
      - backend   # unreachable from the frontend network

networks:
  frontend:
  backend:

External Networks

networks:
  proxy:
    external: true   # must already exist (e.g. `docker network create proxy`)

Build Configuration

services:
  app:
    build:
      context: .                  # directory sent to the build
      dockerfile: Dockerfile.dev  # alternative Dockerfile name
      args:
        - NODE_VERSION=20         # build-time ARG, not a runtime env var
      target: development         # stop at this multi-stage build stage

Multi-Stage Builds

# Dockerfile
FROM node:20 AS base
WORKDIR /app
COPY package*.json ./

FROM base AS development
RUN npm install                # full install including devDependencies
CMD ["npm", "run", "dev"]      # source arrives via the compose bind mount

FROM base AS production
RUN npm ci --omit=dev          # reproducible install; --only=production is deprecated
COPY . .
CMD ["npm", "start"]
# Select which Dockerfile stage to build
services:
  app:
    build:
      context: .
      target: development  # or production

Common Service Patterns

PostgreSQL

services:
  postgres:
    image: postgres:15-alpine
    environment:
      POSTGRES_USER: myapp
      POSTGRES_PASSWORD: secret   # dev-only credential
      POSTGRES_DB: myapp_dev
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./init.sql:/docker-entrypoint-initdb.d/init.sql   # runs only on first init of the data dir
    ports:
      - "5432:5432"   # expose for host tools like psql
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U myapp"]
      interval: 10s
      timeout: 5s
      retries: 5

volumes:
  postgres_data:

Redis

services:
  redis:
    image: redis:7-alpine
    command: redis-server --appendonly yes   # enable AOF persistence
    volumes:
      - redis_data:/data
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

volumes:
  redis_data:

MongoDB

services:
  mongo:
    image: mongo:6
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: secret   # dev-only credential
    volumes:
      - mongo_data:/data/db
    ports:
      - "27017:27017"

volumes:
  mongo_data:

Nginx Reverse Proxy

services:
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro   # mount config read-only
    depends_on:   # start ordering only; use condition: for health-based waits
      - api
      - frontend

Full Stack Example

services:
  frontend:
    build:
      context: ./frontend
      target: development
    ports:
      - "3000:3000"
    volumes:
      - ./frontend:/app
      - /app/node_modules      # keep container's node_modules out of the bind mount
    environment:
      - VITE_API_URL=http://localhost:8080
    depends_on:
      - api

  api:
    build:
      context: ./api
      target: development
    ports:
      - "8080:8080"
    volumes:
      - ./api:/app
    environment:
      - DATABASE_URL=postgres://app:secret@db/myapp
      - REDIS_URL=redis://redis:6379
    depends_on:
      db:
        condition: service_healthy   # wait for healthchecks, not just start
      redis:
        condition: service_healthy

  worker:
    build:
      context: ./api             # shares the API build context
    command: python worker.py    # override the image's default command
    volumes:
      - ./api:/app
    environment:
      - DATABASE_URL=postgres://app:secret@db/myapp
      - REDIS_URL=redis://redis:6379
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy

  db:
    image: postgres:15-alpine
    environment:
      POSTGRES_USER: app
      POSTGRES_PASSWORD: secret
      POSTGRES_DB: myapp
    volumes:
      - postgres_data:/var/lib/postgresql/data   # persist data across restarts
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U app"]
      interval: 5s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 5s
      retries: 5

volumes:
  postgres_data:
  redis_data:

Useful Commands

# Start services
docker compose up -d

# View logs (-f follows; add a service name to filter)
docker compose logs -f
docker compose logs -f api

# Rebuild
docker compose build
docker compose up -d --build

# Stop (containers kept)
docker compose stop

# Stop and remove
docker compose down

# Remove volumes too (DESTRUCTIVE: deletes named-volume data)
docker compose down -v

# Run one-off command (--rm removes the container afterwards)
docker compose run --rm api python manage.py migrate

# Execute in running container
docker compose exec api bash

# Scale service
docker compose up -d --scale worker=3

# List services
docker compose ps

Profiles

Run specific service groups:

services:
  app:
    image: myapp   # no profile: always runs (a service needs a body;
                   # a bare `app:` key is a null, invalid service)

  debug-tools:
    image: busybox
    profiles:
      - debug      # only started with --profile debug

  monitoring:
    image: prom/prometheus   # official image lives under the prom/ namespace
    profiles:
      - monitoring
# Normal (starts only profile-less services)
docker compose up

# With debug tools
docker compose --profile debug up

# Multiple profiles
docker compose --profile debug --profile monitoring up

Resource Limits

services:
  app:
    deploy:
      resources:
        limits:           # hard caps enforced on the container
          cpus: '0.5'
          memory: 512M
        reservations:     # soft guarantee
          memory: 256M

Docker Compose makes complex local environments simple. Define once, run anywhere.