Cache invalidation is one of the two hard problems in computer science. Here’s how to make it less painful.

The Caching Patterns

Cache-Aside (Lazy Loading)

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
def get_user(user_id: str) -> dict:
    """Cache-aside lookup: serve from Redis when possible, else load and cache."""
    key = f"user:{user_id}"

    hit = redis.get(key)
    if hit:
        return json.loads(hit)

    # Miss: read the source of truth, then populate the cache for next time (1h TTL).
    user = db.query("SELECT * FROM users WHERE id = %s", user_id)
    redis.setex(key, 3600, json.dumps(user))
    return user

Pros: Only caches what’s actually used. Cons: The first request is always slow (cache miss).

Write-Through

1
2
3
4
5
6
7
def update_user(user_id: str, data: dict):
    """Write-through: persist to the database, then refresh the cache in-line."""
    # Source of truth is updated first.
    db.execute("UPDATE users SET ... WHERE id = %s", user_id)

    # Re-read the row so the cache mirrors exactly what was stored (1h TTL).
    fresh = db.query("SELECT * FROM users WHERE id = %s", user_id)
    redis.setex(f"user:{user_id}", 3600, json.dumps(fresh))

Pros: The cache is always consistent with the database. Cons: Write latency increases, since every write also refreshes the cache.

Write-Behind (Write-Back)

1
2
3
4
5
6
def update_user(user_id: str, data: dict):
    """Write-behind: the cache is updated now; the database write is deferred."""
    cache_key = f"user:{user_id}"
    redis.setex(cache_key, 3600, json.dumps(data))  # readers see the new value immediately

    # The durable write happens asynchronously via the queue consumer.
    queue.push("db_writes", {"table": "users", "id": user_id, "data": data})

Pros: Fast writes. Cons: Risk of data loss if the cache fails before the queued write is flushed to the database.

Read-Through

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
# Cache handles the loading logic
class UserCache:
    """Read-through cache: callers talk only to the cache layer."""

    def get(self, user_id: str) -> dict:
        key = f"user:{user_id}"
        hit = redis.get(key)
        if hit:
            return json.loads(hit)

        # Miss: the cache itself fetches from the database and stores the result.
        user = self.load_from_db(user_id)
        redis.setex(key, 3600, json.dumps(user))
        return user

    def load_from_db(self, user_id: str) -> dict:
        # Source-of-truth lookup, kept as its own method so it can be overridden.
        return db.query("SELECT * FROM users WHERE id = %s", user_id)

Same as cache-aside, but encapsulated in the cache layer.

Invalidation Strategies

Time-Based (TTL)

1
2
3
4
5
6
# Expire after 1 hour
redis.setex("user:123", 3600, data)

# Jittered TTL to prevent stampede
# (spreading expiry over a 0-300s window keeps keys that were set at the
# same moment from all expiring — and all reloading — simultaneously)
ttl = 3600 + random.randint(0, 300)
redis.setex("user:123", ttl, data)

Simple but imprecise. Data might be stale within TTL.

Event-Based

1
2
3
4
5
6
def update_user(user_id: str, data: dict):
    """Event-based invalidation: drop the cached row as soon as the DB changes."""
    db.execute("UPDATE users SET ... WHERE id = %s", user_id)

    key = f"user:{user_id}"
    redis.delete(key)  # Invalidate immediately

    # Or publish event for other services
    redis.publish("cache_invalidation", key)

Precise but requires coordination.

Version-Based

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
def get_user(user_id: str, version: int) -> dict:
    """Version-keyed lookup: a new row version means a brand-new cache key."""
    key = f"user:{user_id}:v{version}"

    hit = redis.get(key)
    if hit:
        return json.loads(hit)

    # Miss under this version: load fresh and cache under the versioned key.
    row = db.query("SELECT * FROM users WHERE id = %s", user_id)
    redis.setex(key, 3600, json.dumps(row))
    return row

# On update, increment version
def update_user(user_id: str, data: dict):
    """Bump the row's version so readers start using a fresh cache key."""
    db.execute("UPDATE users SET version = version + 1, ... WHERE id = %s", user_id)
    # Old cache entries naturally become orphaned
    # (they are never read again and fall out when their TTL lapses)

Old versions expire naturally. No explicit invalidation needed.

Cache Stampede Prevention

When a hot cache entry expires, many concurrent requests all miss at once and hit the database simultaneously — the “stampede.”

Locking

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
def get_user_with_lock(user_id: str, max_wait: float = 5.0) -> dict:
    """Stampede-safe cache-aside read.

    Only one process refills an expired key (guarded by a short-lived NX
    lock); everyone else polls the cache until the value appears. If the
    value never shows up within ``max_wait`` seconds (e.g. the lock holder
    died), falls back to a direct database read so the caller is never
    stuck.

    Args:
        user_id: primary key of the user row.
        max_wait: upper bound on time spent waiting for another process's
            refill before reading the database directly.

    Returns:
        The user row as a dict.
    """
    cache_key = f"user:{user_id}"
    lock_key = f"lock:{cache_key}"
    deadline = time.monotonic() + max_wait

    while True:
        cached = redis.get(cache_key)
        if cached:
            return json.loads(cached)

        # NX lock with a 5s expiry so a crashed holder cannot wedge everyone.
        if redis.set(lock_key, "1", nx=True, ex=5):
            try:
                user = db.query("SELECT * FROM users WHERE id = %s", user_id)
                redis.setex(cache_key, 3600, json.dumps(user))
                return user
            finally:
                redis.delete(lock_key)

        if time.monotonic() >= deadline:
            # Give up waiting: serve from the source of truth rather than spin.
            return db.query("SELECT * FROM users WHERE id = %s", user_id)

        # Another process is loading; poll instead of recursing — the original
        # recursive retry could grow the call stack without bound under
        # sustained contention.
        time.sleep(0.1)

Early Expiration

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
def get_user(user_id: str) -> dict:
    """Cache-aside read with a proactive background refresh near expiry.

    Serving the cached value while refreshing in the background means the
    TTL rollover never produces a synchronous miss for readers.
    """
    key = f"user:{user_id}"
    cached = redis.get(key)
    if cached:
        data = json.loads(cached)

        # Refresh once the entry is within 5 minutes of expiring.
        # Guard against Redis TTL sentinel values: -1 (key has no expiry)
        # and -2 (key just vanished) are both < 300 and would otherwise
        # trigger a refresh on every single hit.
        ttl = redis.ttl(key)
        if 0 <= ttl < 300:
            background_refresh(user_id)

        return data

    return fetch_and_cache(user_id)

What to Cache

Good candidates:

  • Expensive database queries
  • API responses from slow services
  • Computed/aggregated data
  • Session data
  • Configuration that rarely changes

Bad candidates:

  • Rapidly changing data
  • User-specific data that varies per request
  • Large objects (memory pressure)
  • Security-sensitive data

Cache Key Design

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
# Bad: collision risk
# (names are not unique — two users called "alex" would share one entry)
cache_key = f"user:{name}"

# Good: unique identifier
cache_key = f"user:id:{user_id}"

# Include relevant parameters
# (anything that changes the response must be part of the key)
cache_key = f"search:q:{query}:page:{page}:limit:{limit}"

# Hash complex parameters
# sort_keys=True makes the serialization deterministic so equal params
# always yield the same key; MD5 is acceptable here — a cache key is not
# a security boundary.
import hashlib
params_hash = hashlib.md5(json.dumps(params, sort_keys=True).encode()).hexdigest()
cache_key = f"report:{params_hash}"

Multi-Level Caching

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
class MultiLevelCache:
    """Two-tier cache: a per-process dict in front of a shared Redis.

    The local tier stores ``(value, expires_at)`` pairs and honours the
    same TTL as Redis. (Keeping local entries forever — as a bare dict
    would — lets a process serve a value long after the shared copy has
    expired or been invalidated, and grows memory without bound.)
    """

    def __init__(self):
        self.local = {}  # key -> (value, monotonic expiry); fastest tier
        self.redis = Redis()  # Distributed (shared)

    def get(self, key: str):
        """Return the cached value, or None on a full miss."""
        # Local tier first; evict on the spot if the entry has lapsed.
        entry = self.local.get(key)
        if entry is not None:
            value, expires_at = entry
            if time.monotonic() < expires_at:
                return value
            del self.local[key]

        # Shared tier; repopulate local with Redis's remaining lifetime so
        # the two tiers expire together.
        cached = self.redis.get(key)
        if cached:
            value = json.loads(cached)
            remaining = self.redis.ttl(key)
            if remaining > 0:
                self.local[key] = (value, time.monotonic() + remaining)
            return value

        return None

    def set(self, key: str, value, ttl: int = 3600):
        """Write to both tiers with the same TTL."""
        self.local[key] = (value, time.monotonic() + ttl)
        self.redis.setex(key, ttl, json.dumps(value))

HTTP Caching Headers

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
from fastapi import Response

@app.get("/api/products/{id}")
def get_product(id: str, response: Response):
    """Serve a product with browser/CDN caching hints.

    Sets Cache-Control and ETag headers so downstream caches can avoid
    re-fetching, then returns the (cached) product payload.
    """
    product = get_cached_product(id)

    # Cache in browser/CDN for 5 minutes
    response.headers["Cache-Control"] = "public, max-age=300"

    # ETag for conditional requests
    # NOTE(review): to actually serve 304 Not Modified this endpoint would
    # also need to compare the request's If-None-Match header — nothing
    # here does; confirm whether middleware handles it.
    response.headers["ETag"] = f'"{product["version"]}"'

    return product

Monitoring Cache Health

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
from prometheus_client import Counter, Gauge

# Counters are labelled by cache_name so one dashboard can track several caches.
cache_hits = Counter('cache_hits_total', 'Cache hits', ['cache_name'])
cache_misses = Counter('cache_misses_total', 'Cache misses', ['cache_name'])
# NOTE(review): nothing in this snippet sets cache_size — presumably updated
# by a separate collector; confirm before relying on it.
cache_size = Gauge('cache_size_bytes', 'Cache memory usage')

def get_cached(key: str):
    """Redis lookup that records a hit or miss metric on every call."""
    value = redis.get(key)
    if not value:
        cache_misses.labels(cache_name="redis").inc()
        return None
    cache_hits.labels(cache_name="redis").inc()
    return value

Key metrics:

  • Hit rate (should be >90% for effective cache)
  • Memory usage
  • Eviction rate
  • Latency (p50, p99)

The Golden Rules

  1. Cache at the right level — closer to the user = faster, harder to invalidate
  2. Set TTLs on everything — forgotten caches become memory leaks
  3. Invalidate explicitly when possible — TTL is a fallback, not a strategy
  4. Monitor hit rates — low hit rate = wrong cache key or wrong data
  5. Plan for cache failure — your app should work (slowly) without cache

The best cache is one you don’t have to think about. Invest in the invalidation strategy upfront.