Design and implement a Weighted LRU (Least Recently Used) Cache that extends the traditional LRU cache by assigning a size (or weight) to each item. Unlike a standard LRU cache where each item counts as 1 toward the capacity, in a weighted LRU cache, the capacity is calculated as the sum of all item sizes.
This variant is commonly used in systems where cached items have varying memory footprints, such as web response caches, image or blob caches, and memory-bounded object stores.
Your implementation should support two core operations: [Source: darkinterview.com]
- get(key): Retrieve the value associated with the key. Returns -1 if the key doesn't exist.
- put(key, value, size): Insert or update a key-value pair with an associated size. If adding the item causes the total size to exceed capacity, evict the least recently used items until there's enough space.

# Capacity is 10 (total weight, not item count)
cache = WeightedLRUCache(capacity=10)
cache.put("a", 1, 3) # Cache: {"a": (1, size=3)} -> total size = 3
cache.put("b", 2, 4) # Cache: {"a": (1, 3), "b": (2, 4)} -> total size = 7
cache.put("c", 3, 5) # Exceeds capacity (7 + 5 = 12 > 10)
# Evict "a" (LRU, size=3) -> total size = 4
# Now add "c" -> total size = 9
# Cache: {"b": (2, 4), "c": (3, 5)}
cache.get("a") # Returns -1 (evicted)
cache.get("b") # Returns 2 (marks "b" as recently used)
cache.put("d", 4, 3) # Would exceed (9 + 3 = 12 > 10)
# Evict "c" (LRU, size=5) -> total size = 4
# Add "d" -> total size = 7
# Cache: {"b": (2, 4), "d": (4, 3)}
Implement a WeightedLRUCache class with the following interface:
class WeightedLRUCache:
    """Interface skeleton for a weighted LRU cache (implementations below)."""

    def __init__(self, capacity: int):
        """
        Initialize the cache with a maximum total capacity (by weight).

        Args:
            capacity: Maximum total size of all cached items.
        """
        pass

    def get(self, key: str) -> int:
        """
        Get the value associated with the key.

        Returns:
            The value if key exists, -1 otherwise.
        """
        pass

    def put(self, key: str, value: int, size: int) -> None:
        """
        Insert or update a key-value pair with the given size.

        If the key already exists, update its value and size.
        If adding the item exceeds capacity, evict LRU items until there's room.
        """
        pass
# Test 1: basic put/get round trip.
lru = WeightedLRUCache(10)
lru.put("a", 1, 5)
assert lru.get("a") == 1

# Test 2: inserting past capacity evicts the oldest entry.
lru = WeightedLRUCache(10)
lru.put("a", 1, 6)
lru.put("b", 2, 6)  # 6 + 6 > 10, so "a" is evicted
assert lru.get("a") == -1
assert lru.get("b") == 2

# Test 3: get() refreshes recency, protecting the entry from eviction.
lru = WeightedLRUCache(10)
lru.put("a", 1, 4)
lru.put("b", 2, 4)
lru.get("a")        # "a" becomes most recently used
lru.put("c", 3, 4)  # evicts "b" (now the LRU), not "a"
assert lru.get("b") == -1
assert lru.get("a") == 1
Extend your implementation to handle various edge cases that occur in production systems.
put() is called with an existing key but a different size.

cache = WeightedLRUCache(10)
# Case 1: Item larger than capacity
# A size-15 item can never fit in a capacity-10 cache, no matter what is evicted.
cache.put("huge", 1, 15)  # Size 15 > capacity 10
# Options: raise error, don't insert, or clear cache and insert anyway
# Case 2: Update with different size
# Re-inserting an existing key replaces both its value and its size.
cache.put("a", 1, 3)
cache.put("a", 100, 8)  # Same key, different size
# Total size changes from 3 to 8
# Case 3: Multiple evictions
# A single put may need to evict several entries to free enough weight.
cache = WeightedLRUCache(10)
cache.put("a", 1, 3)
cache.put("b", 2, 3)
cache.put("c", 3, 3)  # Total = 9
cache.put("d", 4, 8)  # Needs to evict multiple items
# Must evict "a", "b", and "c" to fit "d"
# Case 4: Exact fit after eviction
# The cache may legitimately sit exactly at capacity.
cache = WeightedLRUCache(10)
cache.put("a", 1, 5)
cache.put("b", 2, 5)  # Total = 10 (exactly at capacity)
cache.put("c", 3, 5)  # Evict "a", add "c", total remains 10
Optimize your implementation for O(1) time complexity for both get() and put() operations. [Source: darkinterview.com]
- get() should be O(1)
- put() should be O(1) amortized (noting that worst case involves evicting multiple items)
- Should get() update the item's access time (making it "most recently used")? Yes — refreshing recency on reads is what makes the policy LRU.

Strategy:
- Use an OrderedDict, which maintains insertion order and provides O(1) access.
- Call move_to_end() on access to update the LRU ordering.

Example Implementation:
from collections import OrderedDict
class WeightedLRUCache:
    """LRU cache whose capacity is the sum of item sizes, not item count.

    Backed by an OrderedDict: the left end holds the least recently used
    entry, the right end the most recently used.
    """

    def __init__(self, capacity: int):
        self.capacity = capacity    # maximum total weight
        self.cache = OrderedDict()  # key -> (value, size)
        self.current_size = 0       # running sum of stored sizes

    def get(self, key: str) -> int:
        """Return the value for key, or -1 if absent; marks key as MRU."""
        if key not in self.cache:
            return -1
        # Move to end to mark as recently used.
        self.cache.move_to_end(key)
        return self.cache[key][0]  # Return value

    def put(self, key: str, value: int, size: int) -> None:
        """Insert or update key with the given size, evicting LRU items as needed.

        An item whose size exceeds the whole capacity is silently skipped
        after any evictions (the enhanced implementation raises instead).
        """
        # If key exists, remove its size from current total.
        if key in self.cache:
            old_value, old_size = self.cache[key]
            self.current_size -= old_size
            del self.cache[key]
        # Evict LRU items (left end) until we have enough space.
        while self.current_size + size > self.capacity and self.cache:
            oldest_key, (oldest_value, oldest_size) = self.cache.popitem(last=False)
            self.current_size -= oldest_size
        # Only insert if the item can ever fit.
        if size <= self.capacity:
            self.cache[key] = (value, size)
            self.current_size += size
Time Complexity: [Source: darkinterview.com]
- get(): O(1) — dictionary lookup and move_to_end
- put(): O(k) amortized, where k is the number of items to evict

Space Complexity: O(n) for n cached items, with total weight bounded by the capacity.
Enhanced Implementation:
from collections import OrderedDict
class WeightedLRUCache:
    """Weighted LRU cache with input validation and introspection helpers."""

    def __init__(self, capacity: int):
        """
        Args:
            capacity: Maximum total size of all cached items; must be > 0.

        Raises:
            ValueError: If capacity is not positive.
        """
        if capacity <= 0:
            raise ValueError("Capacity must be positive")
        self.capacity = capacity
        self.cache = OrderedDict()  # key -> (value, size); left end is LRU
        self.current_size = 0       # running sum of stored sizes

    def get(self, key: str) -> int:
        """Return the value for key (marking it MRU), or -1 if absent."""
        if key not in self.cache:
            return -1
        self.cache.move_to_end(key)
        return self.cache[key][0]

    def put(self, key: str, value: int, size: int) -> None:
        """Insert or update key, evicting LRU entries until the item fits.

        Raises:
            ValueError: If size is not positive, or exceeds the capacity.
        """
        if size <= 0:
            raise ValueError("Size must be positive")
        # Handle item larger than capacity.
        if size > self.capacity:
            # Option 1 (chosen): raise an error — prevents silent failures.
            raise ValueError(f"Item size {size} exceeds capacity {self.capacity}")
            # Option 2: clear cache and don't insert (just return)
            # self.cache.clear()
            # self.current_size = 0
        # Updating an existing key: retire its old size first.
        if key in self.cache:
            _, old_size = self.cache[key]
            self.current_size -= old_size
            del self.cache[key]
        # Evict from the LRU (left) end until the new item fits.
        while self.current_size + size > self.capacity:
            oldest_key, (_, oldest_size) = self.cache.popitem(last=False)
            self.current_size -= oldest_size
        self.cache[key] = (value, size)
        self.current_size += size

    def total_size(self) -> int:
        """Return the summed size of all cached items."""
        return self.current_size

    def __len__(self) -> int:
        """Return the number of cached items."""
        return len(self.cache)
Edge Case Behaviors: [Source: darkinterview.com]
| Edge Case | Behavior | Rationale |
|---|---|---|
| Item > capacity | Raise ValueError | Prevents silent failures |
| Update existing key | Adjust size difference | Maintains correct total |
| Multiple evictions | Evict until space available | Guarantees insertion if possible |
| Zero/negative size | Raise ValueError | Sizes must be positive |
For maximum efficiency, use a doubly linked list with a hash map:
class Node:
    """Doubly linked list node holding one cache entry (key, value, size)."""

    def __init__(self, key: str, value: int, size: int):
        self.key = key
        self.value = value
        self.size = size
        self.prev = None
        self.next = None


class WeightedLRUCache:
    """Weighted LRU cache built on a hash map plus a doubly linked list.

    The list runs from the dummy head (LRU end) to the dummy tail (MRU end);
    the dict maps each key to its live Node, giving O(1) get and O(1)
    amortized put (O(k) when k items must be evicted).
    """

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.current_size = 0
        self.cache = {}  # key -> Node
        # Dummy head and tail for easier manipulation.
        self.head = Node("", 0, 0)  # LRU end
        self.tail = Node("", 0, 0)  # MRU end
        self.head.next = self.tail
        self.tail.prev = self.head

    def _remove(self, node: Node) -> None:
        """Remove node from linked list."""
        node.prev.next = node.next
        node.next.prev = node.prev

    def _add_to_end(self, node: Node) -> None:
        """Append node just before the tail (most recently used position)."""
        node.prev = self.tail.prev
        node.next = self.tail
        self.tail.prev.next = node
        self.tail.prev = node

    def _evict_lru(self) -> Node:
        """Unlink and return the least recently used node (head side)."""
        lru = self.head.next
        self._remove(lru)
        return lru

    def get(self, key: str) -> int:
        """Return the value for key (marking it MRU), or -1 if absent."""
        if key not in self.cache:
            return -1
        node = self.cache[key]
        self._remove(node)
        self._add_to_end(node)
        return node.value

    def put(self, key: str, value: int, size: int) -> None:
        """Insert or update key, evicting LRU nodes until the item fits.

        Raises:
            ValueError: If size alone exceeds the total capacity.
        """
        if size > self.capacity:
            raise ValueError(f"Item size {size} exceeds capacity {self.capacity}")
        # Updating an existing key: retire its old node first.
        if key in self.cache:
            old_node = self.cache[key]
            self.current_size -= old_node.size
            self._remove(old_node)
            del self.cache[key]
        # Evict until the new item fits (stop if the list is empty).
        while self.current_size + size > self.capacity and self.head.next is not self.tail:
            lru = self._evict_lru()
            self.current_size -= lru.size
            del self.cache[lru.key]
        new_node = Node(key, value, size)
        self.cache[key] = new_node
        self._add_to_end(new_node)
        self.current_size += size
Complexity Analysis:
| Operation | Time Complexity | Notes |
|---|---|---|
get() | O(1) | HashMap lookup + linked list reorder |
put() (no eviction) | O(1) | HashMap insert + linked list append |
put() (with k evictions) | O(k) | Must evict k items |
Why Doubly Linked List + HashMap? [Source: darkinterview.com]
Approach 1: Segment-based caching
Approach 2: Weighted LFU (Least Frequently Used)
Thread Safety [Source: darkinterview.com]
Size Estimation
Use sys.getsizeof() or custom size calculators to derive item weights.

Monitoring
TTL (Time-To-Live) [Source: darkinterview.com]
| Aspect | Standard LRU | Weighted LRU |
|---|---|---|
| Capacity unit | Item count | Total size/weight |
| Evictions per put | At most 1 | Potentially multiple |
| Use case | Uniform item sizes | Variable item sizes |
| Implementation | Simpler | Slightly more complex |
| Memory efficiency | Lower (fixed slots) | Higher (flexible) |