146. LRU Cache

Difficulty: Medium
Design a data structure that follows the Least Recently Used (LRU) cache eviction policy. Implement the LRUCache class with a constructor that accepts a positive integer capacity, a get(key) method that returns the value if the key exists or -1 otherwise, and a put(key, value) method that inserts or updates the key-value pair. When put causes the number of keys to exceed capacity, evict the least recently used key. Both get and put must run in O(1) average time.

Example

capacity = 2

put(1, 1), put(2, 2), get(1) returns 1, put(3, 3), get(2) returns -1, put(4, 4), get(1) returns -1, get(3) returns 3, get(4) returns 4

After put(1,1) and put(2,2), the cache is full. get(1) returns 1 and marks key 1 as recently used. When put(3,3) is called, key 2 is evicted because it was used least recently, so get(2) returns -1. When put(4,4) is called, key 1 is evicted, so get(1) returns -1 while keys 3 and 4 are still accessible.

Combine a doubly linked list with a hashmap. The linked list maintains recency order — most recently used at the tail, least recently used at the head. The hashmap maps keys to their list nodes for O(1) lookup. On access, move the node to the tail. On eviction, remove from the head.

Why this works

A hashmap alone gives O(1) lookups but cannot track recency order. A linked list alone tracks order but has O(n) lookups. By combining both – hashmap for instant key-to-node access, doubly linked list for O(1) insertion/removal – every operation (get, put, evict) runs in O(1) time. Dummy head and tail nodes eliminate edge cases when the list is empty.

Step by step

  1. HashMap + Doubly Linked List — the map stores key-to-node pointers; the list stores nodes in recency order (head = LRU, tail = most recent).
  2. get(key) — look up the node in the map. If found, move it to the tail (most recently used) and return its value.
  3. put(key, value) — if the key exists, remove the old node. Create a new node, add it to the tail, and store it in the map.
  4. Evict when over capacity — remove the node right after the dummy head (the least recently used), and delete its entry from the map.
Time: O(1) per get/put. Space: O(capacity).
/**
 * LRU cache combining a HashMap (O(1) key → node lookup) with a doubly
 * linked list that tracks recency: head.next is the least recently used
 * entry, tail.prev the most recently used. Dummy head/tail sentinels
 * remove all empty-list edge cases. get and put both run in O(1).
 */
class LRUCache {
    private final int capacity;
    private final Map<Integer, Node> cache; // key → Node for O(1) lookup
    private final Node head, tail;          // dummy sentinels, never evicted

    /** List node; stores the key so eviction can also delete the map entry. */
    private static class Node {
        int key, val;
        Node prev, next;
        Node(int key, int val) {
            this.key = key;
            this.val = val;
        }
    }

    /** @param capacity maximum number of entries; assumed positive. */
    public LRUCache(int capacity) {
        this.capacity = capacity;
        cache = new HashMap<>();
        head = new Node(0, 0);
        tail = new Node(0, 0);
        head.next = tail; // dummy head/tail avoid null-check edge cases
        tail.prev = head;
    }

    /** Unlinks {@code node} from the list in O(1). */
    private void remove(Node node) {
        node.prev.next = node.next;
        node.next.prev = node.prev;
    }

    /** Inserts {@code node} just before the dummy tail (= most recent). */
    private void addToEnd(Node node) {
        node.prev = tail.prev;
        node.next = tail;
        tail.prev.next = node;
        tail.prev = node;
    }

    /** Returns the value for {@code key}, or -1 if absent; refreshes recency. */
    public int get(int key) {
        Node node = cache.get(key); // single lookup instead of containsKey + get
        if (node == null) return -1;
        remove(node);
        addToEnd(node); // mark as most recently used
        return node.val;
    }

    /** Inserts or updates {@code key}; evicts the LRU entry when over capacity. */
    public void put(int key, int value) {
        Node node = cache.get(key);
        if (node != null) {
            // Key already present: update in place and refresh recency —
            // no allocation, and size cannot exceed capacity on this path.
            node.val = value;
            remove(node);
            addToEnd(node);
            return;
        }
        node = new Node(key, value);
        addToEnd(node);
        cache.put(key, node);
        if (cache.size() > capacity) { // over capacity — evict LRU
            Node lru = head.next;      // node right after dummy head
            remove(lru);
            cache.remove(lru.key);
        }
    }
}
/**
 * LRU cache: std::list keeps (key, value) pairs in recency order
 * (front = least recent, back = most recent); the unordered_map stores an
 * iterator per key. list::splice moves a node in O(1) and never
 * invalidates iterators, so the stored iterators stay valid after moves.
 */
class LRUCache {
private:
    int capacity;
    list<pair<int, int>> dll; // doubly linked list maintains recency order
    unordered_map<int, list<pair<int, int>>::iterator> cache; // key → list iterator

public:
    explicit LRUCache(int capacity) : capacity(capacity) {}

    // Returns the value for key, or -1 if absent; refreshes recency.
    int get(int key) {
        auto it = cache.find(key); // single hash lookup instead of three
        if (it == cache.end()) return -1;
        dll.splice(dll.end(), dll, it->second); // move to end (= most recent)
        return it->second->second;              // iterator still valid after splice
    }

    // Inserts or updates key; evicts the LRU entry when over capacity.
    void put(int key, int value) {
        auto it = cache.find(key);
        if (it != cache.end()) {
            // Key already present: update value in place and refresh
            // recency — avoids the erase + reallocation of a new node.
            it->second->second = value;
            dll.splice(dll.end(), dll, it->second);
            return;
        }
        dll.emplace_back(key, value);
        cache[key] = prev(dll.end());
        if ((int)cache.size() > capacity) { // over capacity — evict LRU from front
            cache.erase(dll.front().first);
            dll.pop_front();
        }
    }
};
class Node:
    """Doubly-linked-list node; carries the key so eviction can also
    delete the matching cache-dict entry."""

    def __init__(self, key=0, val=0):
        self.key, self.val = key, val
        self.prev = self.next = None

class LRUCache:
    """LRU cache: dict for O(1) key → node lookup plus a doubly linked
    list in recency order (head.next = least recent, tail.prev = most
    recent). Dummy head/tail sentinels remove empty-list edge cases.
    get and put both run in O(1)."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.cache = {}  # key → Node for O(1) lookup
        self.head = Node()
        self.tail = Node()
        self.head.next = self.tail  # dummy head/tail avoid None-check edge cases
        self.tail.prev = self.head

    def _remove(self, node) -> None:
        """Unlink node from the list in O(1)."""
        node.prev.next = node.next
        node.next.prev = node.prev

    def _add_to_end(self, node) -> None:
        """Insert node just before the dummy tail (= most recent)."""
        node.prev = self.tail.prev
        node.next = self.tail
        self.tail.prev.next = node
        self.tail.prev = node

    def get(self, key: int) -> int:
        """Return the value for key, or -1 if absent; refreshes recency."""
        node = self.cache.get(key)  # single dict lookup instead of `in` + `[]`
        if node is None:
            return -1
        self._remove(node)
        self._add_to_end(node)  # mark as most recently used
        return node.val

    def put(self, key: int, value: int) -> None:
        """Insert or update key; evict the LRU entry when over capacity."""
        node = self.cache.get(key)
        if node is not None:
            # Key already present: update in place and refresh recency —
            # no new allocation, and size cannot exceed capacity here.
            node.val = value
            self._remove(node)
            self._add_to_end(node)
            return
        node = Node(key, value)
        self._add_to_end(node)
        self.cache[key] = node
        if len(self.cache) > self.capacity:  # over capacity — evict LRU
            lru = self.head.next  # node right after dummy head
            self._remove(lru)
            del self.cache[lru.key]