Example
capacity = 2
put(1, 1), put(2, 2), get(1) returns 1, put(3, 3), get(2) returns -1, put(4, 4), get(1) returns -1, get(3) returns 3, get(4) returns 4
After put(1,1) and put(2,2), the cache is full. get(1) returns 1 and marks key 1 as recently used. When put(3,3) is called, key 2 is evicted because it was used least recently, so get(2) returns -1. When put(4,4) is called, key 1 is evicted, so get(1) returns -1 while keys 3 and 4 are still accessible.
Why this works
A hashmap alone gives O(1) lookups but cannot track recency order. A linked list alone tracks order but has O(n) lookups. By combining both – hashmap for instant key-to-node access, doubly linked list for O(1) insertion/removal – every operation (get, put, evict) runs in O(1) time. Dummy head and tail nodes eliminate edge cases when the list is empty.
Step by step
- HashMap + Doubly Linked List — the map stores key-to-node pointers; the list stores nodes in recency order between two dummy sentinels (the node after the dummy head is the least recently used; the node before the dummy tail is the most recently used).
- get(key) — look up the node in the map. If found, move it to the tail (most recently used) and return its value.
- put(key, value) — if the key exists, remove the old node. Create a new node, add it to the tail, and store it in the map.
- Evict when over capacity — remove the node right after the dummy head (the least recently used), and delete its entry from the map.
class LRUCache {
    /**
     * Doubly linked list node holding one cache entry.
     * Declared static so it does not capture a hidden reference to the enclosing cache.
     */
    private static class Node {
        final int key; // key is fixed for a node's lifetime; needed to evict from the map
        int val;
        Node prev, next;

        Node(int key, int val) {
            this.key = key;
            this.val = val;
        }
    }

    private final int capacity;
    private final Map<Integer, Node> cache = new HashMap<>(); // key → Node for O(1) lookup
    private final Node head; // sentinel: head.next is the least recently used entry
    private final Node tail; // sentinel: tail.prev is the most recently used entry

    public LRUCache(int capacity) {
        this.capacity = capacity;
        head = new Node(0, 0);
        tail = new Node(0, 0);
        head.next = tail; // dummy head/tail avoid null-check edge cases
        tail.prev = head;
    }

    /** Unlinks {@code node} from the list in O(1). */
    private void remove(Node node) {
        node.prev.next = node.next;
        node.next.prev = node.prev;
    }

    /** Links {@code node} just before the tail sentinel (the most-recent position). */
    private void addToEnd(Node node) {
        node.prev = tail.prev;
        node.next = tail;
        tail.prev.next = node;
        tail.prev = node;
    }

    /**
     * Returns the value for {@code key}, marking it most recently used, or -1 if absent.
     * Single {@code Map.get} instead of {@code containsKey} + {@code get} (one hash lookup).
     */
    public int get(int key) {
        Node node = cache.get(key);
        if (node == null) return -1;
        remove(node);
        addToEnd(node); // mark as most recently used
        return node.val;
    }

    /** Inserts or overwrites {@code key}; evicts the least recently used entry when over capacity. */
    public void put(int key, int value) {
        Node old = cache.get(key); // single lookup instead of containsKey + get
        if (old != null) {
            remove(old);
        }
        Node node = new Node(key, value);
        addToEnd(node);
        cache.put(key, node);
        if (cache.size() > capacity) { // over capacity — evict LRU
            Node lru = head.next;
            remove(lru);
            cache.remove(lru.key);
        }
    }
}
class LRUCache {
private:
    int capacity;
    // Recency list of (key, value): front = least recently used, back = most recent.
    list<pair<int, int>> dll;
    // key → list iterator for O(1) access; list iterators stay valid across splice.
    unordered_map<int, list<pair<int, int>>::iterator> cache;

public:
    LRUCache(int capacity) : capacity(capacity) {}

    // Returns the value for key (marking it most recently used) or -1 if absent.
    // Reuses the find() iterator instead of re-hashing with cache[key] (was 3 lookups).
    int get(int key) {
        auto it = cache.find(key);
        if (it == cache.end()) return -1;
        // splice moves the node to the back (= most recent) in O(1) without
        // invalidating iterators, so the map entry stays correct.
        dll.splice(dll.end(), dll, it->second);
        return it->second->second;
    }

    // Inserts or overwrites key; evicts the least recently used entry when over capacity.
    void put(int key, int value) {
        auto it = cache.find(key); // single hash lookup for the exists check
        if (it != cache.end()) {
            dll.erase(it->second);
        }
        dll.emplace_back(key, value);
        cache[key] = std::prev(dll.end()); // qualified: don't rely on unqualified prev
        if ((int)cache.size() > capacity) { // over capacity — evict LRU from front
            cache.erase(dll.front().first);
            dll.pop_front();
        }
    }
};
class Node:
    """Doubly linked list node holding one cache entry."""

    def __init__(self, key=0, val=0):
        self.key = key  # kept on the node so eviction can delete the dict entry
        self.val = val
        self.prev = None
        self.next = None


class LRUCache:
    """O(1) LRU cache: dict for lookup + doubly linked list for recency order."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.cache = {}  # key -> Node for O(1) lookup
        # Sentinel head/tail remove empty-list edge cases: head.next is the
        # least recently used entry, tail.prev the most recently used.
        self.head = Node()
        self.tail = Node()
        self.head.next = self.tail
        self.tail.prev = self.head

    def _remove(self, node):
        """Unlink node from the list in O(1)."""
        before, after = node.prev, node.next
        before.next = after
        after.prev = before

    def _add_to_end(self, node):
        """Link node just before the tail sentinel (the most-recent slot)."""
        last = self.tail.prev
        last.next = node
        node.prev = last
        node.next = self.tail
        self.tail.prev = node

    def get(self, key: int) -> int:
        """Return the value for key, marking it most recently used, or -1."""
        node = self.cache.get(key)
        if node is None:
            return -1
        self._remove(node)
        self._add_to_end(node)  # mark as most recently used
        return node.val

    def put(self, key: int, value: int) -> None:
        """Insert or overwrite key; evict the LRU entry when over capacity."""
        existing = self.cache.get(key)
        if existing is not None:
            self._remove(existing)
        fresh = Node(key, value)
        self._add_to_end(fresh)
        self.cache[key] = fresh
        if len(self.cache) > self.capacity:
            victim = self.head.next  # node after the sentinel = least recently used
            self._remove(victim)
            del self.cache[victim.key]