ImplementLRUCache [source code]


public class ImplementLRUCache {
static
/******************************************************************************/
/**
 * LRU cache built from a HashMap (key -> frame) plus an ArrayDeque used as a
 * recency queue with LAZY invalidation: instead of removing a frame from the
 * middle of the queue on access (ArrayDeque cannot do that in O(1)), the old
 * frame is marked {@code valid = false} and a fresh copy is appended at the
 * tail (most-recently-used end).  Stale frames are purged from the head of
 * the queue at the start of every put().
 *
 * NOTE(review): amortized cost is reasonable, but the worst case is NOT O(1):
 * a put() following a long run of get()s may have to skip past many
 * invalidated frames.  The classic O(1) solution uses a hand-rolled doubly
 * linked list (or LinkedHashMap).
 *
 * Naming follows the Pintos VM convention: upage = the user-visible key,
 * kpage = the stored value.
 */
class LRUCache {
    Map<Integer, Frame> pagedir;   // key -> its current (valid) frame
    Deque<Frame> frames;           // recency queue: head = LRU, tail = MRU
    int N, max_size;               // N = number of valid cached keys (queue size overcounts)


    /**
     * @param capacity maximum number of key/value pairs the cache may hold
     */
    public LRUCache(int capacity) {
        pagedir = new HashMap<> ();
        frames = new ArrayDeque<> ();
        max_size = capacity;
        N = 0;
    }

    /**
     * Returns the value stored under {@code key}, or -1 on a miss.
     * A hit refreshes recency via copy-on-write (see class comment).
     */
    public int get(int key) {
        Frame frame = pagedir.get (key);
        if (frame == null)
            return -1;          // miss
        if (frames.peekLast () != frame) {
            // Refresh recency lazily: invalidate the old frame and append a
            // copy at the tail.  Skipped when the frame is already MRU, so
            // repeated gets of the hottest key do not pollute the queue.
            frame.valid = false;
            Frame new_frame = new Frame (frame.kpage, key);
            frames.offer (new_frame);
            pagedir.put (key, new_frame);
        }
        return frame.kpage;     // old copy still holds the correct value
    }

    /**
     * Inserts or updates {@code key}; evicts the least-recently-used entry
     * when the cache is full and the key is new.
     */
    public void put(int key, int value) {
        // Degenerate cache: without this guard, frames.poll() below would
        // return null and the eviction path would throw a NullPointerException.
        if (max_size == 0)
            return;
        // The order matters: purge stale frames, then check hit, then check
        // empty slots, then evict.
        while (!frames.isEmpty () && !frames.peek ().valid)
            frames.poll ();     // drop lazily-invalidated frames at the head
        if (pagedir.containsKey (key)) {
            // Hit: update via copy-on-write so the new frame becomes MRU.
            Frame frame = pagedir.get (key), new_frame = new Frame (value, key);
            frame.valid = false;
            frames.offer (new_frame);
            pagedir.put (key, new_frame);
            return;
        }
        if (N < max_size) {
            // Free slot available: no eviction needed.
            Frame new_frame = new Frame (value, key);
            pagedir.put (key, new_frame);
            frames.offer (new_frame);
            N++;
            return;
        }
        // Full: the head of the queue is a valid LRU frame (stale ones were
        // purged above); evict it and reuse the object in place.
        Frame evictee = frames.poll ();
        pagedir.remove (evictee.upage);
        evictee.kpage = value;
        evictee.upage = key;
        frames.offer (evictee);
        pagedir.put (key, evictee);
    }

    /** One cache slot: kpage = value, upage = key, valid = not yet superseded. */
    static class Frame {
        int kpage, upage;
        boolean valid = true;
        Frame (int k, int u) {
            kpage = k;
            upage = u;
        }

        public String toString () {
            return String.format ("[%d %d %b]", kpage, upage, valid);
        }
    }
}
/******************************************************************************/
/**
 * Your LRUCache object will be instantiated and called as such:
 * LRUCache obj = new LRUCache(capacity);
 * int param_1 = obj.get(key);
 * obj.put(key,value);
 */

    /** Self-test driver: replays recorded LeetCode command/argument traces. */
    public static void main(String[] args) {
        final boolean main_debug = false;
        // Each row: command names followed by the expected-output string.
        String[][] commands = {
            {"LRUCache","put","put","get","put","get","put","get","get","get", "[null,null,null,1,null,-1,null,-1,3,4]"},     // 1
            {"LRUCache","get","put","get","put","put","get","get", "[null,-1,null,-1,null,null,2,6]"}, // 2
            {"LRUCache","put","put","put","put","put","get","put","get","get","put","get","put","put","put","get","put","get","get","get","get","put","put","get","get","get","put","put","get","put","get","put","get","get","get","put","put","put","get","put","get","get","put","put","get","put","put","put","put","get","put","put","get","put","put","get","put","put","put","put","put","get","put","put","get","put","get","get","get","put","get","get","put","put","put","put","get","put","put","put","put","get","get","get","put","put","put","get","put","put","put","get","put","put","put","get","get","get","put","put","put","put","get","put","put","put","put","put","put","put",  "[null,null,null,null,null,null,-1,null,19,17,null,-1,null,null,null,-1,null,-1,5,-1,12,null,null,3,5,5,null,null,1,null,-1,null,30,5,30,null,null,null,-1,null,-1,24,null,null,18,null,null,null,null,-1,null,null,18,null,null,-1,null,null,null,null,null,18,null,null,-1,null,4,29,30,null,12,-1,null,null,null,null,29,null,null,null,null,17,22,18,null,null,null,-1,null,null,null,20,null,null,null,-1,18,18,null,null,null,null,20,null,null,null,null,null,null,null]"},     //3
        };
        // Argument lists parallel to `commands`; arguments[i][0][0] is the capacity.
        int[][][] arguments = {
            {{2},{1,1},{2,2},{1},{3,3},{2},{4,4},{1},{3},{4}},  //1
            {{2},{2},{2,6},{1},{1,5},{1,2},{1},{2}}, // 2
            {{10},{10,13},{3,17},{6,11},{10,5},{9,10},{13},{2,19},{2},{3},{5,25},{8},{9,22},{5,5},{1,30},{11},{9,12},{7},{5},{8},{9},{4,30},{9,3},{9},{10},{10},{6,14},{3,1},{3},{10,11},{8},{2,14},{1},{5},{4},{11,4},{12,24},{5,18},{13},{7,23},{8},{12},{3,27},{2,12},{5},{2,9},{13,4},{8,18},{1,7},{6},{9,29},{8,21},{5},{6,30},{1,12},{10},{4,15},{7,22},{11,26},{8,17},{9,29},{5},{3,4},{11,30},{12},{4,29},{3},{9},{6},{3,4},{1},{10},{3,29},{10,28},{1,20},{11,13},{3},{3,12},{3,8},{10,9},{3,26},{8},{7},{5},{13,17},{2,27},{11,15},{12},{9,19},{2,15},{3,16},{1},{12,17},{9,1},{6,19},{4},{5},{5},{8,1},{11,7},{5,2},{9,28},{1},{2,2},{7,4},{4,22},{7,24},{9,26},{13,28},{11,26}},        //3
        };
        for (int i = 0; i < commands.length; i++) {
            System.out.println (Printer.separator ());
            String[] test_commands = commands[i];
            int[][] test_args = arguments[i];
            // Expected answers live in the last element as "[a,b,c,...]".
            String[] test_answers = test_commands[test_commands.length - 1].replace ("[", "").replace ("]", "").split (",");
            ImplementLRUCache.LRUCache cache = new ImplementLRUCache.LRUCache (test_args[0][0]);
            System.out.printf ("cache capacity: %d\n", cache.max_size);
            for (int j = 1; j < test_commands.length - 1; j++) {
                if (test_commands[j].equals ("put")) {
                    int key = test_args[j][0], value = test_args[j][1];
                    cache.put (key, value);
                    System.out.printf ("put (%d, %d)\n", key, value);
                } else {
                    int key = test_args[j][0], ans = Integer.parseInt (test_answers[j]), output = cache.get (key);
                    System.out.printf ("get (%d) returns %s, expected: %d\n", 
                        key, Printer.wrap (output + "", output == ans ? 92 : 91), ans
                    );
                }
                if (main_debug) System.out.println (Printer.wrap (String.format ("N: %d, frames: %s\npagedir: %s\n", cache.N, cache.frames, cache.pagedir), 93));
            }
        }
    }
}

首先, 这个used是什么概念? 是accessed, 还是written? 我感觉应该是get和set都算;

LRU本身的概念是很熟悉的, 这个题目的一个难点, 是要你get和set都是O(1). 如果只是单纯的用一个list来维护cache, get肯定不可能是O(1). 如果用一个HashMap, 那么put的时候, 假如需要swap, 那么速度很可能就不是O(1)了;

先回忆一下已知的东西, 最普通的LRU当时是怎么设计的?

另外, 这题实际上还多了一个东西, 就是key和value的组合, 这个好像在OS的时候是没有这个内容的; 但是仔细回忆当时VM的时候的LRU, 有一个upage和kpage的概念, 当时实际上是同时维护了upage->kpage和kpage->upage(这个直接维护在cache本身里面)两个mapping的, 所以这题可能这里也需要类似的设计;

但是这样一个设计能够保证put的时候是O(1)吗? 先不急着否定, 思考一下, 是不是其实就是O(1)? 你缺的只是一个证明?

不对, 不是单纯的证明问题, 这个思路确实无法做到put的O(1): 假想你刚evict成功一个, 然后后面一大帮get来了, 全都又mark成accessed, 那么你下一个put肯定就又要走一个完整的clock了;

关键是, 当一个evict即将要发生的时候, 你应该是什么操作?

一个简单的想法, 维护一个upage->kpage的mapping, 然后维护一个kpage的queue是不是就行了? 暂时没有想法, 试试看?

不对, 这个还是普通的LRU做法, 问题在于, 当你get的时候, 你怎么设置当前的这个kpage的accessed? 你能直接从queue里面把这个kpage找到吗? 就算找到了, 能remove吗? 或者用invalidate的方法?

那么这里就要自己维护有效frame的个数了, 因为queue里面坏账会有很多, 不能直接用queue size来判断是否已经满了; 另外一个问题是, 我感觉这个算法其实也不是O(1), 因为如果queue里面有大量的坏账的时候, 找到一个有效的frame, 可能会经历非常非常多的时间, 甚至可能超过capacity, 而是根据之前的操作的数量来决定的;

第二个test: break掉一个忘记考虑的地方, 假如full的时候, 你要put的是一个已经在cache里面的key, 这个时候不应该evict, 而是更新就行了;

test3: break掉了一个低级的错误, 实际上是之前PintosFS的时候碰到过的, 就是当你需要get一个新cache slot的时候, 操作的顺序很重要; 这个上面已经介绍过了;

最后还是AC了; 老实说我这个算法的worst case并不优秀, 但是最后能过也算可以了; 但是实际上调试了很长时间, 作为一个完成过Pintos的人, 花这么多时间, 还是挺丢人的; 而且最后的速度是179ms (14%), 也不算好;

总结一下这里的实现的思路, 基本就是翻版之前Pintos的时候的设计思路, 稍微加了一个copy on write的操作, 然后get的时候加了一个小优化(最频繁的frame的get不会频繁触发copy on write);

大概观察了一下, 从test3来看, 好像put和get的数量是差不多的;


没有editorial;


https://leetcode.com/problems/lru-cache/discuss/45911/Java-Hashtable-+-Double-linked-list-(with-a-touch-of-pseudo-nodes)

The problem can be solved with a hashtable that keeps track of the keys and its values in the double linked list. One interesting property about double linked list is that the node can remove itself without other reference. In addition, it takes constant time to add and remove nodes from the head or tail.

这个学习, 对于double LinkedList以前只是听说过而已, 但是实际上有什么好处, 并不清楚;

话说回来, 这题最后的这个复杂度的Follow-Up, 其实也是用一个fancy wheels的思路来完成的, 鸟枪换炮, 用更加高级的数据结构来提高整个设计的效率;

One particularity about the double linked list that I implemented is that I create a pseudo head and tail to mark the boundary, so that we don’t need to check the NULL node during the update. This makes the code more concise and clean, and also it is good for the performance as well.

Voila, here is the code.

class DLinkedNode {  
    int key;  
    int value;  
    DLinkedNode pre;  
    DLinkedNode post;  
}  

// Always add the new node right after head;  
private void addNode(DLinkedNode node){  
    node.pre = head;  
    node.post = head.post;  

    head.post.pre = node;  
    head.post = node;  
}  

// Remove an existing node from the linked list.  
private void removeNode(DLinkedNode node){  
    DLinkedNode pre = node.pre;  
    DLinkedNode post = node.post;  

    pre.post = post;  
    post.pre = pre;  
}  

// Move certain node in between to the head.  
private void moveToHead(DLinkedNode node){  
    this.removeNode(node);  
    this.addNode(node);  
}  

// pop the current tail.   
private DLinkedNode popTail(){  
    DLinkedNode res = tail.pre;  
    this.removeNode(res);  
    return res;  
}  

private Hashtable<Integer, DLinkedNode>   
    cache = new Hashtable<Integer, DLinkedNode>();  
private int count;  
private int capacity;  
private DLinkedNode head, tail;  

public LRUCache(int capacity) {  
    this.count = 0;  
    this.capacity = capacity;  

    head = new DLinkedNode();  
    head.pre = null;  

    tail = new DLinkedNode();  
    tail.post = null;  

    head.post = tail;  
    tail.pre = head;  
}  

public int get(int key) {  

    DLinkedNode node = cache.get(key);  
    if(node == null){  
        return -1; // should raise exception here.  
    }  

    // move the accessed node to the head;  
    this.moveToHead(node);  

    return node.value;  
}  


public void set(int key, int value) {  
    DLinkedNode node = cache.get(key);  

    if(node == null){  

        DLinkedNode newNode = new DLinkedNode();  
        newNode.key = key;  
        newNode.value = value;  

        this.cache.put(key, newNode);  
        this.addNode(newNode);  

        ++count;  

        if(count > capacity){  
            // pop the tail  
            DLinkedNode tail = this.popTail();  
            this.cache.remove(tail.key);  
            --count;  
        }  
    }else{  
        // update the value.  
        node.value = value;  
        this.moveToHead(node);  
    }  

}

针对这题, 这个list设计的函数也都很简单, 一个就是add_first, 一个是remove_last, 然后是一个boost, 类似是move到head的操作; 这个也是一个这题环境下必要的操作; 所有这些操作, 都需求一个直接的delete操作, 这个实现起来也不复杂;

然后这个list本身的维护, 就是通过维护指向dummy head和dummy tail的两个指针来完成的: 注意, 不需要专门包裹到一个class里面, 实际上, 这里的这个自己定义的DLL, 类似于Pintos里面的list elem类, 而list类, 是没有定义的, 直接就是维护head和tail这两个dummy elem就行了; 这个其实是比较C风格的一种设计, 不过针对一般的算法题, 这样搞也可以了; 你也可以再多设计一个list类来wrap这个elem, 不过没有必要;

另外, 他put里面的逻辑也很简单; 首先判断这个key是否已经在cache里面, 如果在, 直接更新, 然后boost; 如果不在, 直接先插进去; 因为默认是插在head(most recently used), 所以不需要额外的操作来管理; 然后最后conditional判断一个是否已经超过大小, 然后如果超过了, pop_last就行了;

public class LRUCache {  
    private Map<Integer, DLinkNode> cache;  
    DLinkNode tail = null;  
    DLinkNode head = null;  
    int capacity;  

    public LRUCache(int capacity) {  
        cache = new HashMap<Integer, DLinkNode>();  
        this.capacity = capacity;  
    }  

    public int get(int key) {  
        if (cache.containsKey(key)) {  
            DLinkNode target = cache.get(key);  
            int value = target.value;  
            target.update();  
            return value;  
        } else return -1;  
    }  

    public void set(int key, int value) {  
        if (cache.containsKey(key)) {  
            DLinkNode target = cache.get(key);  
            target.value = value;  
            target.update();  
        } else {  
            if (capacity == 0) return;  
            if (cache.size() == capacity) {  
                cache.remove(head.key);  
                head.removeFromHead();  
            }  
            DLinkNode newNode = new DLinkNode(key, value);  
            newNode.append();  
            cache.put(key, newNode);  
        }  
    }  

    class DLinkNode {  
        int key;  
        int value;  
        DLinkNode left = null;  
        DLinkNode right = null;  
        public DLinkNode(int key, int value) {  
            this.key = key;  
            this.value = value;  
        }  
        // remove head from list and update head reference.  
        private void removeFromHead() {      
            // if 'this' is the only node, set both head and tail to null.  
            if (tail == this) {               
                head = null;  
                tail = null;  
            } else {  
                head = this.right;  
                head.left = null;  
            }  
        }  
        private void update() {  
            // no need to update if accessing the most recently used value.  
            if (tail == this) return;         
            else {   
                 // remove from current postion and update nodes (if any) on both sides.  
                if (this != head) {          
                    this.left.right = this.right;  
                } else {  
                    head = this.right;  
                }  
                this.right.left = this.left;  
                 // append to tail.  
                this.append();               
            }  
        }  
        private void append() {  
            // inserting the first node.  
            if (tail == null) {       
                head = this;  
                tail = this;  
            // append as tail and update tail reference.  
            } else {                  
                this.right = null;  
                this.left = tail;  
                tail.right =this;  
                tail = this;   
            }  
        }  
    }  
}

一个改写; 这个就是比较正常的java风格的写法了, 之前OP原来自己的写法简直直接就是function + argument的直接的c风格的写法;

但是他这个实际上看起来并没有特别的好, 因为他没有用一个wrapper class, 你看他head和tail, 还是直接扔在LRU类里面的; 这个就显得很乱, 根本没有完成封装; 不如直接就用c风格函数写写算了;

另外他这个改写一个很蠢的地方是直接把两个dummy都删了, 这个真的是画蛇添足; 无论是逻辑还是效率上面都有所牺牲;

With double linked list implemented by ourselves and a map store key to nodes, we can access to the node corresponding to any key in constant time.
Using Java 's LinkedList can not make constant access. It requires O(n) time to find which key to move to top.

这题还是要依靠自己定义的结构才能完成合适的操作, java的LinkedList因为不提供通过指针的直接操作和访问能力, 所以最后实际上很多能力都没有, 效率也无法做到依靠纯粹的指针操作才能达到的O(1);

作者:

The “tail” is the pseudo node that marks the boundary of the tail, same as that the “head” node is a pseudo node that marks the head. The double linked list can be represented as head (pseudo) <–> head <–> …tail <–> tail (pseudo).
By adding two pseudo nodes to make the boundaries, we could reduce the boundary checking code such as if (head != null), making the code more concise and also more efficient.

A quick question - what is the significance of the HashTable? Although I have understood how the code works, I am finding it difficult to grasp why we need it (a HashTable)?
The HashTable cache here provides a quick loop-up from key to linked node in O(1) time, which is very critical to the required performance.
Node that although the linked list maintains the order of history when a node was added, it does not offer the random access capability to visit any given node because a list is a referenced based container (not random access container like array). Without hash table, one has to linearly traverse through the linked list to locate a specific node, which will violate the performance requirement.
So the combo usage of hash table with referenced based container (e.g., list) where the hash table stores the references to data is very powerful and there have been many similar problems, i.e., 432 All O`one Data Structure.

这个总结很精辟;

@zzg_zzm Thank you for your reply. So, just to confirm, performance is the only reason for including the HashTable, correct? Am I wrong if I think that this can be implemented even without the HashTable (without regards for performance)?
Yes, the usage of hash table is for the required performance. Without performance constraint, one can come up with many “simple” solutions, such as simply using vector> in C++ to store key-value pairs ordered by history. However, both get and set have O(capacity) complexity.
In my opinion, the point for any data structure design problems is to challenge performance. Otherwise, it would be meaningless since many programming languages have provided enormous amount of containers which you can manipulate in enough different ways to solve the issue. But the key is how to address performance trade-off among different containers to achieve the goal.

class LRUCache {  
private:  
  int _cap; vector<pair<int, int>> _keyVal;  

public:  
  LRUCache(int capacity) : _cap(capacity) {}  
  int get(int key) {  
    for (auto& kv : _keyVal) if (kv.first == key) return kv.second;  
    return -1;  
  }  
  void set(int key, int val) {  
    for (auto& kv : _keyVal) if (kv.first == key) { kv.second = val; return; }  
    if (_keyVal.emplace_back(key, val), _keyVal.size() > _cap) _keyVal.erase(_keyVal.begin());  
  }  
};

For LinkedList in java, We don’t have access to internal structure of the LinkedList.
So, Each time we call list.remove(Object o), it actually iterate through the LinkedList to first search this object, and then remove it from its internal structure.
So one operation of remove(Object o) in LinkedList actually takes O(n), not O(1).
The beauty of defining our own Double Linked List is that:

  • We don’t have to iterate the list and find the object in it. Because we already know we have it.
  • we have control to the internal structure of the list and then we can achieve O(1) for remove(Object o).

@petrichory Good idea! But I think we can go further by encapsulating all List relevant stuffs into another inner class as Doubly Linked List abstraction.

    class LruList {  
        private LruNode head = new LruNode();  
        private LruNode tail = new LruNode();  

        LruList() {  
            head.next = tail;  
            tail.prev = head;  
        }  

        void moveToTail(LruNode node) {  
            if (node.prev != null) { // Remove first if node exists in list rather than a new node  
                node.prev.next = node.next;  
                node.next.prev = node.prev;  
            }  
            LruNode last = tail.prev;  
            node.next = tail;  
            node.prev = last;  
            last.next = tail.prev = node;  
        }  

        LruNode removeHead() {  
            LruNode first = head.next;  
            head.next = first.next;  
            first.next.prev = head;  
            return first;  
        }  
    }

It seems like adding one more class but it takes care of all the mess and makes much more clean indeed. Here is the LRUCache looks like now. Thanks for sharing!

    private Map<Integer, LruNode> cache = new HashMap<>();  
    private LruList list = new LruList();  
    private int capacity;  

    public LRUCache(int capacity) {  
        this.capacity = capacity;  
    }  

    public int get(int key) {  
        if (!cache.containsKey(key)) return -1;  
        LruNode node = cache.get(key);  
        list.moveToTail(node);  
        return node.val;  
    }  

    public void put(int key, int val) {  
        if (cache.containsKey(key)) {  
            LruNode node = cache.get(key);  
            node.val = val;  
            list.moveToTail(node);  
        } else {  
            LruNode node = new LruNode(key, val);  
            list.moveToTail(node);  
            cache.put(key, node);  
            if (cache.size() > capacity) {  
                cache.remove(list.removeHead().key);  
            }  
        }  
    }

还是有人意识到这个问题的;


https://leetcode.com/problems/lru-cache/discuss/45976/C++11-code-74ms-Hash-table-+-List

There is a similar example in Java, but I wanted to share my solution using the new C++11 unordered_map and a list. The good thing about lists is that iterators are never invalidated by modifiers (unless erasing the element itself). This way, we can store the iterator to the corresponding LRU queue in the values of the hash map. Since using erase on a list with an iterator takes constant time, all operations of the LRU cache run in constant time.

class LRUCache {  
public:  
    LRUCache(int capacity) : _capacity(capacity) {}  

    int get(int key) {  
        auto it = cache.find(key);  
        if (it == cache.end()) return -1;  
        touch(it);  
        return it->second.first;  
    }  

    void set(int key, int value) {  
        auto it = cache.find(key);  
        if (it != cache.end()) touch(it);  
        else {  
            if (cache.size() == _capacity) {  
                cache.erase(used.back());  
                used.pop_back();  
            }  
            used.push_front(key);  
        }  
        cache[key] = { value, used.begin() };  
    }  

private:  
    typedef list<int> LI;  
    typedef pair<int, LI::iterator> PII;  
    typedef unordered_map<int, PII> HIPII;  

    void touch(HIPII::iterator it) {  
        int key = it->first;  
        used.erase(it->second.second);  
        used.push_front(key);  
        it->second.second = used.begin();  
    }  

    HIPII cache;  
    LI used;  
    int _capacity;  
};

大概看了一下, 还是一个利用语言特性完成的; 上一个算法看的时候, 我也意识到这里的难点在于无法快速的对LinkedList里面的node进行指针访问, 然后想到了用iterator, 但是没有真正去实施, 这个人就真正去做, 而且好像iterator就是能完成一个类似的跟指针一样的操作; 因为这题对于这个内部的指针实际上要求的操作很少, 就是一个从当前位置的remove, 然后一个add_first就行了, 这些在iterator里面正好都是提供了的功能;

这个touch函数设计的还是有点意思的;

I used the same method as yours, but runs double time. Then I find out the only difference is that I compare capacity with used.size() instead of cache.size(). After fixing the problem my program runs as fast as yours. But why does this make such a difference in running time?

It makes sense, as you can see here: { http://en.cppreference.com/w/cpp/container/list/size }
Depending on which C++ standard is used on the compiler, the size of a list is not stored explicitly, thus requiring a complete traversal to compute its size. On newer versions, all STL containers (including list) keep stored its size value, which makes the size() operation constant cost.

反正意思就是要对语言特性本身比较熟悉;

Hi, afernandez90. Great code! BTW, I want to know where comes the names LI, PII, HIPII? Are they abbreviations of some terminologies or just made up by yourself?

在一辉的一个代码里面, 也看到过类似的定义, 好像c++选手都喜欢用这种缩写;

Yes, LI = List of Int, PII = Pair of (Int,Int), HIPII = Hashtable mapping from Int to PII :D


https://leetcode.com/problems/lru-cache/discuss/45939/Laziest-implementation:-Java's-LinkedHashMap-takes-care-of-everything

This is the laziest implementation using Java’s LinkedHashMap. In the real interview, however, it is definitely not what interviewer expected.

    import java.util.LinkedHashMap;  
    public class LRUCache {  
        private LinkedHashMap<Integer, Integer> map;  
        private final int CAPACITY;  
        public LRUCache(int capacity) {  
            CAPACITY = capacity;  
            map = new LinkedHashMap<Integer, Integer>(capacity, 0.75f, true){  
                protected boolean removeEldestEntry(Map.Entry eldest) {  
                    return size() > CAPACITY;  
                }  
            };  
        }  
        public int get(int key) {  
            return map.getOrDefault(key, -1);  
        }  
        public void set(int key, int value) {  
            map.put(key, value);  
        }  
    }

Several points to mention:

不看了, 学不会;

Below is a “normal” HashMap + doubly-linked list implementation:

public class LRUCache {  
    private class Node{  
        int key, value;  
        Node prev, next;  
        Node(int k, int v){  
            this.key = k;  
            this.value = v;  
        }  
        Node(){  
            this(0, 0);  
        }  
    }  
    private int capacity, count;  
    private Map<Integer, Node> map;  
    private Node head, tail;  

    public LRUCache(int capacity) {  
        this.capacity = capacity;  
        this.count = 0;  
        map = new HashMap<>();  
        head = new Node();  
        tail = new Node();  
        head.next = tail;  
        tail.prev = head;  
    }  

    public int get(int key) {  
        Node n = map.get(key);  
        if(null==n){  
            return -1;  
        }  
        update(n);  
        return n.value;  
    }  

    public void set(int key, int value) {  
        Node n = map.get(key);  
        if(null==n){  
            n = new Node(key, value);  
            map.put(key, n);  
            add(n);  
            ++count;  
        }  
        else{  
            n.value = value;  
            update(n);  
        }  
        if(count>capacity){  
            Node toDel = tail.prev;  
            remove(toDel);  
            map.remove(toDel.key);  
            --count;  
        }  
    }  

    private void update(Node node){  
        remove(node);  
        add(node);  
    }  
    private void add(Node node){  
        Node after = head.next;  
        head.next = node;  
        node.prev = head;  
        node.next = after;  
        after.prev = node;  
    }  

    private void remove(Node node){  
        Node before = node.prev, after = node.next;  
        before.next = after;  
        after.prev = before;  
    }  
}

嘲讽来了:

Your solution shows you are a good API user.

Similar solution without using removeEldestEntry

public class LRUCache {  
    LinkedHashMap<Integer,Integer> cache=new LinkedHashMap<>();  
    int cap=0;  

    public LRUCache(int capacity) {  
        cap=capacity;  
    }  
    public int get(int key) {  
        if(cache.containsKey(key)) {  
            int value=cache.get(key);  
            cache.remove(key);  
            cache.put(key,value);  
            return value;  
        }  
        return -1;  
    }  
    public void set(int key, int value) {  
        if(cache.containsKey(key)) {  
          cache.remove(key);  
          cache.put(key,value);  
        }  
        else if(cache.size() == cap) cache.remove(cache.entrySet().iterator().next().getKey());  
       cache.put(key,value);  
    }  
}

这个还行; 所以实际上也看出来这题就是一个fancy wheels的题目: 把底层的Map的实现一换, 顶层的逻辑是非常简单的;

This method is invoked by put and putAll after inserting a new entry into the map. It provides the implementor with the opportunity to remove the eldest entry each time a new one is added. This is useful if the map represents a cache: it allows the map to reduce memory consumption by deleting stale entries.
In other words, after put operation, LinkedHashMap will call this function, and if this function returns true, it will remove the eldest entry. Therefore, we need to make sure it returns true if and only if map.size() is greater than the CAPACITY specified.
For more details you can refer to LinkedHashMap’s documentation.

等于说这种Map实际上就是针对cache的场合设计的; 这种设计的初衷就是把LRU融合了进去的;

果然:

Great solution! In the Android source code they use a LinkedHashMap as well.
https://android.googlesource.com/platform/frameworks/support.git/+/795b97d901e1793dac5c3e67d43c96a758fec388/v4/java/android/support/v4/util/LruCache.java

The parameter of the creation of map object seems not properly.
map = new LinkedHashMap(capacity, 0.75f, true)
Setting capacity with 0.75 loadFactor will let your map stores only up to 0.75*capacity elements. Change it to the following statement may be better:
map = new LinkedHashMap((int)(capacity / 0.75 + 1), 0.75f, true)

Great solution, but if constructor params is too confuse you then just straight forward implment the logic.

private LinkedHashMap<Integer, Integer> map;  
private int SIZE;  
public LRUCache(int capacity) {  
    map = new LinkedHashMap<>();  
    SIZE = capacity;  
}  

public int get(int key) {  
    if(map.containsKey(key)) {  
        int value = map.remove(key);  
        map.put(key, value);  
        return value;  
    }  
    return -1;  
}  

public void put(int key, int value) {  
    if(map.containsKey(key)) {  
        map.remove(key);  
    }else if(map.size() + 1 > SIZE) {  
        map.remove(map.keySet().iterator().next());  
    }  
    map.put(key, value);  
}

这个客观一些;


好像没了, submission最优解也基本都是DLL的做法;


Problem Description

Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and put.

get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
put(key, value) - Set or insert the value if the key is not already present. When the cache reached its capacity, it should invalidate the least recently used item before inserting a new item.

Follow up:
Could you do both operations in O(1) time complexity?

Example:

LRUCache cache = new LRUCache(2);   // capacity  

cache.put(1, 1);  
cache.put(2, 2);  
cache.get(1);       // returns 1  
cache.put(3, 3);    // evicts key 2  
cache.get(2);       // returns -1 (not found)  
cache.put(4, 4);    // evicts key 1  
cache.get(1);       // returns -1 (not found)  
cache.get(3);       // returns 3  
cache.get(4);       // returns 4

Difficulty:Hard
Total Accepted:163.7K
Total Submissions:839.1K
Contributor:LeetCode
Companies
googlefacebookmicrosoftamazonbloombergubertwittersnapchatzenefitsyahoopalantir
Related Topics
design
Similar Questions
LFU CacheDesign In-Memory File SystemDesign Compressed String Iterator

results matching ""

    No results matching ""