Rev 4185: Review tweaks from Ian. in lp:///~jameinel/bzr/lru_cache_linked_lst

John Arbash Meinel john at arbash-meinel.com
Tue Mar 24 14:28:45 GMT 2009


At lp:///~jameinel/bzr/lru_cache_linked_lst

------------------------------------------------------------
revno: 4185
revision-id: john at arbash-meinel.com-20090324142828-69pxzmujy9mxp7ge
parent: john at arbash-meinel.com-20090323022806-snn93cbemn82w1b0
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: lru_cache_linked_lst
timestamp: Tue 2009-03-24 09:28:28 -0500
message:
  Review tweaks from Ian.
-------------- next part --------------
=== modified file 'bzrlib/lru_cache.py'
--- a/bzrlib/lru_cache.py	2009-03-23 02:28:06 +0000
+++ b/bzrlib/lru_cache.py	2009-03-24 14:28:28 +0000
@@ -16,9 +16,10 @@
 
 """A simple least-recently-used (LRU) cache."""
 
-from collections import deque
-
-from bzrlib import symbol_versioning
+from bzrlib import (
+    symbol_versioning,
+    trace,
+    )
 
 
 class _LRUNode(object):
@@ -139,13 +140,13 @@
     def add(self, key, value, cleanup=None):
         """Add a new value to the cache.
 
-        Also, if the entry is ever removed from the queue, call cleanup.
-        Passing it the key and value being removed.
+        Also, if the entry is ever removed from the cache, call
+        cleanup(key, value).
 
         :param key: The key to store it under
         :param value: The object to store
         :param cleanup: None or a function taking (key, value) to indicate
-                        'value' sohuld be cleaned up.
+                        'value' should be cleaned up.
         """
         if key in self._cache:
             node = self._cache[key]
@@ -196,8 +197,6 @@
         # Make sure the cache is shrunk to the correct size
         while len(self._cache) > self._after_cleanup_count:
             self._remove_lru()
-        # No need to compact the queue at this point, because the code that
-        # calls this would have already triggered it based on queue length
 
     def __setitem__(self, key, value):
         """Add a value to the cache, there will be no cleanup function."""
@@ -272,7 +271,8 @@
     This differs in that it doesn't care how many actual items there are,
     it just restricts the cache to be cleaned up after so much data is stored.
 
-    The values that are added must support len(value).
+    The size of items added will be computed using compute_size(value), which
+    defaults to len() if not supplied.
     """
 
     def __init__(self, max_size=1024*1024, after_cleanup_size=None,
@@ -300,21 +300,28 @@
     def add(self, key, value, cleanup=None):
         """Add a new value to the cache.
 
-        Also, if the entry is ever removed from the queue, call cleanup.
-        Passing it the key and value being removed.
+        Also, if the entry is ever removed from the cache, call
+        cleanup(key, value).
 
         :param key: The key to store it under
         :param value: The object to store
         :param cleanup: None or a function taking (key, value) to indicate
-                        'value' sohuld be cleaned up.
+                        'value' should be cleaned up.
         """
         node = self._cache.get(key, None)
         value_len = self._compute_size(value)
         if value_len >= self._after_cleanup_size:
+            # The new value is 'too big to fit', as it would fill up/overflow
+            # the cache all by itself
+            trace.mutter('Adding the key %r to an LRUSizeCache failed.'
+                         ' value %d is too big to fit in the cache'
+                         ' with size %d %d', key, value_len,
+                         self._after_cleanup_size, self._max_size)
             if node is not None:
-                # The new value is too large, we won't be replacing the value,
-                # just removing the node completely
+                # We won't be replacing the old node, so just remove it
                 self._remove_node(node)
+            if cleanup is not None:
+                cleanup(key, value)
             return
         if node is None:
             node = _LRUNode(key, value, cleanup=cleanup)

=== modified file 'bzrlib/tests/test_lru_cache.py'
--- a/bzrlib/tests/test_lru_cache.py	2009-03-22 19:30:45 +0000
+++ b/bzrlib/tests/test_lru_cache.py	2009-03-24 14:28:28 +0000
@@ -90,7 +90,8 @@
         # 'foo' is now most recent, so final cleanup will call it last
         cache['foo']
         cache.clear()
-        self.assertEqual([('baz', '1'), ('biz', '3'), ('foo', '2')], cleanup_called)
+        self.assertEqual([('baz', '1'), ('biz', '3'), ('foo', '2')],
+                         cleanup_called)
 
     def test_cleanup_on_replace(self):
         """Replacing an object should cleanup the old value."""
@@ -177,7 +178,7 @@
         cache.cleanup()
         self.assertEqual(2, len(cache))
 
-    def test_compact_preserves_last_access_order(self):
+    def test_preserve_last_access_order(self):
         cache = lru_cache.LRUCache(max_cache=5)
 
         # Add these in order
@@ -312,6 +313,22 @@
         self.assertEqual(3, cache._value_size)
         self.assertEqual({'test':'key'}, cache.items())
 
+    def test_no_add_over_size_cleanup(self):
+        """If a large value is not cached, we will call cleanup right away."""
+        cleanup_calls = []
+        def cleanup(key, value):
+            cleanup_calls.append((key, value))
+
+        cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
+        self.assertEqual(0, cache._value_size)
+        self.assertEqual({}, cache.items())
+        cache.add('test', 'key that is too big', cleanup=cleanup)
+        # key was not added
+        self.assertEqual(0, cache._value_size)
+        self.assertEqual({}, cache.items())
+        # and cleanup was called
+        self.assertEqual([('test', 'key that is too big')], cleanup_calls)
+
     def test_adding_clears_cache_based_on_size(self):
         """The cache is cleared in LRU order until small enough"""
         cache = lru_cache.LRUSizeCache(max_size=20)



More information about the bazaar-commits mailing list