Example #1
0
    def test_cache_size(self):
        """cache_size() reports the configured max_cache value."""
        for size in (10, 256):
            cache = lru_cache.LRUCache(max_cache=size)
            self.assertEqual(size, cache.cache_size())

        # resize() updates the reported size in place.
        cache.resize(512)
        self.assertEqual(512, cache.cache_size())
Example #2
0
    def test_len(self):
        """len() tracks entries, ignores replacements, and caps at max."""
        lru = lru_cache.LRUCache(max_cache=10, after_cleanup_count=10)

        for key in (1, 2, 3, 4):
            lru[key] = key * 10

        self.assertEqual(4, len(lru))

        for key in (5, 6, 7, 8):
            lru[key] = key * 10

        self.assertEqual(8, len(lru))

        # Overwriting an existing key must not change the length.
        lru[1] = 15

        self.assertEqual(8, len(lru))

        for key in (9, 10, 11):
            lru[key] = key * 10

        # Capacity reached: the least recently used entry (2) is gone,
        # and the replacement of 1 bumped it ahead of 8.
        self.assertEqual(10, len(lru))
        self.assertEqual([11, 10, 9, 1, 8, 7, 6, 5, 4, 3],
                         [node.key for node in walk_lru(lru)])
Example #3
0
    def test_len(self):
        """len() grows with inserts, ignores replacements, caps at max."""
        lru = lru_cache.LRUCache(max_cache=10)

        for key in (1, 2, 3, 4):
            lru[key] = key * 10

        self.assertEqual(4, len(lru))

        for key in (5, 6, 7, 8):
            lru[key] = key * 10

        self.assertEqual(8, len(lru))

        # Overwriting an existing key must not change the length.
        lru[1] = 15

        self.assertEqual(8, len(lru))

        for key in (9, 10, 11):
            lru[key] = key * 10

        # We hit the max; the cache cannot report more than 10 entries.
        self.assertEqual(10, len(lru))
Example #4
0
 def _install_inventory_records(self, records):
     """Install inventory records from a bundle into the repository.

     :param records: iterable of (key, metadata, bytes) tuples, where
         bytes is a multiparent patch text, and metadata carries at
         least 'parents' and 'sha1' entries.
     """
     # Fast path: if the bundle was serialized with the same format the
     # repository uses (and the serializer supports it), install the
     # multiparent records directly without re-parsing inventories.
     if (self._info['serializer'] == self._repository._serializer.format_num
         and self._repository._serializer.support_altered_by_hack):
         return self._install_mp_records_keys(self._repository.inventories,
             records)
     # Use a 10MB text cache, since these are string xml inventories. Note
     # that 10MB is fairly small for large projects (a single inventory can
     # be >5MB). Another possibility is to cache 10-20 inventory texts
     # instead
     inventory_text_cache = lru_cache.LRUSizeCache(10*1024*1024)
     # Also cache the in-memory representation. This allows us to create
     # inventory deltas to apply rather than calling add_inventory from
     # scratch each time.
     inventory_cache = lru_cache.LRUCache(10)
     pb = ui.ui_factory.nested_progress_bar()
     try:
         num_records = len(records)
         for idx, (key, metadata, bytes) in enumerate(records):
             pb.update('installing inventory', idx, num_records)
             revision_id = key[-1]
             parent_ids = metadata['parents']
             # Note: This assumes the local ghosts are identical to the
             #       ghosts in the source, as the Bundle serialization
             #       format doesn't record ghosts.
             p_texts = self._get_parent_inventory_texts(inventory_text_cache,
                                                        inventory_cache,
                                                        parent_ids)
             # Why does to_lines() take strings as the source, it seems that
             # it would have to cast to a list of lines, which we get back
             # as lines and then cast back to a string.
             target_lines = multiparent.MultiParent.from_patch(bytes
                         ).to_lines(p_texts)
             inv_text = ''.join(target_lines)
             # Release the line list promptly; inventories can be large.
             del target_lines
             # Verify the reconstructed text against the recorded sha1
             # before trusting it.
             sha1 = osutils.sha_string(inv_text)
             if sha1 != metadata['sha1']:
                 raise errors.BadBundle("Can't convert to target format")
             # Add this to the cache so we don't have to extract it again.
             inventory_text_cache[revision_id] = inv_text
             target_inv = self._source_serializer.read_inventory_from_string(
                 inv_text)
             self._handle_root(target_inv, parent_ids)
             parent_inv = None
             if parent_ids:
                 parent_inv = inventory_cache.get(parent_ids[0], None)
             try:
                 # Prefer the cheaper delta-based insertion when the first
                 # parent's in-memory inventory is still cached.
                 if parent_inv is None:
                     self._repository.add_inventory(revision_id, target_inv,
                                                    parent_ids)
                 else:
                     delta = target_inv._make_delta(parent_inv)
                     self._repository.add_inventory_by_delta(parent_ids[0],
                         delta, revision_id, parent_ids)
             except errors.UnsupportedInventoryKind:
                 raise errors.IncompatibleRevision(repr(self._repository))
             inventory_cache[revision_id] = target_inv
     finally:
         pb.finished()
Example #5
0
    def test_get(self):
        """get() returns the stored value, or the default on a miss."""
        cache = lru_cache.LRUCache(max_cache=5)

        cache.add(1, 10)
        cache.add(2, 20)
        self.assertEqual(20, cache.get(2))
        # Missing key with no default yields None.
        self.assertIs(None, cache.get(3))
        sentinel = object()
        self.assertIs(sentinel, cache.get(3, sentinel))
Example #6
0
 def test_cleanup_function_deprecated(self):
     """Test that per-node cleanup functions are no longer allowed"""
     cache = lru_cache.LRUCache()
     # Even wrapped in applyDeprecated, passing cleanup= must raise.
     self.assertRaises(
         ValueError,
         self.applyDeprecated,
         symbol_versioning.deprecated_in((2, 5, 0)),
         cache.add, "key", 1, cleanup=lambda: None)
Example #7
0
    def test_missing(self):
        """Absent keys raise on lookup; containment reflects contents."""
        lru = lru_cache.LRUCache(max_cache=10)

        self.assertNotIn('foo', lru)
        self.assertRaises(KeyError, lru.__getitem__, 'foo')

        lru['foo'] = 'bar'
        self.assertEqual('bar', lru['foo'])
        self.assertIn('foo', lru)
        # Values are not keys; 'bar' must not be found.
        self.assertNotIn('bar', lru)
Example #8
0
    def test_keys(self):
        """keys() reflects the current contents after eviction."""
        lru = lru_cache.LRUCache(max_cache=5, after_cleanup_count=5)

        for key in (1, 2, 3):
            lru[key] = key + 1
        self.assertEqual([1, 2, 3], sorted(lru.keys()))
        for key in (4, 5, 6):
            lru[key] = key + 1
        # The sixth insert pushed out the oldest key (1).
        self.assertEqual([2, 3, 4, 5, 6], sorted(lru.keys()))
Example #9
0
    def test_queue_stays_bounded(self):
        """Lots of accesses does not cause the queue to grow without bound."""
        cache = lru_cache.LRUCache(max_cache=10)

        cache['baz'] = 'biz'
        cache['foo'] = 'bar'

        # Repeated hits append to the internal access queue; the cache is
        # expected to compact it periodically instead of growing forever.
        for i in xrange(1000):
            cache['baz']

        # assertTrue replaces the deprecated failUnless alias used here,
        # matching the assertTrue/assertFalse style used elsewhere in
        # this file.
        self.assertTrue(len(cache._queue) < 40)
Example #10
0
    def test_overflow(self):
        """Adding extra entries will pop out old ones."""
        cache = lru_cache.LRUCache(max_cache=1)

        cache['foo'] = 'bar'
        # With a max cache of 1, adding 'baz' should pop out 'foo'
        cache['baz'] = 'biz'

        # assertFalse/assertTrue replace the deprecated failIf/failUnless
        # aliases, matching the sibling test_overflow variant in this file.
        self.assertFalse('foo' in cache)
        self.assertTrue('baz' in cache)

        self.assertEqual('biz', cache['baz'])
Example #11
0
    def test_get(self):
        """get() returns values, defaults on a miss, and bumps recency."""
        cache = lru_cache.LRUCache(max_cache=5)

        cache[1] = 10
        cache[2] = 20
        self.assertEqual(20, cache.get(2))
        self.assertIs(None, cache.get(3))
        sentinel = object()
        self.assertIs(sentinel, cache.get(3, sentinel))
        self.assertEqual([2, 1], [node.key for node in walk_lru(cache)])
        # A successful get() moves its key to the most-recent position.
        self.assertEqual(10, cache.get(1))
        self.assertEqual([1, 2], [node.key for node in walk_lru(cache)])
Example #12
0
    def test_overflow(self):
        """Inserting past max_cache evicts the oldest entry."""
        cache = lru_cache.LRUCache(max_cache=1, after_cleanup_count=1)

        cache['foo'] = 'bar'
        # Capacity is one, so storing 'baz' must push 'foo' out.
        cache['baz'] = 'biz'

        self.assertNotIn('foo', cache)
        self.assertIn('baz', cache)

        self.assertEqual('biz', cache['baz'])
Example #13
0
    def test_by_usage(self):
        """Accessing entries bumps them up in priority."""
        cache = lru_cache.LRUCache(max_cache=2)

        cache['baz'] = 'biz'
        cache['foo'] = 'bar'

        # Reading 'baz' makes 'foo' the least recently used entry.
        self.assertEqual('biz', cache['baz'])

        # So adding a third key must evict 'foo'.
        cache['nub'] = 'in'

        self.assertNotIn('foo', cache)
Example #14
0
    def test_cleanup(self):
        """An explicit cleanup() shrinks down to after_cleanup_count."""
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=2)

        # Fill to capacity, oldest first.
        for key, value in [(1, 10), (2, 20), (3, 25), (4, 30), (5, 35)]:
            cache[key] = value

        self.assertEqual(5, len(cache))
        # Force a compaction down to the post-cleanup watermark.
        cache.cleanup()
        self.assertEqual(2, len(cache))
Example #15
0
    def test_cleanup(self):
        """An explicit cleanup() compacts down to the configured size."""
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_size=2)

        # Fill to capacity, oldest first.
        for key, value in [(1, 10), (2, 20), (3, 25), (4, 30), (5, 35)]:
            cache.add(key, value)

        self.assertEqual(5, len(cache))
        # Force a compaction down to the post-cleanup watermark.
        cache.cleanup()
        self.assertEqual(2, len(cache))
Example #16
0
    def test_cleanup_shrinks_to_after_clean_size(self):
        """Overflowing max_cache triggers a shrink to after_cleanup_size."""
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_size=3)

        for key, value in [(1, 10), (2, 20), (3, 25), (4, 30), (5, 35)]:
            cache.add(key, value)

        self.assertEqual(5, len(cache))
        # The sixth entry exceeds max_cache, so the cache compacts itself
        # down to the after-cleanup size.
        cache.add(6, 40)
        self.assertEqual(3, len(cache))
Example #17
0
    def test_cleanup_shrinks_to_after_clean_count(self):
        """Overflowing max_cache triggers a shrink to after_cleanup_count."""
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=3)

        for key, value in [(1, 10), (2, 20), (3, 25), (4, 30), (5, 35)]:
            cache[key] = value

        self.assertEqual(5, len(cache))
        # The sixth entry exceeds max_cache, so the cache compacts itself
        # down to the after-cleanup count.
        cache[6] = 40
        self.assertEqual(3, len(cache))
Example #18
0
 def test_map_None(self):
     """None must work as an ordinary cache key."""
     cache = lru_cache.LRUCache(max_cache=10)
     self.assertNotIn(None, cache)
     cache[None] = 1
     self.assertEqual(1, cache[None])
     # Replacing the value under None behaves like any other key.
     cache[None] = 2
     self.assertEqual(2, cache[None])
     # Exercise __getitem__ with None sitting at both ends of the LRU
     # chain (most- and least-recently used positions).
     cache[1] = 3
     cache[None] = 1
     cache[None]
     cache[1]
     cache[None]
     self.assertEqual([None, 1], [node.key for node in walk_lru(cache)])
Example #19
0
    def test_cleanup_on_replace(self):
        """Replacing an object should cleanup the old value."""
        calls = []
        def record_cleanup(key, val):
            calls.append((key, val))

        cache = lru_cache.LRUCache(max_cache=2)
        cache.add(1, 10, cleanup=record_cleanup)
        cache.add(2, 20, cleanup=record_cleanup)
        # Re-adding key 2 must run the cleanup for the displaced value.
        cache.add(2, 25, cleanup=record_cleanup)

        self.assertEqual([(2, 20)], calls)
        self.assertEqual(25, cache[2])

        # Even __setitem__ should make sure cleanup() is called
        cache[2] = 26
        self.assertEqual([(2, 20), (2, 25)], calls)
Example #20
0
    def test_cleanup(self):
        """Test that we can use a cleanup function."""
        calls = []
        def record_cleanup(key, val):
            calls.append((key, val))

        cache = lru_cache.LRUCache(max_cache=2)

        cache.add('baz', '1', cleanup=record_cleanup)
        cache.add('foo', '2', cleanup=record_cleanup)
        # Capacity is two, so this add evicts 'baz' and runs its cleanup.
        cache.add('biz', '3', cleanup=record_cleanup)

        self.assertEqual([('baz', '1')], calls)

        # 'foo' is now most recent, so final cleanup will call it last
        cache['foo']
        cache.clear()
        self.assertEqual([('baz', '1'), ('biz', '3'), ('foo', '2')], calls)
Example #21
0
    def test_preserve_last_access_order(self):
        """walk_lru yields nodes from most- to least-recently used."""
        cache = lru_cache.LRUCache(max_cache=5)

        # Insert keys 1..5 in order; insertion counts as an access.
        for key, value in [(1, 10), (2, 20), (3, 25), (4, 30), (5, 35)]:
            cache[key] = value

        self.assertEqual([5, 4, 3, 2, 1],
                         [node.key for node in walk_lru(cache)])

        # Reads reorder the chain: most recently touched comes first.
        cache[2]
        cache[5]
        cache[3]
        cache[2]
        self.assertEqual([2, 3, 5, 4, 1],
                         [node.key for node in walk_lru(cache)])
Example #22
0
 def test_resize_smaller(self):
     """Shrinking via resize() trims the cache and applies the new limits."""
     cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
     for key in (1, 2, 3, 4, 5):
         cache[key] = key + 1
     self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
     # Overflowing compacts down to the after-cleanup count of 4.
     cache[6] = 7
     self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
     # Now resize to something smaller, which triggers a cleanup
     cache.resize(max_cache=3, after_cleanup_count=2)
     self.assertEqual([5, 6], sorted(cache.keys()))
     # Adding something will use the new size
     cache[7] = 8
     self.assertEqual([5, 6, 7], sorted(cache.keys()))
     cache[8] = 9
     self.assertEqual([7, 8], sorted(cache.keys()))
Example #23
0
 def test_resize_larger(self):
     """Growing via resize() keeps contents and honours the new limits."""
     cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
     for key in (1, 2, 3, 4, 5):
         cache[key] = key + 1
     self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
     # Overflowing compacts down to the after-cleanup count of 4.
     cache[6] = 7
     self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
     # Growing the cache must not discard anything by itself.
     cache.resize(max_cache=8, after_cleanup_count=6)
     self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
     for key in (7, 8, 9, 10):
         cache[key] = key + 1
     self.assertEqual([3, 4, 5, 6, 7, 8, 9, 10], sorted(cache.keys()))
     cache[11] = 12  # triggers cleanup back to new after_cleanup_count
     self.assertEqual([6, 7, 8, 9, 10, 11], sorted(cache.keys()))
Example #24
0
    def test_compact_preserves_last_access_order(self):
        """_compact_queue keeps only each key's final access position."""
        cache = lru_cache.LRUCache(max_cache=5)

        # Insert keys 1..5 in order.
        for key, value in [(1, 10), (2, 20), (3, 25), (4, 30), (5, 35)]:
            cache.add(key, value)

        self.assertEqual([1, 2, 3, 4, 5], list(cache._queue))

        # Reads append to the queue and bump the per-key refcount.
        cache[2]
        cache[5]
        cache[3]
        cache[2]
        self.assertEqual([1, 2, 3, 4, 5, 2, 5, 3, 2], list(cache._queue))
        self.assertEqual({1: 1, 2: 3, 3: 2, 4: 1, 5: 2}, cache._refcount)

        # Compacting drops duplicates but preserves each key's last
        # position, so relative recency survives.
        cache._compact_queue()
        self.assertEqual([1, 4, 5, 3, 2], list(cache._queue))
        self.assertEqual({1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, cache._refcount)
Example #25
0
 def test_after_cleanup_none(self):
     """Passing after_cleanup_size=None falls back to max_cache."""
     cache = lru_cache.LRUCache(max_cache=5, after_cleanup_size=None)
     self.assertEqual(5, cache._after_cleanup_size)
Example #26
0
 def test_after_cleanup_none(self):
     cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=None)
     # By default _after_cleanup_count is 80% of the normal size
     # (the original comment said _after_cleanup_size, but this test
     # asserts the count attribute).
     self.assertEqual(4, cache._after_cleanup_count)
Example #27
0
 def __init__(self, session_var):
     """Remember the session variable name and set up per-request state."""
     self.session_var = session_var
     # Bounded cache of at most 10 entries (presumably revision graphs,
     # going by the name -- confirm against callers).
     self.graph_cache = lru_cache.LRUCache(10)
     self.log = logging.getLogger('lp-loggerhead')
Example #28
0
 def test_add__null_key(self):
     """The reserved _null_key sentinel may not be stored as a key."""
     cache = lru_cache.LRUCache(max_cache=10)
     self.assertRaises(
         ValueError, cache.__setitem__, lru_cache._null_key, 1)
Example #29
0
 def test_after_cleanup_larger_than_max(self):
     """An after_cleanup_count above max_cache is clamped to max_cache."""
     cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=10)
     self.assertEqual(5, cache._after_cleanup_count)