def test_remove_tracks_size(self):
    """Removing an entry subtracts its value's size from the running total."""
    sized = lru_cache.LRUSizeCache()
    self.assertEqual(0, sized._value_size)
    sized.add('my key', 'my value text')  # 13 characters
    self.assertEqual(13, sized._value_size)
    sized._remove('my key')
    self.assertEqual(0, sized._value_size)
def test_keys(self):
    """keys() reports every key currently held in the cache."""
    sized = lru_cache.LRUSizeCache(max_size=10)
    for key, value in [(1, 'a'), (2, 'b'), (3, 'cdef')]:
        sized[key] = value
    self.assertEqual([1, 2, 3], sorted(sized.keys()))
def test_remove_tracks_size(self):
    """Removing a node subtracts its value's size from the running total."""
    sized = lru_cache.LRUSizeCache()
    self.assertEqual(0, sized._value_size)
    sized['my key'] = 'my value text'  # 13 characters
    self.assertEqual(13, sized._value_size)
    # _remove_node operates on the internal linked-list node, so fetch it
    # straight out of the underlying dict.
    node = sized._cache['my key']
    sized._remove_node(node)
    self.assertEqual(0, sized._value_size)
def _install_inventory_records(self, records):
    """Install inventory records from a bundle into the target repository.

    Each record is reconstructed from a multi-parent diff against its
    parents' inventory texts, sha1-verified, then added to the repository
    (as a delta against the first parent when possible).

    :param records: iterable of (key, metadata, bytes) tuples, where key's
        last element is the revision id, metadata carries 'parents' and
        'sha1', and bytes is the multiparent patch text.
    :raises errors.BadBundle: if a reconstructed text's sha1 does not match
        the sha1 recorded in the bundle metadata.
    :raises errors.IncompatibleRevision: if the target repository cannot
        hold an inventory kind present in the bundle.
    """
    # Fast path: when the bundle was serialized by the same serializer the
    # target repository uses, install the records directly.
    if (self._info['serializer'] == self._repository._serializer.format_num
        and self._repository._serializer.support_altered_by_hack):
        return self._install_mp_records_keys(self._repository.inventories,
            records)
    # Use a 10MB text cache, since these are string xml inventories. Note
    # that 10MB is fairly small for large projects (a single inventory can
    # be >5MB). Another possibility is to cache 10-20 inventory texts
    # instead
    inventory_text_cache = lru_cache.LRUSizeCache(10*1024*1024)
    # Also cache the in-memory representation. This allows us to create
    # inventory deltas to apply rather than calling add_inventory from
    # scratch each time.
    inventory_cache = lru_cache.LRUCache(10)
    pb = ui.ui_factory.nested_progress_bar()
    try:
        num_records = len(records)
        for idx, (key, metadata, bytes) in enumerate(records):
            pb.update('installing inventory', idx, num_records)
            revision_id = key[-1]
            parent_ids = metadata['parents']
            # Note: This assumes the local ghosts are identical to the
            #       ghosts in the source, as the Bundle serialization
            #       format doesn't record ghosts.
            p_texts = self._get_parent_inventory_texts(inventory_text_cache,
                                                       inventory_cache,
                                                       parent_ids)
            # Why does to_lines() take strings as the source, it seems that
            # it would have to cast to a list of lines, which we get back
            # as lines and then cast back to a string.
            target_lines = multiparent.MultiParent.from_patch(bytes
                ).to_lines(p_texts)
            inv_text = ''.join(target_lines)
            # Free the line list promptly; inventories can be several MB.
            del target_lines
            sha1 = osutils.sha_string(inv_text)
            if sha1 != metadata['sha1']:
                raise errors.BadBundle("Can't convert to target format")
            # Add this to the cache so we don't have to extract it again.
            inventory_text_cache[revision_id] = inv_text
            target_inv = self._source_serializer.read_inventory_from_string(
                inv_text)
            self._handle_root(target_inv, parent_ids)
            parent_inv = None
            if parent_ids:
                parent_inv = inventory_cache.get(parent_ids[0], None)
            try:
                if parent_inv is None:
                    # No cached parent inventory: add from scratch.
                    self._repository.add_inventory(revision_id, target_inv,
                                                   parent_ids)
                else:
                    # Cheaper path: apply a delta against the first parent.
                    delta = target_inv._make_delta(parent_inv)
                    self._repository.add_inventory_by_delta(parent_ids[0],
                        delta, revision_id, parent_ids)
            except errors.UnsupportedInventoryKind:
                raise errors.IncompatibleRevision(repr(self._repository))
            inventory_cache[revision_id] = target_inv
    finally:
        pb.finished()
def test_adding_clears_to_after_cleanup_size(self):
    """Overflow evicts entries until after_cleanup_size is respected."""
    sized = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
    for key, value in [('key1', 'value'),     # 5 chars
                       ('key2', 'value2'),    # 6 chars
                       ('key3', 'value23')]:  # 7 chars
        sized[key] = value
    self.assertEqual(5 + 6 + 7, sized._value_size)
    # Touch key2 so it becomes the most recently used entry.
    sized['key2']
    # 8 more chars pushes the total past max_size (20); cleanup then drops
    # entries until the total is at most after_cleanup_size (10), which
    # here means all three earlier keys go — even the freshened key2.
    sized['key4'] = 'value234'
    self.assertEqual(8, sized._value_size)
    self.assertEqual({'key4': 'value234'}, sized.as_dict())
def test_cleanup(self):
    """cleanup() shrinks the cache to after_cleanup_size in LRU order."""
    sized = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
    # Insert oldest-first so the eviction order is predictable.
    sized['key1'] = 'value'    # 5 chars
    sized['key2'] = 'value2'   # 6 chars
    sized['key3'] = 'value23'  # 7 chars
    self.assertEqual(5 + 6 + 7, sized._value_size)
    sized.cleanup()
    # Only the most recent entry (7 chars) fits under the 10-char target.
    self.assertEqual(7, sized._value_size)
def test_adding_clears_cache_based_on_size(self):
    """The cache is cleared in LRU order until small enough"""
    sized = lru_cache.LRUSizeCache(max_size=20)
    sized.add('key1', 'value')    # 5 chars
    sized.add('key2', 'value2')   # 6 chars
    sized.add('key3', 'value23')  # 7 chars
    self.assertEqual(5 + 6 + 7, sized._value_size)
    # Reading key2 refreshes it, leaving key1 and key3 as least recent.
    sized['key2']
    sized.add('key4', 'value234')  # 8 chars pushes the total over 20
    # key1 and key3 had to go; the freshened key2 and new key4 remain.
    self.assertEqual(6 + 8, sized._value_size)
    self.assertEqual({'key2': 'value2', 'key4': 'value234'}, sized._cache)
def test_resize_larger(self):
    """Growing the limits keeps current entries and applies to later adds."""
    sized = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
    for key, value in enumerate(['abc', 'def', 'ghi', 'jkl'], start=1):
        sized[key] = value  # the fourth entry (12 chars total) triggers a cleanup
    self.assertEqual([2, 3, 4], sorted(sized.keys()))
    # Enlarging the limits must not evict anything by itself.
    sized.resize(max_size=15, after_cleanup_size=12)
    self.assertEqual([2, 3, 4], sorted(sized.keys()))
    sized[5] = 'mno'
    sized[6] = 'pqr'
    self.assertEqual([2, 3, 4, 5, 6], sorted(sized.keys()))
    # 18 chars exceeds the new max of 15, so cleanup runs down to <= 12.
    sized[7] = 'stu'
    self.assertEqual([4, 5, 6, 7], sorted(sized.keys()))
def test_custom_sizes(self):
    """A caller-supplied compute_size function is used to weigh values."""
    def total_length(chunks):
        # Weigh a list of strings by the sum of their lengths.
        return sum(map(len, chunks))
    sized = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10,
                                   compute_size=total_length)
    sized.add('key1', ['val', 'ue'])    # weighs 5
    sized.add('key2', ['val', 'ue2'])   # weighs 6
    sized.add('key3', ['val', 'ue23'])  # weighs 7
    self.assertEqual(5 + 6 + 7, sized._value_size)
    # Freshen key2 so it is the most recently used entry.
    sized['key2']
    # Adding 8 more exceeds max_size; cleanup must then drop entries until
    # the total is <= 10, which removes all three earlier keys.
    sized.add('key4', ['value', '234'])
    self.assertEqual(8, sized._value_size)
    self.assertEqual({'key4': ['value', '234']}, sized._cache)
def test_resize_smaller(self):
    """Shrinking the limits evicts immediately and governs later adds."""
    sized = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
    for key, value in enumerate(['abc', 'def', 'ghi', 'jkl'], start=1):
        sized[key] = value  # the fourth entry triggers a cleanup
    self.assertEqual([2, 3, 4], sorted(sized.keys()))
    # Shrinking below the current total must trigger a cleanup at once.
    sized.resize(max_size=6, after_cleanup_size=4)
    self.assertEqual([4], sorted(sized.keys()))
    # Subsequent adds obey the new, smaller max size.
    sized[5] = 'mno'
    self.assertEqual([4, 5], sorted(sized.keys()))
    # A sixth entry overflows the new max of 6 and cleans down to <= 4.
    sized[6] = 'pqr'
    self.assertEqual([6], sorted(sized.keys()))
def test_no_add_over_size(self):
    """Adding a large value may not be cached at all."""
    sized = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)

    def check_only_original_entry():
        # The cache still holds exactly the first small entry.
        self.assertEqual(3, sized._value_size)
        self.assertEqual({'test': 'key'}, sized.as_dict())

    self.assertEqual(0, sized._value_size)
    self.assertEqual({}, sized.as_dict())
    sized['test'] = 'key'
    check_only_original_entry()
    # A value larger than max_size is rejected outright.
    sized['test2'] = 'key that is too big'
    check_only_original_entry()
    # If we would add a key, only to cleanup and remove all cached entries,
    # then obviously that value should not be stored
    sized['test3'] = 'bigkey'
    check_only_original_entry()
    sized['test4'] = 'bikey'
    check_only_original_entry()
def test_add_tracks_size(self):
    """Storing a value grows _value_size by the value's length."""
    sized = lru_cache.LRUSizeCache()
    self.assertEqual(0, sized._value_size)
    sized['my key'] = 'my value text'  # 13 characters
    self.assertEqual(13, sized._value_size)
def test_add__null_key(self):
    """The reserved _null_key sentinel may never be used as a cache key."""
    sized = lru_cache.LRUSizeCache()
    self.assertRaises(ValueError,
                      sized.__setitem__, lru_cache._null_key, 1)
def test_basic_init(self):
    """Defaults: 2048 entries max, cleanup target is 80% of max_size."""
    sized = lru_cache.LRUSizeCache()
    self.assertEqual(2048, sized._max_cache)
    self.assertEqual(int(sized._max_size * 0.8), sized._after_cleanup_size)
    self.assertEqual(0, sized._value_size)
def test_basic_init(self):
    """Check the documented default settings of a freshly built cache."""
    sized = lru_cache.LRUSizeCache()
    self.assertEqual(2048, sized._max_cache)
    self.assertEqual(4 * 2048, sized._compact_queue_length)
    self.assertEqual(sized._max_size, sized._after_cleanup_size)
    self.assertEqual(0, sized._value_size)