def test_deepcopy(self):
    """The key/val wrapper must operate on a deep copy of the wrapped cache.

    Mutations through either object must not be visible through the other.
    """
    raw = cache.LruCache(10)
    wrapped = cache.keyval_cache(raw)
    # Insert via the wrapper: the underlying original must not see the key
    wrapped.put(1, 2)
    self.assertFalse(raw.has(1))
    # Insert via the original: the wrapper must not see the key either
    raw.put(3)
    self.assertFalse(wrapped.has(3))
def test_deepcopy(self):
    """keyval_cache wraps a deep copy: no state is shared with the original."""
    raw = cache.LruCache(10)
    wrapped = cache.keyval_cache(raw)
    # A put through the wrapper is invisible to the original cache
    wrapped.put(1, 2)
    assert not raw.has(1)
    # A put through the original is invisible to the wrapper
    raw.put(3)
    assert not wrapped.has(3)
def test_naming(self):
    """Wrapped methods keep their public __name__ and a non-empty __doc__."""
    wrapped = cache.keyval_cache(cache.FifoCache(3))
    for attr in ('get', 'put', 'dump', 'clear'):
        method = getattr(wrapped, attr)
        self.assertEqual(method.__name__, attr)
        self.assertGreater(len(method.__doc__), 0)
def test_naming(self):
    """Wrapping must preserve each method's name and docstring."""
    wrapped = cache.keyval_cache(cache.FifoCache(3))
    for attr in ("get", "put", "dump", "clear"):
        bound = getattr(wrapped, attr)
        self.assertEqual(bound.__name__, attr)
        self.assertGreater(len(bound.__doc__), 0)
def test_naming(self):
    """Decorated cache methods expose the original names and docstrings."""
    wrapped = cache.keyval_cache(cache.FifoCache(3))
    for attr in ("get", "put", "dump", "clear"):
        method = getattr(wrapped, attr)
        assert method.__name__ == attr
        assert len(method.__doc__) > 0
def test_naming(self):
    """The keyval wrapper must not clobber method names or docstrings."""
    wrapped = cache.keyval_cache(cache.FifoCache(3))
    for attr in ('get', 'put', 'dump', 'clear'):
        bound = getattr(wrapped, attr)
        assert bound.__name__ == attr
        assert len(bound.__doc__) > 0
def test_key_val_cache(self):
    """Exercise get/put/dump/has/remove/len/clear on a FIFO-backed keyval cache."""
    store = cache.keyval_cache(cache.FifoCache(3))
    # First insertion, then overwrite of the same key
    store.put(1, 11)
    self.assertEqual(store.get(1), 11)
    store.put(1, 12)
    self.assertEqual(store.get(1), 12)
    self.assertEqual(store.dump(), [(1, 12)])
    # Fill the cache, then overflow it: FIFO evicts the oldest entry (1, 12)
    store.put(2, 21)
    self.assertTrue(store.has(1))
    self.assertTrue(store.has(2))
    store.put(3, 31)
    evicted_key, evicted_val = store.put(4, 41)
    # remove() returns the value that was stored under the key
    self.assertEqual(store.remove(2), 21)
    self.assertEqual(len(store), 2)
    self.assertEqual((evicted_key, evicted_val), (1, 12))
    # clear() empties the cache entirely
    store.clear()
    self.assertEqual(len(store), 0)
def test_key_val_cache(self):
    """Full workout of the keyval cache API over a FIFO cache of size 3."""
    store = cache.keyval_cache(cache.FifoCache(3))
    # Insert then overwrite key 1
    store.put(1, 11)
    assert store.get(1) == 11
    store.put(1, 12)
    assert store.get(1) == 12
    assert store.dump() == [(1, 12)]
    # Fill to capacity, then overflow: the oldest pair (1, 12) is evicted
    store.put(2, 21)
    assert store.has(1)
    assert store.has(2)
    store.put(3, 31)
    evicted_key, evicted_val = store.put(4, 41)
    # remove() hands back the stored value and shrinks the cache
    assert store.remove(2) == 21
    assert len(store) == 2
    assert (evicted_key, evicted_val) == (1, 12)
    store.clear()
    assert len(store) == 0
def test_key_val_cache(self):
    """Check keyval cache operations plus method-name preservation."""
    store = cache.keyval_cache(cache.FifoCache(3))
    # Insert, then overwrite, key 1
    store.put(1, 11)
    self.assertEqual(store.get(1), 11)
    store.put(1, 12)
    self.assertEqual(store.get(1), 12)
    self.assertEqual(store.dump(), [(1, 12)])
    store.put(2, 21)
    self.assertTrue(store.has(1))
    self.assertTrue(store.has(2))
    # Overflowing the size-3 FIFO evicts the oldest pair (1, 12)
    store.put(3, 31)
    evicted_key, evicted_val = store.put(4, 41)
    self.assertEqual((evicted_key, evicted_val), (1, 12))
    store.clear()
    self.assertEqual(len(store), 0)
    # The wrapper must keep the wrapped methods' public names
    for attr in ('get', 'put', 'dump', 'clear'):
        self.assertEqual(getattr(store, attr).__name__, attr)
def test_key_val_cache(self):
    """Behavioural checks on a FIFO-backed keyval cache, plus naming checks."""
    store = cache.keyval_cache(cache.FifoCache(3))
    store.put(1, 11)
    self.assertEqual(store.get(1), 11)
    # Overwriting a key replaces the value in place
    store.put(1, 12)
    self.assertEqual(store.get(1), 12)
    self.assertEqual(store.dump(), [(1, 12)])
    store.put(2, 21)
    self.assertTrue(store.has(1))
    self.assertTrue(store.has(2))
    store.put(3, 31)
    # Fourth insertion overflows the cache; put returns the evicted pair
    evicted_key, evicted_val = store.put(4, 41)
    self.assertEqual((evicted_key, evicted_val), (1, 12))
    store.clear()
    self.assertEqual(len(store), 0)
    # Wrapped methods must retain their original names
    for attr in ('get', 'put', 'dump', 'clear'):
        self.assertEqual(getattr(store, attr).__name__, attr)
def test_zero_val_lru(self):
    """Storing a falsy value (0) then overwriting it must not raise."""
    lru = cache.keyval_cache(cache.LruCache(10))
    for key, val in [(10, 0), (10, 1)]:
        lru.put(key, val)
def test_zero_val_lru(self):
    """A zero value followed by an overwrite must be accepted by the LRU wrapper."""
    lru = cache.keyval_cache(cache.LruCache(10))
    requests = [(10, 0), (10, 1)]
    for key, val in requests:
        lru.put(key, val)
def __init__(self, topology, cache_policy, shortest_path=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    cache_policy : dict or Tree
        cache policy descriptor. It has the name attribute which identify
        the cache policy name and keyworded arguments specific to the policy
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = shortest_path if shortest_path is not None \
                         else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
    # Network topology
    self.topology = topology
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary of cache sizes keyed by node
    self.cache_size = {}
    # Dictionary of RSN table sizes keyed by node
    self.rsn_size = {}
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology, 'type')
    self.link_delay = fnss.get_delays(topology)
    # Instead of this manual assignment, I could have converted the
    # topology to directed before extracting type and link delay but that
    # requires a deep copy of the topology that can take long time if
    # many content source mappings are included in the topology
    if not topology.is_directed():
        for (u, v), link_type in self.link_type.items():
            self.link_type[(v, u)] = link_type
        for (u, v), delay in self.link_delay.items():
            self.link_delay[(v, u)] = delay
    # Initialize attributes
    for node in topology.nodes_iter():
        stack_name, stack_props = fnss.get_stack(topology, node)
        if stack_name == 'router':
            if 'cache_size' in stack_props:
                self.cache_size[node] = stack_props['cache_size']
            if 'rsn_size' in stack_props:
                self.rsn_size[node] = stack_props['rsn_size']
        elif stack_name == 'source':
            # Onur: guard against sources declared without a 'contents'
            # property (fix: this note lost its comment marker in the
            # original and the three lines below were not indented under
            # the if as it instructed)
            if 'contents' in stack_props:
                contents = stack_props['contents']
                for content in contents:
                    self.content_source[content] = node
    # Caches and RSN tables of size 0 would break the cache constructors:
    # bump them to 1 and warn rather than abort the whole experiment
    if any(c < 1 for c in self.cache_size.values()):
        logger.warn('Some content caches have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in self.cache_size:
            if self.cache_size[node] < 1:
                self.cache_size[node] = 1
    if any(c < 1 for c in self.rsn_size.values()):
        logger.warn('Some RSN tables have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in self.rsn_size:
            if self.rsn_size[node] < 1:
                self.rsn_size[node] = 1
    # Split the policy descriptor into its name and its keyword arguments
    policy_name = cache_policy['name']
    policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
    # The actual cache and RSN objects storing the content
    self.cache = {node: CACHE_POLICY[policy_name](self.cache_size[node], **policy_args)
                  for node in self.cache_size}
    # RSN and cache must have the same cache eviction policy
    self.rsn = {node: keyval_cache(CACHE_POLICY[policy_name](size, **policy_args), size)
                for node, size in self.rsn_size.iteritems()}