def handle_event(self, time, event):
    # get all required data
    receiver = event['receiver']
    content = event['content']
    log = event['log']
    source = self.content_location[content]
    path = self.shortest_path[receiver][source]
    # handle (and log if required) actual request
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        if log:
            self.link_logger.log_link_info(time, u, v, PACKET_TYPE_INTEREST,
                                           content, self.link_type[(u, v)])
        if v == source:
            if log:
                self.cache_logger.log_cache_info(time, EVENT_SERVER_HIT,
                                                 content, receiver, 'N/A', source)
            serving_node = v
            break
        if get_stack(self.topology, v)[0] == 'cache':
            if self.caches[v].has_content(content):
                if log:
                    self.cache_logger.log_cache_info(time, EVENT_CACHE_HIT,
                                                     content, receiver, v, source)
                serving_node = v
                break
    # deliver the content back along the reverse path, caching probabilistically
    path = self.shortest_path[serving_node][receiver]
    c = len(path) - 1  # number of hops from serving node to receiver
    # N: cumulative cache capacity along the delivery path
    N = sum(self.cache_size[v] for v in path
            if get_stack(self.topology, v)[0] == 'cache')
    x = 0  # number of caches traversed so far
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        if get_stack(self.topology, v)[0] == 'cache':
            x += 1
        if log:
            self.link_logger.log_link_info(time, u, v, PACKET_TYPE_DATA,
                                           content, self.link_type[(u, v)])
        if v != receiver and get_stack(self.topology, v)[0] == 'cache':
            prob_cache = float(N) / (self.t_tw * self.cache_size[v]) \
                       * (float(x) / float(c)) ** c
            if random.random() < prob_cache:
                self.caches[v].store(content)  # insert content
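# A minimal standalone sketch of the caching weight used above (the
# ProbCache weighting). The function name and sample values below are
# illustrative assumptions, not part of the simulator; note the weight can
# exceed 1, in which case the comparison with random.random() always caches.
def probcache_probability(N, t_tw, cache_size_v, x, c):
    """Weight for node v, the x-th cache on a c-hop delivery path whose
    caches have cumulative capacity N."""
    return float(N) / (t_tw * cache_size_v) * (float(x) / float(c)) ** c

# Example: 4-hop path, three caches of 100 slots each, t_tw = 10.
# Caches closer to the receiver (larger x) get a higher weight.
for x in (1, 2, 3):
    p = probcache_probability(N=300, t_tw=10, cache_size_v=100, x=x, c=4)
    print(x, round(p, 4))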
def test_read_write_topology(self):
    tmp_topo_file = path.join(TMP_DIR, 'toporw.xml')
    fnss.write_topology(self.G, tmp_topo_file)
    self.assertTrue(path.exists(tmp_topo_file))
    read_topo = fnss.read_topology(tmp_topo_file)
    self.assertEqual(len(self.G), len(read_topo))
    self.assertEqual(self.G.number_of_edges(), read_topo.number_of_edges())
    self.assertEqual('tcp', fnss.get_stack(read_topo, 2)[0])
    self.assertEqual(1024, fnss.get_stack(read_topo, 2)[1]['rcvwnd'])
    self.assertEqual('cubic', fnss.get_stack(read_topo, 2)[1]['protocol'])
    self.assertEqual(len(fnss.get_application_names(self.G, 2)),
                     len(fnss.get_application_names(read_topo, 2)))
    self.assertEqual('fnss',
                     fnss.get_application_properties(read_topo, 2, 'server')['user-agent'])
    self.assertEqual([2, 4, 6],
                     [v for v in read_topo.nodes()
                      if fnss.get_stack(read_topo, v) is not None
                      and fnss.get_stack(read_topo, v)[0] == 'tcp'])
    self.assertEqual([2, 4],
                     [v for v in read_topo.nodes()
                      if 'client' in fnss.get_application_names(read_topo, v)])
    self.assertEqual([2],
                     [v for v in read_topo.nodes()
                      if 'server' in fnss.get_application_names(read_topo, v)])
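# A minimal round-trip sketch of the fnss topology I/O exercised by the test
# above: build a small topology, attach a stack, write it to XML and read it
# back. The file path and attribute values are illustrative assumptions.
import fnss

topo = fnss.Topology()
topo.add_edge(1, 2)
topo.add_edge(2, 3)
fnss.add_stack(topo, 2, 'tcp', {'protocol': 'cubic', 'rcvwnd': 1024})
fnss.write_topology(topo, '/tmp/example_topo.xml')
restored = fnss.read_topology('/tmp/example_topo.xml')
assert fnss.get_stack(restored, 2)[0] == 'tcp'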
def test_add_stack_kw_attr(self):
    fnss.add_stack(self.topo, 1, 's_name', att1='val1')
    self.assertEqual(fnss.get_stack(self.topo, 1, data=False), 's_name')
    self.assertEqual(fnss.get_stack(self.topo, 1, data=True),
                     ('s_name', {'att1': 'val1'}))
def test_add_stack_mixed_attr(self):
    fnss.add_stack(self.topo, 1, 's_name', {'att1': 'val1'}, att2='val2')
    self.assertEqual(fnss.get_stack(self.topo, 1, data=False), 's_name')
    self.assertEqual(fnss.get_stack(self.topo, 1, data=True),
                     ('s_name', {'att1': 'val1', 'att2': 'val2'}))
def handle_event(self, time, event):
    # get all required data
    receiver = event['receiver']
    content = event['content']
    log = event['log']
    source = self.content_location[content]
    path = self.shortest_path[receiver][source]
    # handle (and log if required) actual request
    req_delay = 0
    resp_delay = 0
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        if log:
            self.link_logger.log_link_info(time, u, v, PACKET_TYPE_INTEREST,
                                           content, self.link_type[(u, v)])
        req_delay += self.topology.edge[u][v]['delay']
        # new logging: record hit events through cache_logger_new together
        # with the hop count between the serving node and the receiver
        # (replaces the old per-event cache_logger calls)
        if v == source:
            path = self.shortest_path[v][receiver]
            hc = len(path) - 2  # number of intermediate nodes to the receiver
            repo = 'repo_' + str(v)
            if log:
                self.cache_logger_new.log_cache_info_new(time, repo, 'Hit_Event',
                                                         content, hc)
            serving_node = v
            break
        if get_stack(self.topology, v)[0] == 'cache':
            if self.caches[v].has_content(content):
                path = self.shortest_path[v][receiver]
                hc = len(path) - 2
                cache = 'cache_' + str(v)
                if log:
                    self.cache_logger_new.log_cache_info_new(time, cache, 'Hit_Event',
                                                             content, hc)
                serving_node = v
                break
    path = self.shortest_path[serving_node][receiver]
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        if log:
            self.link_logger.log_link_info(time, u, v, PACKET_TYPE_DATA,
                                           content, self.link_type[(u, v)])
        resp_delay += self.topology.edge[u][v]['delay']
        if v != receiver and get_stack(self.topology, v)[0] == 'cache':
            self.caches[v].store(content)  # insert content
    if log:
        self.delay_logger.log_delay_info(time, receiver, source, content,
                                         req_delay, resp_delay)
def __init__(self, topology, shortest_path=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = shortest_path if shortest_path is not None \
                         else nx.all_pairs_shortest_path(topology)
    # Network topology
    self.topology = topology
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary of cache sizes keyed by node
    self.cache_size = {}
    self.colla_table = {}
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology.to_directed(), 'type')
    self.link_delay = fnss.get_delays(topology.to_directed())
    policy_name = topology.graph['cache_policy']
    # Initialize attributes
    for node in topology.nodes_iter():
        stack_name, stack_props = fnss.get_stack(topology, node)
        if stack_name == 'cache':
            self.cache_size[node] = stack_props['size']
            self.colla_table[node] = {}
        elif stack_name == 'source':
            contents = stack_props['contents']
            for content in contents:
                self.content_source[content] = node
    # The actual cache objects storing the content (self.cache_size was
    # already populated above, so there is no need to scan the stacks again)
    self.caches = dict((node, cache_policy_register[policy_name](self.cache_size[node]))
                       for node in self.cache_size)
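# The constructor above instantiates caches by looking the policy name up in
# a registry (cache_policy_register). A minimal sketch of that pattern, with
# a toy LRU cache standing in for the real implementations; the class and
# registry contents here are illustrative assumptions, not the simulator's.
from collections import OrderedDict

class ToyLRUCache(object):
    def __init__(self, maxlen):
        self._cache = OrderedDict()
        self._maxlen = maxlen

    def has_content(self, k):
        return k in self._cache

    def store(self, k):
        self._cache.pop(k, None)
        self._cache[k] = True                # most recently used at the end
        if len(self._cache) > self._maxlen:
            self._cache.popitem(last=False)  # evict least recently used

cache_policy_register = {'LRU': ToyLRUCache}
caches = {node: cache_policy_register['LRU'](size)
          for node, size in {1: 10, 2: 20}.items()}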
def get_content(self, node, content=None):
    """Get a content from a server or a cache.

    Parameters
    ----------
    node : any hashable type
        The node where the content is retrieved
    content : any hashable type, optional
        The content to retrieve; defaults to the content of the current session

    Returns
    -------
    content : bool
        True if the content is available, False otherwise
    """
    # The two original branches differed only in which content was looked up,
    # so they are merged here with identical behavior
    if content is None:
        content = self.session['content']
    if node in self.model.cache:
        cache_hit = self.model.cache[node].get(content)
        if self.session['log']:
            if cache_hit:
                self.collector.cache_hit(node)
            else:
                self.collector.cache_miss(node)
        return cache_hit
    name, props = fnss.get_stack(self.model.topology, node)
    if name == 'source' and content in props['contents']:
        if self.collector is not None and self.session['log']:
            self.collector.server_hit(node)
        return True
    else:
        return False
def test_add_get_remove_stack(self):
    for v in self.topo.nodes():
        self.assertIsNone(fnss.get_stack(self.topo, v))
    fnss.add_stack(self.topo, 12, self.stack_1_name, self.stack_1_props)
    self.assertEqual(2, len(fnss.get_stack(self.topo, 12)))
    self.assertIsNone(fnss.get_stack(self.topo, 3))
    self.assertEqual(self.stack_1_name, fnss.get_stack(self.topo, 12)[0])
    self.assertEqual(self.stack_1_props, fnss.get_stack(self.topo, 12)[1])
    fnss.add_stack(self.topo, 12, self.stack_1_name, self.stack_2_props)
    self.assertEqual(self.stack_1_name, fnss.get_stack(self.topo, 12)[0])
    self.assertEqual(self.stack_2_props, fnss.get_stack(self.topo, 12)[1])
    fnss.add_stack(self.topo, 12, self.stack_2_name, self.stack_2_props)
    self.assertEqual(self.stack_2_name, fnss.get_stack(self.topo, 12)[0])
    self.assertEqual(self.stack_2_props, fnss.get_stack(self.topo, 12)[1])
    fnss.remove_stack(self.topo, 12)
    self.assertIsNone(fnss.get_stack(self.topo, 12))
def get_content(self, node):
    """Get a content from a server or a cache.

    Parameters
    ----------
    node : any hashable type
        The node where the content is retrieved

    Returns
    -------
    content : bool
        True if the content is available, False otherwise
    """
    if node in self.model.cache:
        cache_hit = self.model.cache[node].get(self.session['content'],
                                               self.session['weight'])
        if self.session['log']:
            if cache_hit:
                self.collector.cache_hit(node)
            else:
                self.collector.cache_miss(node)
        return cache_hit
    name, props = fnss.get_stack(self.model.topology, node)
    if name == 'source' and self.session['content'] in props['contents']:
        if self.collector is not None and self.session['log']:
            self.collector.server_hit(node)
        return True
    else:
        return False
def handle_event(self, time, event):
    """ Handle request """
    # get all required data
    receiver = event['receiver']
    content = event['content']
    log = event['log']
    source = self.content_location[content]
    path = self.shortest_path[receiver][source]
    # handle (and log if required) actual request
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        if log:
            self.link_logger.log_link_info(time, u, v, PACKET_TYPE_INTEREST,
                                           content, self.link_type[(u, v)])
        if v == source:
            if log:
                self.cache_logger.log_cache_info(time, EVENT_SERVER_HIT,
                                                 content, receiver, 'N/A', source)
            serving_node = v
            break
        if get_stack(self.topology, v)[0] == 'cache':
            if self.caches[v].has_content(content):
                if log:
                    self.cache_logger.log_cache_info(time, EVENT_CACHE_HIT,
                                                     content, receiver, v, source)
                serving_node = v
                break
    path = self.shortest_path[serving_node][receiver]
    # get the cache with maximum betweenness centrality; if more than one
    # cache has the maximum betweenness, pick the one closer to the receiver
    # (the >= comparison keeps the later, i.e. closer, node on ties; note
    # max_betw must be updated inside the loop or every cache would be kept)
    max_betw = -1
    selected_cache = None
    for v in path:
        if get_stack(self.topology, v)[0] == 'cache':
            if self.betw[v] >= max_betw:
                max_betw = self.betw[v]
                selected_cache = v
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        if v == selected_cache:
            self.caches[v].store(content)
        if log:
            self.link_logger.log_link_info(time, u, v, PACKET_TYPE_DATA,
                                           content, self.link_type[(u, v)])
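# A standalone sketch of the cache-selection rule above: scan the delivery
# path from server to receiver and keep the cache with the highest
# betweenness, breaking ties in favour of the node closer to the receiver.
# The path, centrality values and helper names are illustrative.
def select_cache(path, betw, is_cache):
    max_betw = -1
    selected = None
    for v in path:
        if is_cache(v) and betw[v] >= max_betw:
            max_betw = betw[v]
            selected = v
    return selected

path = ['src', 'a', 'b', 'c', 'rcv']
betw = {'a': 0.7, 'b': 0.7, 'c': 0.2}
print(select_cache(path, betw, lambda v: v in betw))  # 'b': ties go downstream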
def handle_event(self, time, event):
    # get all required data
    receiver = event['receiver']
    content = event['content']
    log = event['log']
    source = self.content_location[content]
    path = self.shortest_path[receiver][source]
    # handle (and log if required) actual request
    req_delay = 0
    resp_delay = 0
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        if log:
            self.link_logger.log_link_info(time, u, v, PACKET_TYPE_INTEREST,
                                           content, self.link_type[(u, v)])
        req_delay += self.topology.edge[u][v]['delay']
        if v == source:
            if log:
                self.cache_logger.log_cache_info(time, EVENT_SERVER_HIT,
                                                 content, receiver, 'N/A', source)
            serving_node = v
            break
        if get_stack(self.topology, v)[0] == 'cache':
            if self.caches[v].has_content(content):
                if log:
                    self.cache_logger.log_cache_info(time, EVENT_CACHE_HIT,
                                                     content, receiver, v, source)
                serving_node = v
                break
    path = self.shortest_path[serving_node][receiver]
    for hop in range(1, len(path)):
        u = path[hop - 1]
        v = path[hop]
        if log:
            self.link_logger.log_link_info(time, u, v, PACKET_TYPE_DATA,
                                           content, self.link_type[(u, v)])
        resp_delay += self.topology.edge[u][v]['delay']
        if v != receiver and get_stack(self.topology, v)[0] == 'cache':
            self.caches[v].store(content)  # insert content
    if log:
        self.delay_logger.log_delay_info(time, receiver, source, content,
                                         req_delay, resp_delay)
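# A minimal sketch of the delay bookkeeping above: walk a path hop by hop
# and sum per-edge delays for the request leg and the reversed response leg.
# The edge delays are illustrative values.
def path_delay(path, delay):
    return sum(delay[(path[i - 1], path[i])] for i in range(1, len(path)))

delay = {('r', 'a'): 2, ('a', 's'): 5, ('s', 'a'): 5, ('a', 'r'): 2}
req_path = ['r', 'a', 's']
resp_path = list(reversed(req_path))
print(path_delay(req_path, delay), path_delay(resp_path, delay))  # 7 7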
def __init__(self, topology, log_dir, scenario_id):
    """ Constructor """
    # Create logging objects.
    # When running on clay or the EE lab machines, open logging files in
    # /tmp, then compress and move them to home once the simulation is over.
    # This avoids frequent NFS writes: /tmp is on the local drive, so logs
    # are copied to home (NFS) only once. Compression may be needed on the
    # EE lab machines because of a 5 GB quota that logs might exceed.
    self.log_dir = log_dir
    self.scenario_id = scenario_id
    self.link_logger = LinkLogger(path.join(log_dir,
                                            'RESULTS_%s_LINK.txt' % scenario_id))
    #self.cache_logger = CacheLogger(path.join(log_dir, 'RESULTS_%s_CACHE.txt' % scenario_id))
    self.cache_logger_new = CacheLoggerNew(path.join(log_dir,
                                                     '%s.out' % scenario_id))
    self.delay_logger = DelayLogger(path.join(log_dir,
                                              'RESULTS_%s_DELAY.txt' % scenario_id))
    self.topology = topology
    # calculate shortest paths
    self.shortest_path = nx.all_pairs_dijkstra_path(topology, weight='weight')
    # get location of caches and content sources
    self.content_location = {}  # dict of location of contents keyed by content ID
    self.cache_size = {}        # dict of cache sizes keyed by node
    # Link type: internal or external
    self.link_type = dict(((u, v), topology.edge[u][v]['type'])
                          for u in topology.edge for v in topology.edge[u])
    for node in topology.nodes_iter():
        stack_name, stack_props = get_stack(topology, node)
        if stack_name == 'cache':
            self.cache_size[node] = stack_props['size']
        elif stack_name == 'source':
            contents = stack_props['contents']
            for content in contents:
                self.content_location[content] = node
    # create actual cache objects
    self.caches = dict((node, Cache(self.cache_size[node]))
                       for node in self.cache_size)
def __init__(self, topology, log_dir, scenario_id):
    """ Constructor """
    # Create logging objects.
    # When running on clay or the EE lab machines, open logging files in
    # /tmp, then compress and move them to home once the simulation is over.
    # This avoids frequent NFS writes: /tmp is on the local drive, so logs
    # are copied to home (NFS) only once. Compression may be needed on the
    # EE lab machines because of a 5 GB quota that logs might exceed.
    self.log_dir = log_dir
    self.scenario_id = scenario_id
    self.link_logger = LinkLogger(path.join(log_dir,
                                            'RESULTS_%s_LINK.txt' % scenario_id))
    self.cache_logger = CacheLogger(path.join(log_dir,
                                              'RESULTS_%s_CACHE.txt' % scenario_id))
    self.delay_logger = DelayLogger(path.join(log_dir,
                                              'RESULTS_%s_DELAY.txt' % scenario_id))
    self.topology = topology
    # calculate shortest paths
    self.shortest_path = nx.all_pairs_dijkstra_path(topology, weight='weight')
    # get location of caches and content sources
    self.content_location = {}  # dict of location of contents keyed by content ID
    self.cache_size = {}        # dict of cache sizes keyed by node
    # Link type: internal or external
    self.link_type = dict(((u, v), topology.edge[u][v]['type'])
                          for u in topology.edge for v in topology.edge[u])
    for node in topology.nodes_iter():
        stack_name, stack_props = get_stack(topology, node)
        if stack_name == 'cache':
            self.cache_size[node] = stack_props['size']
        elif stack_name == 'source':
            contents = stack_props['contents']
            for content in contents:
                self.content_location[content] = node
    # create actual cache objects
    self.caches = dict((node, Cache(self.cache_size[node]))
                       for node in self.cache_size)
def __init__(self, topology, cache_policy, betw=None, shortest_path=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    cache_policy : dict or Tree
        cache policy descriptor. It has the name attribute which identifies
        the cache policy name and keyworded arguments specific to the policy
    betw : dict, optional
        Betweenness centrality of each node, used to pick the cache policy
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = shortest_path if shortest_path is not None \
                         else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
    # Network topology
    self.topology = topology
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary mapping the reverse, i.e. nodes to set of contents stored
    self.source_node = {}
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology, 'type')
    self.link_delay = fnss.get_delays(topology)
    # Instead of this manual assignment, I could have converted the
    # topology to directed before extracting type and link delay but that
    # requires a deep copy of the topology that can take long time if
    # many content source mappings are included in the topology
    if not topology.is_directed():
        for (u, v), link_type in list(self.link_type.items()):
            self.link_type[(v, u)] = link_type
        for (u, v), delay in list(self.link_delay.items()):
            self.link_delay[(v, u)] = delay
    cache_size = {}
    for node in topology.nodes_iter():
        stack_name, stack_props = fnss.get_stack(topology, node)
        if stack_name == 'router':
            if 'cache_size' in stack_props:
                cache_size[node] = stack_props['cache_size']
        elif stack_name == 'source':
            contents = stack_props['contents']
            self.source_node[node] = contents
            for content in contents:
                self.content_source[content] = node
    if any(c < 1 for c in cache_size.values()):
        logger.warn('Some content caches have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in cache_size:
            if cache_size[node] < 1:
                cache_size[node] = 1
    policy_name = cache_policy['name']
    policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
    # Compute maximum and average betweenness, then use the average as the
    # threshold separating "central" routers from the rest
    max_betw = -1
    avg_betw = 0
    for node in betw:
        avg_betw += betw[node]
        if betw[node] > max_betw:
            max_betw = betw[node]
    avg_betw /= len(betw)
    # kept for the alternative (disabled) rank-based threshold: sorted_betw[2][1]
    sorted_betw = sorted(betw.items(), key=lambda value: value[1], reverse=True)
    thres_betw = avg_betw
    # The actual cache objects storing the content: under the HYBRID policy,
    # routers whose betweenness is at least the threshold run LFU_1 and all
    # others run LRU; otherwise every router runs the configured policy
    hybrid = (policy_name == 'HYBRID')
    if hybrid:
        self.cache = {}
        for node in cache_size:
            if betw[node] >= thres_betw:
                self.cache[node] = CACHE_POLICY['LFU_1'](cache_size[node],
                                                         **policy_args)
            else:
                self.cache[node] = CACHE_POLICY['LRU'](cache_size[node],
                                                       **policy_args)
    else:
        self.cache = {node: CACHE_POLICY[policy_name](cache_size[node],
                                                      central_router=(betw[node] > avg_betw),
                                                      betw=betw[node],
                                                      avg_betw=avg_betw,
                                                      **policy_args)
                      for node in cache_size}
    # This is for a local un-coordinated cache (currently used only by
    # Hashrouting with edge cache)
    self.local_cache = {}
    # Keep track of nodes and links removed to simulate failures
    self.removed_nodes = {}
    # This keeps track of neighbors of a removed node at the time of removal.
    # It is needed to ensure that when the node is restored only links that
    # were removed as part of the node removal are restored and to prevent
    # restoring nodes that were removed manually before removing the node.
    self.disconnected_neighbors = {}
    self.removed_links = {}
    self.removed_sources = {}
    self.removed_caches = {}
    self.removed_local_caches = {}
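# A standalone sketch of the HYBRID policy split above: compute the average
# betweenness and give nodes at or above it one replacement policy and the
# rest another. Policy names come from the constructor; the centrality
# values are illustrative.
betw = {'a': 0.9, 'b': 0.5, 'c': 0.1}
avg_betw = sum(betw.values()) / len(betw)  # 0.5
assignment = {node: ('LFU_1' if betw[node] >= avg_betw else 'LRU')
              for node in betw}
print(assignment)  # {'a': 'LFU_1', 'b': 'LFU_1', 'c': 'LRU'}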
def __init__(self, topology, cache_policy, shortest_path=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    cache_policy : dict or Tree
        cache policy descriptor. It has the name attribute which identifies
        the cache policy name and keyworded arguments specific to the policy
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = shortest_path if shortest_path is not None \
                         else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
    # Network topology
    self.topology = topology
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary mapping the reverse, i.e. nodes to set of contents stored
    self.source_node = {}
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology, 'type')
    self.link_delay = fnss.get_delays(topology)
    # Instead of this manual assignment, I could have converted the
    # topology to directed before extracting type and link delay but that
    # requires a deep copy of the topology that can take long time if
    # many content source mappings are included in the topology
    if not topology.is_directed():
        for (u, v), link_type in list(self.link_type.items()):
            self.link_type[(v, u)] = link_type
        for (u, v), delay in list(self.link_delay.items()):
            self.link_delay[(v, u)] = delay
    cache_size = {}
    for node in topology.nodes_iter():
        stack_name, stack_props = fnss.get_stack(topology, node)
        if stack_name == 'router':
            if 'cache_size' in stack_props:
                cache_size[node] = stack_props['cache_size']
        elif stack_name == 'receiver':
            if 'cache_size' in stack_props:
                cache_size[node] = stack_props['cache_size']
        elif stack_name == 'source':
            if 'contents' in stack_props:
                contents = stack_props['contents']
                self.source_node[node] = contents
                for content in contents:
                    self.content_source[content] = node
    if any(c < 1 for c in cache_size.values()):
        logger.warn('Some content caches have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in cache_size:
            if cache_size[node] < 1:
                cache_size[node] = 1
    policy_name = cache_policy['name']
    policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
    # The actual cache objects storing the content
    self.cache = {node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
                  for node in cache_size}
    # Content-Download Information (Key=content, Value=number of downloads)
    self.central_download_table = collections.Counter()
    self.user_download_table = collections.Counter()
    # Content-Caching Information (Key=content, Value=number of times being cached)
    self.central_cache_table = collections.Counter()
    self.user_cache_table = collections.Counter()
    # This is for a local un-coordinated cache (currently used only by
    # Hashrouting with edge cache)
    self.local_cache = {}
    # Keep track of nodes and links removed to simulate failures
    self.removed_nodes = {}
    # This keeps track of neighbors of a removed node at the time of removal.
    # It is needed to ensure that when the node is restored only links that
    # were removed as part of the node removal are restored and to prevent
    # restoring nodes that were removed manually before removing the node.
    self.disconnected_neighbors = {}
    self.removed_links = {}
    self.removed_sources = {}
    self.removed_caches = {}
    self.removed_local_caches = {}
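# These constructors rely on a helper, symmetrify_paths, to make the
# all-pairs Dijkstra output symmetric (with ties, the reverse of a chosen
# shortest path is not guaranteed to be the chosen path in the other
# direction). A minimal sketch of what such a helper could look like; this
# is an assumption about its behaviour, not the simulator's actual code.
def symmetrify_paths(shortest_paths):
    """Force path[u][v] to be the reverse of path[v][u] for all pairs."""
    for u in shortest_paths:
        for v in shortest_paths[u]:
            if u <= v:  # fix each unordered pair once (assumes orderable nodes)
                shortest_paths[u][v] = list(reversed(shortest_paths[v][u]))
    return shortest_paths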
def test_clear_stacks(self):
    for v in self.topo.nodes_iter():
        fnss.add_stack(self.topo, v, self.stack_1_name, self.stack_1_props)
    fnss.clear_stacks(self.topo)
    for v in self.topo.nodes_iter():
        self.assertIsNone(fnss.get_stack(self.topo, v))
def __init__(self, topology, cache_policy, n_contents=0, shortest_path=None,
             rl_algorithm=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    cache_policy : dict or Tree
        cache policy descriptor. It has the name attribute which identifies
        the cache policy name and keyworded arguments specific to the policy
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    rl_algorithm : dict of dict, optional
        Name of the RL algorithm used along with initialization parameters
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = dict(shortest_path) if shortest_path is not None \
                         else symmetrify_paths(dict(nx.all_pairs_dijkstra_path(topology)))
    # Network topology
    self.topology = topology
    # Number of contents
    self.n_contents = n_contents
    self.POPULARITY = self.POPULARITY or {i: 0 for i in range(1, n_contents + 1)}
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary mapping the reverse, i.e. nodes to set of contents stored
    self.source_node = {}
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology, 'type')
    self.link_delay = fnss.get_delays(topology)
    # Instead of this manual assignment, I could have converted the
    # topology to directed before extracting type and link delay but that
    # requires a deep copy of the topology that can take long time if
    # many content source mappings are included in the topology
    if not topology.is_directed():
        for (u, v), link_type in list(self.link_type.items()):
            self.link_type[(v, u)] = link_type
        for (u, v), delay in list(self.link_delay.items()):
            self.link_delay[(v, u)] = delay
    cache_size = {}
    for node in topology.nodes():
        stack_name, stack_props = fnss.get_stack(topology, node)
        if stack_name == 'router':
            if 'cache_size' in stack_props:
                cache_size[node] = stack_props['cache_size']
        elif stack_name == 'source':
            contents = stack_props['contents']
            self.source_node[node] = contents
            for content in contents:
                self.content_source[content] = node
    if any(c < 1 for c in cache_size.values()):
        logger.warn('Some content caches have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in cache_size:
            if cache_size[node] < 1:
                cache_size[node] = 1
    policy_name = cache_policy['name']
    policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
    # The actual cache objects storing the content
    self.cache = {node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
                  for node in cache_size}
    # The intelligent agent for each router using RL
    self.ai_models = {}
    if rl_algorithm:
        if self.CACHE:
            for _, model in NetworkModel.CACHE.items():
                model.network_model = self
            self.ai_models = NetworkModel.CACHE
        else:
            # one state per (cache occupancy pattern, requested content) pair
            # and one action per possible cache configuration
            states = (2 ** n_contents) * n_contents
            actions = 2 ** n_contents
            self.CACHE = self.ai_models = {
                node: RL_ALGO[rl_algorithm['name']](states, actions, node=node,
                                                    network_model=self)
                for node in cache_size}
    # In case of AI this dict stores observations to train the model with
    # the current state, which would be the "next_state"
    self.observations = {}
    # This is for a local un-coordinated cache (currently used only by
    # Hashrouting with edge cache)
    self.local_cache = {}
    # Keep track of nodes and links removed to simulate failures
    self.removed_nodes = {}
    # This keeps track of neighbors of a removed node at the time of removal.
    # It is needed to ensure that when the node is restored only links that
    # were removed as part of the node removal are restored and to prevent
    # restoring nodes that were removed manually before removing the node.
    self.disconnected_neighbors = {}
    self.removed_links = {}
    self.removed_sources = {}
    self.removed_caches = {}
    self.removed_local_caches = {}
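# A quick numeric check of the state/action sizing above: with each router's
# cache observed as a bitmap over n_contents plus the identity of the
# requested content, there are (2**n_contents) * n_contents states and
# 2**n_contents cache configurations (actions). Illustrative numbers only;
# this grows far too fast for anything but tiny catalogues.
for n_contents in (2, 4, 8, 16):
    states = (2 ** n_contents) * n_contents
    actions = 2 ** n_contents
    print(n_contents, states, actions)
# a tabular agent would need a states x actions Q-table,
# e.g. 1048576 x 65536 entries already at n_contents = 16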
def __init__(self, topology, cache_policy, sched_policy, n_services, rate,
             seed=0, shortest_path=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    cache_policy : dict or Tree
        cache policy descriptor. It has the name attribute which identifies
        the cache policy name and keyworded arguments specific to the policy
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = shortest_path if shortest_path is not None \
                         else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
    # Network topology
    self.topology = topology
    self.topology_depth = 0
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary mapping the reverse, i.e. nodes to set of contents stored
    self.source_node = {}
    # A heap with events (see Event class above)
    self.eventQ = []
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology, 'type')
    self.link_delay = fnss.get_delays(topology)
    # Instead of this manual assignment, I could have converted the
    # topology to directed before extracting type and link delay but that
    # requires a deep copy of the topology that can take long time if
    # many content source mappings are included in the topology
    if not topology.is_directed():
        for (u, v), link_type in list(self.link_type.items()):
            self.link_type[(v, u)] = link_type
        for (u, v), delay in list(self.link_delay.items()):
            self.link_delay[(v, u)] = delay
    cache_size = {}
    comp_size = {}
    service_size = {}
    self.rate = rate
    for node in topology.nodes_iter():
        stack_name, stack_props = fnss.get_stack(topology, node)
        # get the depth of the tree (the depth is a node attribute, hence
        # the lookup in topology.node[node] rather than topology[node],
        # which holds the adjacency dict)
        if stack_name == 'router' and 'depth' in self.topology.node[node]:
            depth = self.topology.node[node]['depth']
            if depth > self.topology_depth:
                self.topology_depth = depth
        # get cache, computation and service sizes per node
        if stack_name == 'router':
            if 'cache_size' in stack_props:
                cache_size[node] = stack_props['cache_size']
            if 'computation_size' in stack_props:
                comp_size[node] = stack_props['computation_size']
            if 'service_size' in stack_props:
                service_size[node] = stack_props['service_size']
        elif stack_name == 'source':
            # A Cloud with infinite resources
            comp_size[node] = float('inf')
            service_size[node] = float('inf')
            contents = stack_props['contents']
            self.source_node[node] = contents
            for content in contents:
                self.content_source[content] = node
    if any(c < 1 for c in cache_size.values()):
        logger.warn('Some content caches have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in cache_size:
            if cache_size[node] < 1:
                cache_size[node] = 1
    policy_name = cache_policy['name']
    policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
    # The actual cache objects storing the content
    self.cache = {node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
                  for node in cache_size}
    # Generate the actual services processing requests
    self.services = []
    self.n_services = n_services
    internal_link_delay = 0.001  # This is the delay from receiver to router
    service_time_min = 0.10  # used to be 0.001
    service_time_max = 0.10  # used to be 0.1
    # delay_min = 0.005
    delay_min = 2 * topology.graph['receiver_access_delay'] + service_time_max
    delay_max = delay_min \
              + 2 * topology.graph['depth'] * topology.graph['link_delay'] + 0.005
    service_indx = 0
    random.seed(seed)
    for service in range(0, n_services):
        service_time = random.uniform(service_time_min, service_time_max)
        # service_time = 2 * random.uniform(service_time_min, service_time_max)
        deadline = random.uniform(delay_min, delay_max) + 2 * internal_link_delay
        # deadline = service_time + 1.5 * (random.uniform(delay_min, delay_max)
        #                                  + 2 * internal_link_delay)
        s = Service(service_time, deadline)
        self.services.append(s)
    # END OF Generating Services

    # Prepare input for the optimizer (disabled; the write calls in this dead
    # block originally wrote an undefined variable s and rebuilt tostr
    # instead of appending to it, fixed here)
    if False:
        aFile = open('inputToOptimizer.txt', 'w')
        aFile.write("# 1. ServiceIDs\n")
        tostr = ",".join(str(service) for service in range(0, n_services))
        aFile.write(tostr + '\n')
        aFile.write("# 2. Set of APs:\n")
        tostr = ",".join(str(ap) for ap in topology.graph['receivers'])
        aFile.write(tostr + '\n')
        aFile.write("# 3. Set of nodes:\n")
        tostr = ",".join(str(node) for node in topology.nodes_iter()
                         if node not in topology.graph['receivers'])
        aFile.write(tostr + '\n')
        aFile.write("# 4. NodeID, serviceID, numCores\n")
        if topology.graph['type'] == 'TREE':
            ap_node_to_services = {}
            ap_node_to_delay = {}
            for ap in topology.graph['receivers']:
                node_to_delay = {}
                node_to_services = {}
                node_to_delay[ap] = 0.0
                ap_node_to_services[ap] = node_to_services
                ap_node_to_delay[ap] = node_to_delay
                # repeated edge sweeps propagate delays down the tree
                for node in topology.nodes_iter():
                    for egress, ingress in topology.edges_iter():
                        if ingress in node_to_delay.keys() \
                                and egress not in node_to_delay.keys():
                            node_to_delay[egress] = node_to_delay[ingress] \
                                + topology.edge[ingress][egress]['delay']
                            node_to_services[egress] = []
                            service_indx = 0
                            for s in self.services:
                                if s.deadline >= (s.service_time
                                                  + 2 * node_to_delay[egress]):
                                    node_to_services[egress].append(service_indx)
                                service_indx += 1
            aFile.write("# 4. Ap,Node,service1,service2, ....]\n")
            for ap in topology.graph['receivers']:
                node_to_services = ap_node_to_services[ap]
                node_to_delay = ap_node_to_delay[ap]
                for node, services in node_to_services.items():
                    s = str(ap) + "," + str(node)  # + "," + str(node_to_delay[node])
                    for serv in services:
                        s += "," + str(serv)
                    s += '\n'
                    aFile.write(s)
        aFile.write("# 5. AP, rate_service1, rate_service2, ... rate_serviceN\n")
        rate = 1.0 / (len(topology.graph['receivers']) * len(self.services))
        for ap in topology.graph['receivers']:
            s = str(ap) + "," + ",".join(str(rate) for serv in self.services)
            s += '\n'
            aFile.write(s)
        aFile.close()
    ComputationSpot.services = self.services
    self.compSpot = {node: ComputationSpot(self, comp_size[node],
                                           service_size[node], self.services,
                                           node, sched_policy, None)
                     for node in comp_size}
    sys.stdout.flush()
    # This is for a local un-coordinated cache (currently used only by
    # Hashrouting with edge cache)
    self.local_cache = {}
    # Keep track of nodes and links removed to simulate failures
    self.removed_nodes = {}
    # This keeps track of neighbors of a removed node at the time of removal.
    # It is needed to ensure that when the node is restored only links that
    # were removed as part of the node removal are restored and to prevent
    # restoring nodes that were removed manually before removing the node.
    self.disconnected_neighbors = {}
    self.removed_links = {}
    self.removed_sources = {}
    self.removed_caches = {}
    self.removed_local_caches = {}
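# A small numeric sketch of the deadline window computed above, with
# illustrative topology parameters: the minimum deadline covers the receiver
# access round trip plus the (fixed) service time, and the maximum
# additionally allows a full round trip to the tree root.
receiver_access_delay = 0.020
link_delay = 0.015
depth = 3
service_time_max = 0.10
delay_min = 2 * receiver_access_delay + service_time_max  # 0.14
delay_max = delay_min + 2 * depth * link_delay + 0.005    # 0.235
print(delay_min, delay_max)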
def __init__(self, topology, cache_policy, shortest_path=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    cache_policy : dict or Tree
        cache policy descriptor. It has the name attribute which identifies
        the cache policy name and keyworded arguments specific to the policy
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = shortest_path if shortest_path is not None \
                         else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
    # Network topology
    self.topology = topology
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary of cache sizes keyed by node
    self.cache_size = {}
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology, 'type')
    self.link_delay = fnss.get_delays(topology)
    # Instead of this manual assignment, I could have converted the
    # topology to directed before extracting type and link delay but that
    # requires a deep copy of the topology that can take long time if
    # many content source mappings are included in the topology
    if not topology.is_directed():
        for (u, v), link_type in self.link_type.items():
            self.link_type[(v, u)] = link_type
        for (u, v), delay in self.link_delay.items():
            self.link_delay[(v, u)] = delay
    # Initialize attributes
    for node in topology.nodes_iter():
        stack_name, stack_props = fnss.get_stack(topology, node)
        if stack_name == 'router':
            if 'cache_size' in stack_props:
                self.cache_size[node] = stack_props['cache_size']
        elif stack_name == 'source':
            # sources also get a (unit-sized) cache
            contents = stack_props['contents']
            self.cache_size[node] = 1
            for content in contents:
                self.content_source[content] = node
    if any(c < 1 for c in self.cache_size.values()):
        logger.warn('Some content caches have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in self.cache_size:
            if self.cache_size[node] < 1:
                self.cache_size[node] = 1
    policy_name = cache_policy['name']
    policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
    # The actual cache objects storing the content
    self.cache = {node: CACHE_POLICY[policy_name](self.cache_size[node],
                                                  **policy_args)
                  for node in self.cache_size}
    # Store neighbours as a dictionary of lists, using networkx
    self.neighbours = {}
    for n in self.cache:
        self.neighbours[n] = nx.neighbors(topology, n)
def __init__(self, topology, cache_policy, shortest_path=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    cache_policy : dict or Tree
        cache policy descriptor. It has the name attribute which identifies
        the cache policy name and keyworded arguments specific to the policy
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = shortest_path if shortest_path is not None \
                         else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
    # Network topology
    self.topology = topology
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary of cache sizes keyed by node
    self.cache_size = {}
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology, 'type')
    self.link_delay = fnss.get_delays(topology)
    # Instead of this manual assignment, I could have converted the
    # topology to directed before extracting type and link delay but that
    # requires a deep copy of the topology that can take long time if
    # many content source mappings are included in the topology
    if not topology.is_directed():
        for (u, v), link_type in self.link_type.items():
            self.link_type[(v, u)] = link_type
        for (u, v), delay in self.link_delay.items():
            self.link_delay[(v, u)] = delay
    # Initialize attributes
    for node in topology.nodes_iter():
        stack_name, stack_props = fnss.get_stack(topology, node)
        if stack_name == 'router':
            if 'cache_size' in stack_props:
                self.cache_size[node] = stack_props['cache_size']
        elif stack_name == 'source':
            contents = stack_props['contents']
            for content in contents:
                self.content_source[content] = node
    if any(c < 1 for c in self.cache_size.values()):
        logger.warn('Some content caches have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in self.cache_size:
            if self.cache_size[node] < 1:
                self.cache_size[node] = 1
    policy_name = cache_policy['name']
    policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
    # The actual cache objects storing the content
    self.cache = {node: CACHE_POLICY[policy_name](self.cache_size[node],
                                                  **policy_args)
                  for node in self.cache_size}
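# A standalone sketch of the attribute-mirroring trick used by these
# constructors: for an undirected topology, copy each (u, v) link attribute
# to (v, u) instead of paying for topology.to_directed(), which deep-copies
# the graph. The delay values are illustrative.
link_delay = {(1, 2): 10, (2, 3): 5}  # as returned for an undirected graph
for (u, v), delay in list(link_delay.items()):
    link_delay[(v, u)] = delay
print(link_delay[(2, 1)], link_delay[(3, 2)])  # 10 5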
def test_add_stack_no_attr(self):
    fnss.add_stack(self.topo, 1, 's_name')
    self.assertEqual(fnss.get_stack(self.topo, 1, data=False), 's_name')
    self.assertEqual(fnss.get_stack(self.topo, 1, data=True), ('s_name', {}))
def __init__(self, topology, cache_policy, n_services, rate, seed=0,
             shortest_path=None):
    """Constructor

    Parameters
    ----------
    topology : fnss.Topology
        The topology object
    cache_policy : dict or Tree
        cache policy descriptor. It has the name attribute which identifies
        the cache policy name and keyworded arguments specific to the policy
    shortest_path : dict of dict, optional
        The all-pair shortest paths of the network
    """
    # Filter inputs
    if not isinstance(topology, fnss.Topology):
        raise ValueError('The topology argument must be an instance of '
                         'fnss.Topology or any of its subclasses.')
    # Shortest paths of the network
    self.shortest_path = shortest_path if shortest_path is not None \
                         else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
    # Network topology
    self.topology = topology
    # Dictionary mapping each content object to its source
    # dict of location of contents keyed by content ID
    self.content_source = {}
    # Dictionary mapping the reverse, i.e. nodes to set of contents stored
    self.source_node = {}
    # A heap with events (see Event class above)
    self.eventQ = []
    # Dictionary of link types (internal/external)
    self.link_type = nx.get_edge_attributes(topology, 'type')
    self.link_delay = fnss.get_delays(topology)
    # Instead of this manual assignment, I could have converted the
    # topology to directed before extracting type and link delay but that
    # requires a deep copy of the topology that can take long time if
    # many content source mappings are included in the topology
    if not topology.is_directed():
        for (u, v), link_type in list(self.link_type.items()):
            self.link_type[(v, u)] = link_type
        for (u, v), delay in list(self.link_delay.items()):
            self.link_delay[(v, u)] = delay
    cache_size = {}
    comp_size = {}
    for node in topology.nodes_iter():
        stack_name, stack_props = fnss.get_stack(topology, node)
        if stack_name == 'router':
            if 'cache_size' in stack_props:
                cache_size[node] = stack_props['cache_size']
            if 'computation_size' in stack_props:
                comp_size[node] = stack_props['computation_size']
        elif stack_name == 'source' and 'contents' in stack_props:
            contents = stack_props['contents']
            self.source_node[node] = contents
            for content in contents:
                # a content may be hosted by several sources, so map each
                # content to the list of nodes hosting it
                if content not in self.content_source:
                    self.content_source[content] = [node]
                else:
                    self.content_source[content].append(node)
    if any(c < 1 for c in cache_size.values()):
        logger.warn('Some content caches have size equal to 0. '
                    'I am setting them to 1 and run the experiment anyway')
        for node in cache_size:
            if cache_size[node] < 1:
                cache_size[node] = 1
    # Generate the actual services processing requests
    self.services = []
    self.n_services = n_services
    internal_link_delay = 0.002  # This is the delay from receiver to router
    # (A hardcoded, commented-out list of nine test services with fixed
    # 0.5 s service times and staggered deadlines used to live here.)
    # Generate services automatically using min/max ranges for service times
    # and deadlines
    service_time_min = 0.1
    service_time_max = 0.1
    delay_min = 0.005
    delay_max = 0.05  # 0.015*2 + 0.005*4
    service_indx = 0
    deadlines = []
    service_times = []
    random.seed(seed)
    for service in range(0, n_services):
        source_list = self.content_source[service]
        # service time shrinks with the number of replicas hosting the service
        service_time = random.uniform(service_time_min,
                                      service_time_max) / len(source_list)
        deadline = service_time + random.uniform(delay_min, delay_max) \
                 + 2 * internal_link_delay
        deadlines.append(deadline)
        service_times.append(service_time)
    # deadlines = sorted(deadlines)
    for service in range(0, n_services):
        service_time = service_times[service_indx]
        deadline = deadlines[service_indx]
        service_indx += 1
        s = Service(service_time, deadline)
        self.services.append(s)
    # END OF Generating Services
    self.compSpot = {node: ComputationalSpot(comp_size[node], n_services,
                                             self.services, node, None)
                     for node in comp_size}
    policy_name = cache_policy['name']
    policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
    # The actual cache objects storing the content
    self.cache = {node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
                  for node in cache_size}
    # This is for a local un-coordinated cache (currently used only by
    # Hashrouting with edge cache)
    self.local_cache = {}
    # Keep track of nodes and links removed to simulate failures
    self.removed_nodes = {}
    # This keeps track of neighbors of a removed node at the time of removal.
    # It is needed to ensure that when the node is restored only links that
    # were removed as part of the node removal are restored and to prevent
    # restoring nodes that were removed manually before removing the node.
    self.disconnected_neighbors = {}
    self.removed_links = {}
    self.removed_sources = {}
    self.removed_caches = {}
    self.removed_local_caches = {}
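# A standalone sketch of the multi-source bookkeeping above: each content may
# be replicated at several source nodes, so content_source maps a content to
# the list of its hosts (the inverse of source_node). The node names and
# content IDs are illustrative.
source_node = {'s1': [0, 1], 's2': [1, 2]}
content_source = {}
for node, contents in source_node.items():
    for content in contents:
        content_source.setdefault(content, []).append(node)
print(content_source)  # {0: ['s1'], 1: ['s1', 's2'], 2: ['s2']}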