def parse_cch(fname):
    topo = fnss.parse_rocketfuel_isp_map(fname)
    topo = topo.to_undirected()
    # keep only the largest connected component
    components = nx.connected_component_subgraphs(topo)
    cmpnodes = lambda x, y: x if x.number_of_nodes() > y.number_of_nodes() else y
    largest_g = reduce(cmpnodes, components)
    # build an adjacency dict keyed by string device IDs
    devices = {}
    for node, nbrs in largest_g.adjacency_iter():
        nid = 'uid_' + str(node)
        devices[nid] = []
        for nbr in nbrs:
            nbr_id = 'uid_' + str(nbr)
            devices[nid].append(nbr_id)
    return devices
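A minimal usage sketch for the helper above. The imports and the file name 3257.cch are illustrative assumptions, not part of the original snippet; note that connected_component_subgraphs and adjacency_iter are networkx 1.x APIs, so the function as written will not run on networkx 2.x or later.

import fnss
import networkx as nx
from functools import reduce  # reduce is not a builtin on Python 3

# Hypothetical driver: the .cch file name is illustrative.
devices = parse_cch('3257.cch')
print(len(devices), 'devices in the largest connected component')
some_node = next(iter(devices))
print(some_node, '->', devices[some_node][:5])  # a node and a few of its neighbours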
def _parse_rocketfuel_topology(self):
    topo = fnss.parse_rocketfuel_isp_map('3257.r0.cch').to_undirected()
    degree = nx.degree(topo)
    # the highest-degree node becomes the root server
    # (assumes a unique highest-degree node, otherwise the unpacking fails)
    server, = [i for i in degree if degree[i] == max(degree.values())]
    topo.node[server]['type'] = 'root'
    # degree-1 nodes are leaves
    edges = [i for i in degree if degree[i] == 1]
    for i in edges:
        topo.node[i]['type'] = 'leaf'
    # every other node is an intermediate node; all nodes record the server
    for i in topo.node.keys():
        topo.node[i]['server'] = server
        if i not in edges and i != server:
            topo.node[i]['type'] = 'intermediate'
    return topo
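A short, hedged sketch of how the 'type' and 'server' annotations set above might be consumed; the instance name obj is hypothetical and stands for whatever class defines the method.

# Hypothetical consumer of the annotations (networkx 1.x `node` dict, as in the snippet).
topo = obj._parse_rocketfuel_topology()
root = next(n for n, data in topo.node.items() if data['type'] == 'root')
leaves = [n for n, data in topo.node.items() if data['type'] == 'leaf']
print('root:', root, '| leaves:', len(leaves))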
def main(filename):
    topology = fnss.parse_rocketfuel_isp_map(filename).to_undirected()
    old_nodes = topology.number_of_nodes()
    old_links = topology.number_of_edges()
    # Keep only the largest connected component
    max_size = 0
    for component in nx.connected_component_subgraphs(topology):
        l = len(component.nodes())
        if l > max_size:
            max_size = l
            topology = component
    num_nodes = topology.number_of_nodes()
    num_links = topology.number_of_edges()
    print "%(filename)s %(num_nodes)s (%(old_nodes)s) & %(num_links)s (%(old_links)s) links" % locals()
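A small driver, not part of the original excerpt, showing one plausible way main would be invoked from the command line (the argument handling is an assumption):

import sys

if __name__ == '__main__':
    # Hypothetical entry point: the .cch file path is the first CLI argument.
    main(sys.argv[1])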
def __init__(self, asnum=1755):
    super(RocketfuelTopo, self).__init__()
    fname = os.path.join(RocketfuelTopo.dataset_path, "{0}.cch".format(asnum))
    if not os.path.isfile(fname):
        raise Exception(
            "Invalid AS number: {0} does not exist".format(fname))
    topo = fnss.parse_rocketfuel_isp_map(fname)
    # TODO: support directed topologies
    # rename RocketFuel node IDs to switch names s0, s1, ...
    renames = {}
    for node in sorted(topo.nodes()):
        renames[node] = "s{0}".format(len(renames))
    if topo.is_directed():
        g = networkx.DiGraph()
    else:
        g = networkx.Graph()
    for m, n in topo.edges():
        g.add_edge(renames[m], renames[n])
    self.build_from_graph(g)
def test_parse_rockefuel_isp_map(self):
    rocketfuel_file = path.join(RES_DIR, 'rocketfuel-2914.cch')
    topology = fnss.parse_rocketfuel_isp_map(rocketfuel_file)
    self.assertEquals(10961, topology.number_of_nodes())
    self.assertEquals(26070, topology.number_of_edges())
def topology_tiscali2(**kwargs):
    """Return a scenario based on the Tiscali topology, parsed from the
    RocketFuel dataset.

    Differently from the plain Tiscali scenario, in this topology some
    receivers are appended to routers, and only the subset of routers that
    are actually on the path of some traffic is selected to become ICN
    routers. These changes make this topology more realistic.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation

    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(
        path.join(TOPOLOGY_RESOURCES_DIR, '3257.r0.cch')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    # degree of nodes
    deg = nx.degree(topology)
    # nodes with degree = 1 (there are 80 of them)
    onedeg = [v for v in topology.nodes() if deg[v] == 1]
    # We select as caches the nodes with the highest degree, using 6 as the
    # minimum degree --> 36 nodes.
    # If we changed the minimum degree, this would be the number of caches:
    #   Min degree   N caches
    #       2           160
    #       3           102
    #       4            75
    #       5            50
    #       6            36
    #       7            30
    #       8            26
    #       9            19
    #      10            16
    #      11            12
    #      12            11
    #      13             7
    #      14             3
    #      15             3
    #      16             2
    icr_candidates = [v for v in topology.nodes() if deg[v] >= 6]  # 36 nodes
    # Add/remove caches to adapt the betweenness centrality of caches
    for i in [181, 208, 211, 220, 222, 250, 257]:
        icr_candidates.remove(i)
    icr_candidates.extend([232, 303, 326, 363, 378])
    # Sources are degree-1 nodes whose only neighbour has degree of at least 5.
    # We assume that sources are nodes connected to a hub (there are 44).
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5]
    # Receivers are degree-1 nodes whose only neighbour has degree of at most 4.
    # We assume that receivers are nodes not well connected to the network
    # (there are 36).
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]
    # We set router stacks because some strategies will fail if no stacks
    # are deployed
    routers = [v for v in topology.nodes() if v not in sources + receivers]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')

    # deploy stacks
    topology.graph['icr_candidates'] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def _load_rocketfuel_topology(self):
    # name: rocketfuel-as_name-heuristic-...
    as_name, heuristic = self._name[1:3]
    filename = '%s/maps/%s.%s' % (ROCKETFUEL_PATH, as_name, heuristic)
    self._topology = fnss.parse_rocketfuel_isp_map(filename).to_undirected()
def topology_tiscali(min_delay=INTERNAL_LINK_DELAY / 1000,
                     max_delay=EXTERNAL_LINK_DELAY / 1000,
                     n_classes=1, **kwargs):
    """Return a scenario based on the Tiscali topology, parsed from the
    RocketFuel dataset.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation

    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    random.seed(0)
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(
        path.join(TOPOLOGY_RESOURCES_DIR, '3257.r0.cch')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    # degree of nodes
    deg = nx.degree(topology)
    # nodes with degree = 1 (there are 80 of them)
    onedeg = [v for v in topology.nodes() if deg[v] == 1]
    fifteendeg = [v for v in topology.nodes() if deg[v] == 15]
    # We select as caches the nodes with the highest degree, using 6 as the
    # minimum degree --> 36 nodes.
    # If we changed the minimum degree, this would be the number of caches:
    #   Min degree   N caches
    #       2           160
    #       3           102
    #       4            75
    #       5            50
    #       6            36
    #       7            30
    #       8            26
    #       9            19
    #      10            16
    #      11            12
    #      12            11
    #      13             7
    #      14             3
    #      15             3
    #      16             2
    # icr_candidates = [v for v in topology.nodes() if deg[v] >= 6]  # 36 nodes
    # REPLACED with:
    icr_candidates = [v for v in topology.nodes() if deg[v] >= 2]  # 102 nodes
    topology.graph['type'] = "ROCKET_FUEL"
    # Sources are degree-1 nodes whose only neighbour has degree of at least 5.
    # We assume that sources are nodes connected to a hub (there are 44).
    # sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5]
    # REPLACED with a single, randomly chosen degree-1 node:
    sources = [random.choice(onedeg)]
    # Receivers are degree-1 nodes whose only neighbour has degree of at most 4.
    # We assume that receivers are nodes not well connected to the network
    # (there are 36).
    # receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]
    # REPLACED with all degree-1 nodes except the source:
    # icr_candidates.remove(sources[0])
    receivers = [v for v in onedeg]
    receivers.remove(sources[0])
    edge_routers = []  # [list(topology.edge[v].keys())[0] for v in onedeg]
    for v in receivers:
        edge_router = list(topology.edge[v].keys())[0]
        if edge_router not in edge_routers:
            edge_routers.append(edge_router)
    routers = [v for v in topology.nodes() if v not in sources + receivers]

    print "There are " + repr(len(edge_routers)) + " edge routers: " + repr(edge_routers)
    print "There are " + repr(len(receivers)) + " receivers: " + repr(receivers)
    print "There are " + repr(len(sources)) + " sources: " + repr(sources)
    # print "There are " + repr(len(icr_candidates)) + " cache candidates"
    print "There are " + repr(len(routers)) + " routers: " + repr(routers)

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')

    topology.graph['max_delay'] = 0.0  # [0.0]*n_classes
    topology.graph['min_delay'] = float('inf')  # [0.0]*n_classes
    topology.graph['icr_candidates'] = set(icr_candidates)
    topology.graph['n_classes'] = n_classes

    # Deploy stacks
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')

    topology.graph['receivers'] = receivers
    topology.graph['sources'] = sources
    topology.graph['routers'] = routers
    topology.graph['edge_routers'] = edge_routers
    topology.graph['parent'] = {x: None for x in topology.nodes()}
    topology.graph['n_edgeRouters'] = len(edge_routers)

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_tiscali(network_cache=0.05, n_contents=100000, seed=None):
    """Return a scenario based on the Tiscali topology, parsed from the
    RocketFuel dataset.

    Parameters
    ----------
    network_cache : float
        Size of the network cache (sum of all caches) normalized by the size
        of the content population
    n_contents : int
        Size of the content population
    seed : int, optional
        The seed used for random number generation

    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(
        path.join(TOPOLOGY_RESOURCES_DIR, '3257.r0.cch')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    # degree of nodes
    deg = nx.degree(topology)
    # nodes with degree = 1 (there are 80 of them)
    onedeg = [v for v in topology.nodes() if deg[v] == 1]
    # We select as caches the nodes with the highest degree, using 6 as the
    # minimum degree --> 36 nodes.
    # If we changed the minimum degree, this would be the number of caches:
    #   Min degree   N caches
    #       2           160
    #       3           102
    #       4            75
    #       5            50
    #       6            36
    #       7            30
    #       8            26
    #       9            19
    #      10            16
    #      11            12
    #      12            11
    #      13             7
    #      14             3
    #      15             3
    #      16             2
    caches = [v for v in topology.nodes() if deg[v] >= 6]  # 36 nodes
    # Sources are degree-1 nodes whose only neighbour has degree of at least 5.
    # We assume that sources are nodes connected to a hub (there are 44).
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5]
    # Receivers are degree-1 nodes whose only neighbour has degree of at most 4.
    # We assume that receivers are nodes not well connected to the network
    # (there are 36).
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]
    # We set router stacks because some strategies will fail if no stacks
    # are deployed
    routers = [v for v in topology.nodes()
               if v not in caches + sources + receivers]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')

    # randomly allocate contents to sources
    content_placement = uniform_content_placement(topology,
                                                  range(1, n_contents + 1),
                                                  sources, seed=seed)
    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': content_placement[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    for v in routers:
        fnss.add_stack(topology, v, 'router', {})

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'

    cache_placement = uniform_cache_placement(topology,
                                              network_cache * n_contents,
                                              caches)
    for node, size in cache_placement.iteritems():
        fnss.add_stack(topology, node, 'cache', {'size': size})
    return topology
def scenario_tiscali(net_cache=[0.05], n_contents=100000,
                     alpha=[0.6, 0.8, 1.0]):
    """Return a scenario based on the Tiscali topology, parsed from the
    RocketFuel dataset.

    Parameters
    ----------
    scenario_id : str
        String identifying the scenario (will be in the filename)
    net_cache : list of float
        Sizes of the network cache (sum of all caches) normalized by the size
        of the content population
    n_contents : int
        Size of the content population
    alpha : list of float
        Alpha values of the Zipf content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000
    T = 'TISCALI'  # name of the topology
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(
        path.join(scenarios_dir, 'resources/3257.r0.cch')).to_undirected()
    topology = nx.connected_component_subgraphs(topology)[0]
    deg = nx.degree(topology)
    # nodes with degree = 1 (there are 80 of them)
    onedeg = [v for v in topology.nodes() if deg[v] == 1]
    # We select as caches the nodes with the highest degree, using 6 as the
    # minimum degree --> 36 nodes.
    # If we changed the minimum degree, this would be the number of caches:
    #   Min degree   N caches
    #       2           160
    #       3           102
    #       4            75
    #       5            50
    #       6            36
    #       7            30
    #       8            26
    #       9            19
    #      10            16
    #      11            12
    #      12            11
    #      13             7
    #      14             3
    #      15             3
    #      16             2
    caches = [v for v in topology.nodes() if deg[v] >= 6]  # 36 nodes
    # Sources are degree-1 nodes whose only neighbour has degree of at least 5.
    # We assume that sources are nodes connected to a hub (there are 44).
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5]
    # Receivers are degree-1 nodes whose only neighbour has degree of at most 4.
    # We assume that receivers are nodes not well connected to the network
    # (there are 36).
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]
    # We set router stacks because some strategies will fail if no stacks
    # are deployed
    routers = [v for v in topology.nodes()
               if v not in caches + sources + receivers]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')

    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)
    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    for v in routers:
        fnss.add_stack(topology, v, 'router', {})

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'

    for nc in net_cache:
        size = (float(nc) * n_contents) / len(caches)  # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(topology,
                            path.join(scenarios_dir,
                                      topo_prefix + 'T=%s@C=%s' % (T, C) + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration,
                                          n_contents, a)
        fnss.write_event_schedule(event_schedule,
                                  path.join(scenarios_dir,
                                            es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d'
              % (T, str(a), len(event_schedule)))
def topology_tiscali(**kwargs):
    """Return a scenario based on the Tiscali topology, parsed from the
    RocketFuel dataset.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation

    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(
        path.join(TOPOLOGY_RESOURCES_DIR, "3257.r0.cch")).to_undirected()
    topology = largest_connected_component_subgraph(topology)
    # degree of nodes
    deg = nx.degree(topology)
    # nodes with degree = 1 (there are 80 of them)
    onedeg = [v for v in topology.nodes() if deg[v] == 1]
    # We select as caches the nodes with the highest degree, using 6 as the
    # minimum degree --> 36 nodes.
    # If we changed the minimum degree, this would be the number of caches:
    #   Min degree   N caches
    #       2           160
    #       3           102
    #       4            75
    #       5            50
    #       6            36
    #       7            30
    #       8            26
    #       9            19
    #      10            16
    #      11            12
    #      12            11
    #      13             7
    #      14             3
    #      15             3
    #      16             2
    icr_candidates = [v for v in topology.nodes() if deg[v] >= 6]  # 36 nodes
    # Sources are degree-1 nodes whose only neighbour has degree of at least 5.
    # We assume that sources are nodes connected to a hub (there are 44).
    sources = [v for v in onedeg if deg[list(topology.adj[v].keys())[0]] > 4.5]
    # Receivers are degree-1 nodes whose only neighbour has degree of at most 4.
    # We assume that receivers are nodes not well connected to the network
    # (there are 36).
    receivers = [v for v in onedeg if deg[list(topology.adj[v].keys())[0]] < 4.5]
    # We set router stacks because some strategies will fail if no stacks
    # are deployed
    routers = [v for v in topology.nodes() if v not in sources + receivers]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, "ms")

    # Deploy stacks
    topology.graph["icr_candidates"] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, "source")
    for v in receivers:
        fnss.add_stack(topology, v, "receiver")
    for v in routers:
        fnss.add_stack(topology, v, "router")

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.adj[u][v]["type"] = "external"
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, "ms", [(u, v)])
        else:
            topology.adj[u][v]["type"] = "internal"
    return IcnTopology(topology)
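All of the Tiscali scenario builders above rely on the same degree-threshold rule: a degree-1 node is treated as a source when its only neighbour has degree greater than 4.5 (i.e. it hangs off a hub) and as a receiver when the neighbour's degree is below 4.5. The following self-contained toy illustration of that rule is not part of any of the original snippets; the graph is made up and the networkx 2.x API is assumed.

import networkx as nx

# Toy graph: node 0 is a hub of degree 5; 'r' is a degree-1 node behind a
# degree-2 neighbour, so it falls on the receiver side of the threshold.
g = nx.star_graph(5)        # center 0 connected to leaves 1..5
g.add_edge(1, 'r')          # now deg(1) == 2 and deg('r') == 1
deg = dict(nx.degree(g))
onedeg = [v for v in g.nodes() if deg[v] == 1]
sources = [v for v in onedeg if deg[next(iter(g.adj[v]))] > 4.5]
receivers = [v for v in onedeg if deg[next(iter(g.adj[v]))] < 4.5]
print(sources)    # [2, 3, 4, 5] -- leaves attached to the degree-5 hub
print(receivers)  # ['r'] -- its only neighbour has degree 2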
""" Rocketfuel topology and traffic matrix ====================================== This example shows how to import a topology from RocketFuel, configure it (assign capacities, weights and delays), generate a traffic matrix and save topology and traffic matrix to XML files. """ import fnss import random # Import RocketFuel topology # Replace the filename with the actual location of the file you want to parse topology = fnss.parse_rocketfuel_isp_map("rocket-fuel-topo-file.cch") # add capacities capacities = [1, 10, 40] capacity_unit = 'Gbps' fnss.set_capacities_edge_betweenness(topology, capacities, capacity_unit, weighted=False) # add weights proportional to inverse of capacity fnss.set_weights_inverse_capacity(topology) # add constant link delays of 2 ms fnss.set_delays_constant(topology, 2, delay_unit='ms') # generate cyclostationary traffic matrix (period 7 days, 24 samples per day) tmc = fnss.sin_cyclostationary_traffic_matrix( topology, mean=0.5, # average flow in TM is 0,5 Gbps