Example #1
def generate_topo(n):
    topo = nx.powerlaw_cluster_graph(n, 2, 0.08)
    # topo = fnss.waxman_1_topology(n=50, alpha=0.6, beta=0.3)
    # topo = fnss.fat_tree_topology(n)
    fnss.set_weights_constant(topo, 1)
    fnss.set_delays_constant(topo, 1, 'ms')
    fnss.set_capacities_edge_betweenness(topo, [100, 500, 1000], 'Mbps')
    fnss.write_topology(topo, 'topo_pl_50.xml')
def topology_tandem(n=3, nc=0.01, **kwargs):

    T = 'TANDEM'  # name of the topology

    topology = fnss.line_topology(n)
    topology = list(nx.connected_component_subgraphs(topology))[0]

    receivers = [0]
    routers = [1, 2]
    #sources = [2]
    
    source_attachment = routers[1]
    source = source_attachment + 1000
    topology.add_edge(source_attachment, source)

    sources = [source]

    topology.graph['icr_candidates'] = set(routers)
    
    fnss.add_stack(topology, source, 'source')

    #for v in sources:
    #    fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    for u, v in topology.edges_iter():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'

    C = str(nc)
    fnss.write_topology(topology, path.join(TOPOLOGY_RESOURCES_DIR, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))

    return IcnTopology(topology)
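A minimal usage sketch for topology_tandem (it assumes the module-level constants INTERNAL_LINK_DELAY, EXTERNAL_LINK_DELAY, TOPOLOGY_RESOURCES_DIR, topo_prefix and the IcnTopology class are defined elsewhere in the original file):

topology = topology_tandem(n=3)
for v in topology.nodes():
    stack = fnss.get_stack(topology, v)
    if stack is not None:
        print(v, stack[0])  # prints 'receiver', 'router' or 'source' per node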
Example #3
def test_read_write_topology(self):
    tmp_topo_file = path.join(TMP_DIR, 'toporw.xml')
    fnss.write_topology(self.G, tmp_topo_file)
    self.assertTrue(path.exists(tmp_topo_file))
    read_topo = fnss.read_topology(tmp_topo_file)
    self.assertEqual(len(self.G), len(read_topo))
    self.assertEqual(self.G.number_of_edges(),
                     read_topo.number_of_edges())
    self.assertEqual('tcp', fnss.get_stack(read_topo, 2)[0])
    self.assertEqual(1024, fnss.get_stack(read_topo, 2)[1]['rcvwnd'])
    self.assertEqual('cubic', fnss.get_stack(read_topo, 2)[1]['protocol'])
    self.assertEqual(len(fnss.get_application_names(self.G, 2)),
                     len(fnss.get_application_names(read_topo, 2)))
    self.assertEqual('fnss', fnss.get_application_properties(read_topo, 2, 'server')['user-agent'])
    self.assertEqual([2, 4, 6], [v for v in read_topo.nodes_iter()
                                 if fnss.get_stack(read_topo, v) is not None
                                 and fnss.get_stack(read_topo, v)[0] == 'tcp'])
    self.assertEqual([2, 4], [v for v in read_topo.nodes_iter()
                              if 'client' in fnss.get_application_names(read_topo, v)])
    self.assertEqual([2], [v for v in read_topo.nodes_iter()
                           if 'server' in fnss.get_application_names(read_topo, v)])
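The fixture self.G is not part of this snippet; a hedged sketch of a setUp consistent with the assertions above (nodes 2, 4 and 6 run TCP, 2 and 4 run a 'client' application, and 2 also runs a 'server'):

def setUp(self):
    # hypothetical fixture, reconstructed from the assertions above
    self.G = fnss.ring_topology(8)
    for v in (2, 4, 6):
        fnss.add_stack(self.G, v, 'tcp', {'rcvwnd': 1024, 'protocol': 'cubic'})
    for v in (2, 4):
        fnss.add_application(self.G, v, 'client', {})
    fnss.add_application(self.G, 2, 'server', {'user-agent': 'fnss'})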
Example #4
def scenario_simple_test():
    """
    Makes simple scenario for test puropses
    """
    def gen_event(receivers, contents):
        return {'receiver': choice(receivers), 'content': choice(contents)}
    contents = {5: [1, 2, 3, 4], 7: [5, 6, 7, 8]}
    n_caches = 4
    size = 5
    topology = fnss.ring_topology(n_caches)
    for u in range(n_caches):
        v = u + n_caches
        topology.add_edge(u, v)
        fnss.add_stack(topology, u, 'cache', {'size': size})
        if u % 2 == 0:
            fnss.add_stack(topology, v, 'receiver', {})
        else:
            fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    
    event_schedule = fnss.poisson_process_event_schedule(20, 0, 300, 'ms',
                                                         gen_event, [4, 6], range(1, 9))
    fnss.write_topology(topology, path.join(scenarios_dir, 'TOPO_TEST.xml'))
    fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, 'ES_TEST.xml'))
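The written files can then be loaded back for a simulation run; a small sketch using the file names from above (an fnss.EventSchedule iterates as (time, event) pairs):

topology = fnss.read_topology(path.join(scenarios_dir, 'TOPO_TEST.xml'))
event_schedule = fnss.read_event_schedule(path.join(scenarios_dir, 'ES_TEST.xml'))
for time, event in event_schedule:
    print(time, event['receiver'], event['content'])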
Example #5
import fnss
import networkx as nx

# create a topology with 10 core switches, 20 edge switches and 10 hosts
# per switch (i.e. 200 hosts in total)
topology = fnss.two_tier_topology(n_core=10, n_edge=20, n_hosts=10)

# assign capacities
# let's set links connecting servers to edge switches to 1 Gbps
# and links connecting core and edge switches to 10 Gbps.

# get list of core_edge links and edge_leaf links
link_types = nx.get_edge_attributes(topology, 'type')
core_edge_links = [link for link in link_types
                   if link_types[link] == 'core_edge']
edge_leaf_links = [link for link in link_types
                   if link_types[link] == 'edge_leaf']

# assign capacities
fnss.set_capacities_constant(topology, 1, 'Gbps', edge_leaf_links)
fnss.set_capacities_constant(topology, 10, 'Gbps', core_edge_links)

# assign weight 1 to all links
fnss.set_weights_constant(topology, 1)

# assign delay of 10 nanoseconds to each link
fnss.set_delays_constant(topology, 10, 'ns')

# save topology to a file
fnss.write_topology(topology, 'datacenter_topology.xml')
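As an optional sanity check, the assigned capacities can be read straight off the edge attributes (FNSS stores them under the 'capacity' key, with the unit in topology.graph['capacity_unit']):

capacities = nx.get_edge_attributes(topology, 'capacity')
print(set(capacities.values()))  # expected: {1, 10}, i.e. 1 and 10 Gbps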
Example #6
def scenario_garr(net_cache=[0.01, 0.05], n_contents=100000, alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on GARR topology
    
    Parameters
    ----------
    net_cache : list of float
        Sizes of network cache (sum of all caches) normalized by the size of
        the content population
    n_contents : int
        Size of the content population
    alpha : list of float
        List of Zipf alpha values for the content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000
    
    T = 'GARR' # name of the topology
    
    topology = fnss.parse_topology_zoo(path.join(scenarios_dir, 'resources/Garr201201.graphml')).to_undirected()
    # sources are nodes representing neighbouring ASes
    sources = [0, 2, 3, 5, 13, 16, 23, 24, 25, 27, 51, 52, 54]
    # receivers are internal nodes with degree = 1
    receivers = [1, 7, 8, 9, 11, 12, 19, 26, 28, 30, 32, 33, 41, 42, 43, 47, 48, 50, 53, 57, 60]
    # caches are all remaining nodes --> 27 caches
    caches = [n for n in topology.nodes() if n not in receivers + sources]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')

    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)
    
    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms',[(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    for nc in net_cache:
        size = (float(nc)*n_contents)/len(caches) # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(topology, path.join(scenarios_dir, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration, n_contents, a)
        fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' % (T, str(a), len(event_schedule)))
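The gen_req_schedule helper is defined elsewhere in the original script; a hypothetical sketch of what it might look like (Poisson arrivals with Zipf-distributed content popularity; the request rate and time units here are assumptions):

import bisect
import random

def gen_req_schedule(receivers, rate, warmup, duration, n_contents, alpha):
    # truncated Zipf pdf/cdf over content IDs 1..n_contents
    pdf = [i ** -alpha for i in range(1, n_contents + 1)]
    norm = sum(pdf)
    cdf = []
    acc = 0.0
    for p in pdf:
        acc += p / norm
        cdf.append(acc)
    cdf[-1] = 1.0  # guard against floating-point round-off
    def gen_event(receivers):
        # draw a content by inverting the cdf, and a random receiver
        content = bisect.bisect(cdf, random.random()) + 1
        return {'receiver': random.choice(receivers), 'content': content}
    # rate is assumed to be in requests/s and warmup/duration in ms
    return fnss.poisson_process_event_schedule(1000.0 / rate, 0,
                                               warmup + duration, 'ms',
                                               gen_event, receivers)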
Example #7
def scenario_tiscali(net_cache=[0.05], n_contents=100000, alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on Tiscali topology, parsed from RocketFuel dataset
    
    Parameters
    ----------
    net_cache : list of float
        Sizes of network cache (sum of all caches) normalized by the size of
        the content population
    n_contents : int
        Size of the content population
    alpha : list of float
        List of Zipf alpha values for the content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000
    
    T = 'TISCALI' # name of the topology
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(path.join(scenarios_dir, 'resources/3257.r0.cch')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    
    deg = nx.degree(topology)
    onedeg = [v for v in topology.nodes() if deg[v] == 1] # they are 80
    
    # we select as caches nodes with highest degrees
    # we use as min degree 6 --> 36 nodes
    # If we changed min degrees, that would be the number of caches we would have:
    # Min degree    N caches
    #  2               160
    #  3               102
    #  4                75
    #  5                50
    #  6                36
    #  7                30
    #  8                26
    #  9                19
    # 10                16
    # 11                12
    # 12                11
    # 13                 7
    # 14                 3
    # 15                 3
    # 16                 2
    caches = [v for v in topology.nodes() if deg[v] >= 6] # 36 nodes
    
    # sources are nodes with degree 1 whose neighbour has degree at least 5
    # we assume that sources are nodes connected to a hub
    # they are 44
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5]

    # receivers are nodes with degree 1 whose neighbour has degree at most 4
    # we assume that receivers are nodes not well connected to the network
    # they are 36
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]

    # we set router stacks because some strategies will fail if no stacks
    # are deployed 
    routers = [v for v in topology.nodes() if v not in caches + sources + receivers]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')

    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)
    
    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    for v in routers:
        fnss.add_stack(topology, v, 'router', {})

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
            
            
    for nc in net_cache:
        size = (float(nc)*n_contents)/len(caches) # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(topology, path.join(scenarios_dir, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration, n_contents, a)
        fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' % (T, str(a), len(event_schedule)))
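The min-degree table in the comments above can be reproduced in a couple of lines (assuming topology is the parsed Tiscali map):

deg = nx.degree(topology)
for d in range(2, 17):
    # count how many nodes would qualify as caches at each min-degree cutoff
    print('min degree %2d -> %3d caches' % (d, len([v for v in topology.nodes() if deg[v] >= d])))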
Example #8
def scenario_geant(net_cache=[0.05], n_contents=100000, alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on the GEANT topology
    
    Parameters
    ----------
    net_cache : list of float
        Sizes of network cache (sum of all caches) normalized by the size of
        the content population
    n_contents : int
        Size of the content population
    alpha : list of float
        List of Zipf alpha values for the content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000
    
    T = 'GEANT' # name of the topology
    topology = fnss.parse_topology_zoo(path.join(scenarios_dir, 'resources/Geant2012.graphml')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    
    deg = nx.degree(topology)

    receivers = [v for v in topology.nodes() if deg[v] == 1] # 8 nodes
    
    caches = [v for v in topology.nodes() if deg[v] > 2] # 19 nodes
    
    # attach sources to topology
    source_attachments = [v for v in topology.nodes() if deg[v] == 2] # 13 nodes
    sources = []
    for v in source_attachments:
        u = v + 1000 # node ID of source
        topology.add_edge(v, u)
        sources.append(u)
    
    routers = [v for v in topology.nodes() if v not in caches + sources + receivers]
    
    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)
    
    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    for v in routers:
        fnss.add_stack(topology, v, 'router', {})

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')
    
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
            
            
    for nc in net_cache:
        size = (float(nc)*n_contents)/len(caches) # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(topology, path.join(scenarios_dir, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration, n_contents, a)
        fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' % (T, str(a), len(event_schedule)))
def scenario_geant(net_cache=[0.05], n_contents=100000, alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on the GEANT topology
    
    Parameters
    ----------
    net_cache : list of float
        Sizes of network cache (sum of all caches) normalized by the size of
        the content population
    n_contents : int
        Size of the content population
    alpha : list of float
        List of Zipf alpha values for the content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000

    T = 'GEANT'  # name of the topology
    topology = fnss.parse_topology_zoo(
        path.join(scenarios_dir,
                  'resources/Geant2012.graphml')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]

    deg = nx.degree(topology)

    receivers = [v for v in topology.nodes() if deg[v] == 1]  # 8 nodes

    caches = [v for v in topology.nodes() if deg[v] > 2]  # 19 nodes

    # attach sources to topology
    source_attachments = [v for v in topology.nodes()
                          if deg[v] == 2]  # 13 nodes
    sources = []
    for v in source_attachments:
        u = v + 1000  # node ID of source
        topology.add_edge(v, u)
        sources.append(u)

    routers = [
        v for v in topology.nodes() if v not in caches + sources + receivers
    ]

    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)

    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    for v in routers:
        fnss.add_stack(topology, v, 'router', {})

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms',
                                     [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'

    for nc in net_cache:
        size = (float(nc) * n_contents) / len(caches)  # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(
            topology,
            path.join(scenarios_dir,
                      topo_prefix + 'T=%s@C=%s' % (T, C) + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))

    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration,
                                          n_contents, a)
        fnss.write_event_schedule(
            event_schedule,
            path.join(scenarios_dir,
                      es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' %
              (T, str(a), len(event_schedule)))
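A quick post-deployment check (hypothetical usage): counting stacks per role should match the figures noted in the comments above, e.g. 8 receivers and 19 caches for GEANT:

names = [fnss.get_stack(topology, v)[0] for v in topology.nodes()
         if fnss.get_stack(topology, v) is not None]
print({n: names.count(n) for n in set(names)})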
Example #10
import random

import fnss
import networkx as nx

# the start of this example is truncated in the source; a dumbbell topology
# is assumed, since it provides the 'left_bell'/'right_bell' node types used
# in the surviving comprehension fragment
topology = fnss.dumbbell_topology(5, 3)
node_types = nx.get_node_attributes(topology, 'type')
left_nodes = [nodes for nodes in node_types
              if node_types[nodes] == 'left_bell']
right_nodes = [nodes for nodes in node_types
               if node_types[nodes] == 'right_bell']

for node in left_nodes:
    fnss.add_application(topology, node, 'receiver', {})

for node in right_nodes:
    fnss.add_application(topology, node, 'source', {})

# now create a function that generates events
def rand_request(source_nodes, receiver_nodes):
    source = random.choice(source_nodes)
    receiver = random.choice(receiver_nodes)
    return {'source': source, 'receiver': receiver}

event_schedule = fnss.poisson_process_event_schedule(
                        avg_interval=50,  # 50 ms
                        t_start=0,  # starts at 0
                        duration=10 * 1000,  # 10 sec
                        t_unit='ms',  # milliseconds
                        event_generator=rand_request,  # event gen function
                        source_nodes=right_nodes,  # rand_request argument
                        receiver_nodes=left_nodes  # rand_request argument
                        )
# Write topology and event schedule to files
fnss.write_topology(topology, 'topology.xml')
fnss.write_event_schedule(event_schedule, 'event_schedule.xml')
Example #11
import sys

import fnss
import networkx as nx
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.link import TCLink
from mininet.util import dumpNodeConnections


def build_topology():

	# We use fat tree topology for datacenters
	kval = 0
	edgeLinkCapacity = [10, 'Mbps']
	aggrLinkCapacity = [100,'Mbps']
	coreLinkCapacity = [1,  'Gbps']
	linkDelay		 = [10, 'ns']

	# Get the values from the network.config file
	with open('./network.config', 'r') as f:
		lines = f.readlines()
	for line in lines:
		val = line.split()
		if not val:
			continue
		if val[0] == "K_VALUE":
			kval = int(val[1])
		elif val[0] == "EDGE_LINK_SPEED":
			edgeLinkCapacity[0] = val[1]
			edgeLinkCapacity[1] = val[2]
		elif val[0] == "AGGR_LINK_SPEED":
			aggrLinkCapacity[0] = val[1]
			aggrLinkCapacity[1] = val[2]
		elif val[0] == "CORE_LINK_SPEED":
			coreLinkCapacity[0] = val[1]
			coreLinkCapacity[1] = val[2]
		elif val[0] == "LINK_DELAY":
			linkDelay[0] = val[1]
			linkDelay[1] = val[2]

	if kval == 0:
		print("ERROR: Wrong value of K for a fat tree topo, exiting")
		sys.exit(1)

	# Build the topology using fnss
	topology = fnss.fat_tree_topology(kval)

	# Get link types
	link_types = nx.get_edge_attributes(topology, 'type')

	edge_leaf_links = [link for link in link_types
                if link_types[link] == 'edge_leaf']

	aggregation_edge_links = [link for link in link_types
                if link_types[link] == 'aggregation_edge']

	core_edge_links = [link for link in link_types
                if link_types[link] == 'core_edge']

	# Set the link speeds
	fnss.set_capacities_constant(topology, edgeLinkCapacity[0], edgeLinkCapacity[1], edge_leaf_links)
	fnss.set_capacities_constant(topology, aggrLinkCapacity[0], aggrLinkCapacity[1], aggregation_edge_links)
	fnss.set_capacities_constant(topology, coreLinkCapacity[0], coreLinkCapacity[1], core_edge_links)

	# Set default weight of 1 to all links
	fnss.set_weights_constant(topology, 1)

	# Set link delay to be 10 ns
	fnss.set_delays_constant(topology, linkDelay[0], linkDelay[1])

	# Generate the topology.xml file
	fnss.write_topology(topology, 'topology.xml')

	# Create mininet topology from fnss with renaming to mininet format
	mn_topo = fnss.to_mininet(topology, relabel_nodes=True)

	net = Mininet(topo=mn_topo, link=TCLink, controller=None)

	net.addController('floodlight', controller=RemoteController, ip="127.0.0.1", port=6653)

	net.start()

	# Dump host connections
	dumpNodeConnections(net.hosts)

	# Test network connectivity
	net.pingAll()
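build_topology() expects a plain-text network.config next to the script; a hypothetical example matching the parser above (values are illustrative):

K_VALUE 4
EDGE_LINK_SPEED 10 Mbps
AGGR_LINK_SPEED 100 Mbps
CORE_LINK_SPEED 1 Gbps
LINK_DELAY 10 ns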
Example #12
def scenario_single_cache(net_cache=[0.01], n_contents=100000, alpha=[1.0]):
    """
        Parameters
        ----------
        scenario_id : str
        String identifying the scenario (will be in the filename)
        net_cache : float
        Size of network cache (sum of all caches) normalized by size of content
        population
        n_contents : int
        Size of content population
        alpha : float
        List of alpha of Zipf content distribution
        """
    rate = 4.0
    warmup = 0
    duration = 25000
    numnodes = 2
    
    T = 'SINGLE_CACHE' # name of the topology
    topology = fnss.topologies.simplemodels.line_topology(numnodes)
    #topology = fnss.parse_topology_zoo(path.join(scenarios_dir, 'resources/Geant2012.graphml')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    
    deg = nx.degree(topology)
    
    nodes = topology.nodes()
    
    #receivers = [v for v in topology.nodes() if deg[v] == 1] # 8 nodes
    receiver = nodes[0]
    
    #caches = [v for v in topology.nodes() if deg[v] > 2] # 19 nodes
    cache = nodes[1]
    
    # attach sources to topology
    #source_attachments = [v for v in topology.nodes() if deg[v] == 2] # 13 nodes
    source_attachment = cache

    #sources = []
    #for v in source_attachments:
    #    u = v + 1000 # node ID of source
    #    topology.add_edge(v, u)
    #    sources.append(u)
    source = source_attachment + 1000
    topology.add_edge(source_attachment, source)
    
    
    #routers = [v for v in topology.nodes() if v not in caches + sources + receivers]
    #router = nodes[1]
    
    # randomly allocate contents to sources
    #contents = dict([(v, []) for v in sources])
    #for c in range(1, n_contents + 1):
    #    s = choice(sources)
    #    contents[s].append(c)
    contents = {source: list(range(1, n_contents + 1))}
    
    #for v in sources:
    #    fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    #for v in receivers:
    #    fnss.add_stack(topology, v, 'receiver', {})
    #for v in routers:
    #    fnss.add_stack(topology, v, 'router', {})

    fnss.add_stack(topology, source, 'source', {'contents': contents[source]})
    fnss.add_stack(topology, receiver, 'receiver', {})
    #fnss.add_stack(topology, router, 'router', {})


    
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')
    
    # label links as internal or external
    #for u, v in topology.edges():
    #    if u in sources or v in sources:
    #        topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            #fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            #fnss.set_delays_constant(topology, external_link_delay, 'ms', [(u, v)])
        #else:
        #    topology.edge[u][v]['type'] = 'internal'
    
    topology.edge[source_attachment][source]['type'] = 'external'
    fnss.set_weights_constant(topology, 1000.0, [(source_attachment, source)])
    fnss.set_delays_constant(topology, external_link_delay, 'ms', [(source_attachment, source)])
    topology.edge[receiver][cache]['type'] = 'internal'
    #topology.edge[router][cache]['type'] = 'internal'
    
    for nc in net_cache:
        #size = (float(nc)*n_contents)/len(caches) # size of a single cache
        size = (float(nc)*n_contents)
        C = str(nc)
        fnss.add_stack(topology, cache, 'cache', {'size': size})
        fnss.write_topology(topology, path.join(scenarios_dir, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    
    receivers = [receiver]
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration, n_contents, a)
        fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' % (T, str(a), len(event_schedule)))
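A hypothetical read-back check of the single-cache chain written above (receiver at node 0, cache at node 1, source attached as node 1001):

topo = fnss.read_topology(path.join(scenarios_dir,
                                    topo_prefix + 'T=SINGLE_CACHE@C=0.01.xml'))
print(fnss.get_stack(topo, 0)[0])     # 'receiver'
print(fnss.get_stack(topo, 1)[0])     # 'cache'
print(fnss.get_stack(topo, 1001)[0])  # 'source'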
def scenario_garr(net_cache=[0.01, 0.05],
                  n_contents=100000,
                  alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on GARR topology
    
    Parameters
    ----------
    net_cache : list of float
        Sizes of network cache (sum of all caches) normalized by the size of
        the content population
    n_contents : int
        Size of the content population
    alpha : list of float
        List of Zipf alpha values for the content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000

    T = 'GARR'  # name of the topology

    topology = fnss.parse_topology_zoo(
        path.join(scenarios_dir,
                  'resources/Garr201201.graphml')).to_undirected()
    # sources are nodes representing neighbouring ASes
    sources = [0, 2, 3, 5, 13, 16, 23, 24, 25, 27, 51, 52, 54]
    # receivers are internal nodes with degree = 1
    receivers = [
        1, 7, 8, 9, 11, 12, 19, 26, 28, 30, 32, 33, 41, 42, 43, 47, 48, 50, 53,
        57, 60
    ]
    # caches are all remaining nodes --> 27 caches
    caches = [n for n in topology.nodes() if n not in receivers + sources]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')

    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)

    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms',
                                     [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    for nc in net_cache:
        size = (float(nc) * n_contents) / len(caches)  # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(
            topology,
            path.join(scenarios_dir,
                      topo_prefix + 'T=%s@C=%s' % (T, C) + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration,
                                          n_contents, a)
        fnss.write_event_schedule(
            event_schedule,
            path.join(scenarios_dir,
                      es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' %
              (T, str(a), len(event_schedule)))
def scenario_tiscali(net_cache=[0.05],
                     n_contents=100000,
                     alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on Tiscali topology, parsed from RocketFuel dataset
    
    Parameters
    ----------
    net_cache : list of float
        Sizes of network cache (sum of all caches) normalized by the size of
        the content population
    n_contents : int
        Size of the content population
    alpha : list of float
        List of Zipf alpha values for the content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000

    T = 'TISCALI'  # name of the topology
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(
        path.join(scenarios_dir, 'resources/3257.r0.cch')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]

    deg = nx.degree(topology)
    onedeg = [v for v in topology.nodes() if deg[v] == 1]  # they are 80

    # we select as caches nodes with highest degrees
    # we use as min degree 6 --> 36 nodes
    # If we changed min degrees, that would be the number of caches we would have:
    # Min degree    N caches
    #  2               160
    #  3               102
    #  4                75
    #  5                50
    #  6                36
    #  7                30
    #  8                26
    #  9                19
    # 10                16
    # 11                12
    # 12                11
    # 13                 7
    # 14                 3
    # 15                 3
    # 16                 2
    caches = [v for v in topology.nodes() if deg[v] >= 6]  # 36 nodes

    # sources are nodes with degree 1 whose neighbour has degree at least 5
    # we assume that sources are nodes connected to a hub
    # they are 44
    sources = [
        v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5
    ]

    # receivers are nodes with degree 1 whose neighbour has degree at most 4
    # we assume that receivers are nodes not well connected to the network
    # they are 36
    receivers = [
        v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5
    ]

    # we set router stacks because some strategies will fail if no stacks
    # are deployed
    routers = [
        v for v in topology.nodes() if v not in caches + sources + receivers
    ]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')

    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)

    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    for v in routers:
        fnss.add_stack(topology, v, 'router', {})

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms',
                                     [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'

    for nc in net_cache:
        size = (float(nc) * n_contents) / len(caches)  # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(
            topology,
            path.join(scenarios_dir,
                      topo_prefix + 'T=%s@C=%s' % (T, C) + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))

    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration,
                                          n_contents, a)
        fnss.write_event_schedule(
            event_schedule,
            path.join(scenarios_dir,
                      es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' %
              (T, str(a), len(event_schedule)))
Example #15
def topology_grid(nc=0.35, **kwargs):

    T = 'GRID'  # name of the topology

    topology = fnss.Topology(nx.grid_2d_graph(10, 10))

    topology = list(nx.connected_component_subgraphs(topology))[0]
    deg = nx.degree(topology)
    nodes = topology.nodes()

    print "Number of NODES #"
    print len(nodes)

            
    receivers = []
    routers = []
    #sources = [2]
    

    source_attachment = random.choice(nodes)
    source = source_attachment + (1000, 1000)
    topology.add_edge(source_attachment, source)
    sources = [source]

    # Random placement of RECEIVERS
    num_receivers = 30
    receivers = random.sample([v for v in nodes if v != source_attachment],
                              num_receivers)

    # Placement of Routers
    routers = [v for v in nodes if v not in receivers]

    print "Number of CLIENTS #"
    print len(receivers)
    print "Number of ROUTERS #"
    print len(routers)

    topology.graph['icr_candidates'] = set(routers)
    
    fnss.add_stack(topology, source, 'source')

    #for v in sources:
    #    fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    for u, v in topology.edges_iter():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'

    C = str(nc)
    fnss.write_topology(topology, path.join(TOPOLOGY_RESOURCES_DIR, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))

    return IcnTopology(topology)
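A note on the tuple arithmetic above: nodes of nx.grid_2d_graph are (row, col) tuples, so concatenating (1000, 1000) yields a 4-tuple that cannot collide with any grid node:

print((3, 7) + (1000, 1000))  # -> (3, 7, 1000, 1000), a distinct, hashable node ID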
Example #16
# the start of this example is truncated in the source; the surviving
# fragment is the tail of a call like the one in Example #17 below
event_schedule = fnss.poisson_process_event_schedule(
                        avg_interval=0.5,  # 0.5 min = 30 sec (assumed)
                        t_start=0,  # starts at 0
                        duration=60,  # 1 hour (assumed)
                        t_unit='min',  # minutes
                        event_generator=rand_failure,  # event gen function
                        links=topology.edges(),  # 'links' argument
                        )

# Now let's create a schedule with link restoration events
# We assume that the duration of a failure is exponentially distributed with
# average 1 minute.
restore_schedule = fnss.EventSchedule(t_start=0, t_unit='min')
for failure_time, event in event_schedule:
    link = event['link']
    restore_time = failure_time + random.expovariate(1)
    restore_schedule.add(time=restore_time,
                         event={'link': link, 'action': 'up'},
                         absolute_time=True
                         )

# Now merge failure and restoration schedules
# After merging events are still chronologically sorted
event_schedule.add_schedule(restore_schedule)

# Note: there are several ways to create this link failure-restoration schedule
# This method has been used to illustrate a variety of functions and methods
# that FNSS provides to manipulate event schedules

# Write topology, event schedule and traffic matrix to files
fnss.write_topology(topology, 'topology.xml')
fnss.write_event_schedule(event_schedule, 'event_schedule.xml')
fnss.write_traffic_matrix(traffic_matrix, 'traffic_matrix.xml')
Example #17
event_schedule = fnss.poisson_process_event_schedule(
    avg_interval=0.5,  # 0.5 min = 30 sec
    t_start=0,  # starts at 0
    duration=60,  # 1 hour
    t_unit="min",  # minutes
    event_generator=rand_failure,  # event gen function
    links=topology.edges(),  # 'links' argument
)

# Now let's create a schedule with link restoration events
# We assume that the duration of a failure is exponentially distributed with
# average 1 minute.
restore_schedule = fnss.EventSchedule(t_start=0, t_unit="min")
for failure_time, event in event_schedule:
    link = event["link"]
    restore_time = failure_time + random.expovariate(1)
    restore_schedule.add(time=restore_time,
                         event={"link": link, "action": "up"},
                         absolute_time=True)

# Now merge failure and restoration schedules
# After merging events are still chronologically sorted
event_schedule.add_schedule(restore_schedule)

# Note: there are several ways to create this link failure-restoration schedule
# This method has been used to illustrate a variety of functions and methods
# that FNSS provides to manipulate event schedules

# Write topology, event schedule and traffic matrix to files
fnss.write_topology(topology, "topology.xml")
fnss.write_event_schedule(event_schedule, "event_schedule.xml")
fnss.write_traffic_matrix(traffic_matrix, "traffic_matrix.xml")
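Both failure-schedule examples above rely on a rand_failure generator and a traffic_matrix defined earlier in the original scripts; a plausible sketch of the generator, mirroring the restoration events:

import random

def rand_failure(links):
    # pick a random link and schedule it to go down
    link = random.choice(list(links))
    return {'link': link, 'action': 'down'}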