Example #1
 def test_read_write_event_schedule(self):
     action = ['read_email', 'watch_video']
     schedule = fnss.deterministic_process_event_schedule(20, 0, 801, 'ms',
                                                          self.event_gen,
                                                          0.5, action=action)
     time, event = schedule[2]
     tmp_es_file = path.join(TMP_DIR, 'event-schedule.xml')
     fnss.write_event_schedule(schedule, tmp_es_file)
     read_schedule = fnss.read_event_schedule(tmp_es_file)
     self.assertEqual(len(schedule), len(read_schedule))
     read_time, read_event = read_schedule[2]
     self.assertEqual(time, read_time)
     self.assertEqual(event, read_event)
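For reference, an EventSchedule behaves like a sequence of (time, event) pairs, which is what the test above relies on. A minimal standalone sketch of the same round trip, with an illustrative event generator (the generator and file name here are assumptions, not part of the test suite):

import fnss

def event_gen(p, action=None):
    # illustrative generator: an event is just a dict of attributes
    return {'action': action[0], 'p': p}

schedule = fnss.deterministic_process_event_schedule(20, 0, 801, 'ms',
                                                     event_gen,
                                                     0.5, action=['read_email'])
fnss.write_event_schedule(schedule, 'event-schedule.xml')
for time, event in fnss.read_event_schedule('event-schedule.xml'):
    print(time, event)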
Example #2
 def test_read_write_event_schedule_special_type(self):
     schedule = fnss.EventSchedule()
     event = {'tuple_param': (1, 2, 3),
              'dict_param': {'a': 1, 'b': 2},
              'list_param':[1, 'hello', 0.3]}
     schedule.add(1, event)
     tmp_es_file = path.join(TMP_DIR, 'event-schedule-special.xml')
     fnss.write_event_schedule(schedule, tmp_es_file)
     read_schedule = fnss.read_event_schedule(tmp_es_file)
     self.assertEqual(len(schedule), len(read_schedule))
     _, read_event = read_schedule[0]
     self.assertEqual(event, read_event)
     self.assertEqual(tuple, type(read_event['tuple_param']))
     self.assertEqual(list, type(read_event['list_param']))
     self.assertEqual(dict, type(read_event['dict_param']))
     self.assertEqual(event['dict_param'], read_event['dict_param'])
     self.assertEqual(event['list_param'], read_event['list_param'])
     self.assertEqual(event['tuple_param'], read_event['tuple_param'])
Example #3
def scenario_simple_test():
    """
    Make a simple scenario for test purposes
    """
    def gen_event(receivers, contents):
        return {'receiver': choice(receivers), 'content': choice(contents)}
    contents = {5: [1, 2, 3, 4], 7: [5, 6, 7, 8]}
    n_caches = 4
    size = 5
    topology = fnss.ring_topology(n_caches)
    for u in range(n_caches):
        v = u + n_caches
        topology.add_edge(u, v)
        fnss.add_stack(topology, u, 'cache', {'size': size})
        if u % 2 == 0:
            fnss.add_stack(topology, v, 'receiver', {})
        else:
            fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    
    event_schedule = fnss.poisson_process_event_schedule(20, 0, 300, 'ms', 
                                                gen_event, [4, 6], range(1, 9))
    fnss.write_topology(topology, path.join(scenarios_dir, 'TOPO_TEST.xml'))
    fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, 'ES_TEST.xml'))
Example #4
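# note: topology, node_types and left_nodes are defined earlier in the
# original script (not shown in this excerpt)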
right_nodes = [
    nodes for nodes in node_types if node_types[nodes] == 'right_bell'
]

for node in left_nodes:
    fnss.add_application(topology, node, 'receiver', {})

for node in right_nodes:
    fnss.add_application(topology, node, 'source', {})


# now create a function that generates events
def rand_request(source_nodes, receiver_nodes):
    source = random.choice(source_nodes)
    receiver = random.choice(receiver_nodes)
    return {'source': source, 'receiver': receiver}


event_schedule = fnss.poisson_process_event_schedule(
    avg_interval=50,  # 50 ms
    t_start=0,  # starts at 0
    duration=10 * 1000,  # 10 sec
    t_unit='ms',  # milliseconds
    event_generator=rand_request,  # event gen function
    source_nodes=right_nodes,  # rand_request argument
    receiver_nodes=left_nodes  # rand_request argument
)
# Write topology and event schedule to files
fnss.write_topology(topology, 'topology.xml')
fnss.write_event_schedule(event_schedule, 'event_schedule.xml')
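A schedule written this way can be read back later and replayed in chronological order; a brief sketch (handle_request is a hypothetical handler, not an FNSS function):

import fnss

event_schedule = fnss.read_event_schedule('event_schedule.xml')
for time, event in event_schedule:
    # each entry is a (time, event) pair; event is the dict built by rand_request
    handle_request(time, event['source'], event['receiver'])  # hypothetical handler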
Example #5
def scenario_garr(net_cache=[0.01, 0.05], n_contents=100000, alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on GARR topology
    
    Parameters
    ----------
    scenario_id : str
        String identifying the scenario (will be in the filename)
    net_cache : float
        Size of network cache (sum of all caches) normalized by size of content
        population
    n_contents : int
        Size of content population
    alpha : float
        List of alpha of Zipf content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000
    
    T = 'GARR' # name of the topology
    
    topology = fnss.parse_topology_zoo(path.join(scenarios_dir, 'resources/Garr201201.graphml')).to_undirected()
    # sources are nodes representing neighbouring AS's
    sources = [0, 2, 3, 5, 13, 16, 23, 24, 25, 27, 51, 52, 54]
    # receivers are internal nodes with degree = 1
    receivers = [1, 7, 8, 9, 11, 12, 19, 26, 28, 30, 32, 33, 41, 42, 43, 47, 48, 50, 53, 57, 60]
    # caches are all remaining nodes --> 27 caches
    caches = [n for n in topology.nodes() if n not in receivers + sources]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')

    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)
    
    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms',[(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    for nc in net_cache:
        size = (float(nc)*n_contents)/len(caches) # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(topology, path.join(scenarios_dir, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration, n_contents, a)
        fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' % (T, str(a), len(event_schedule)))
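gen_req_schedule, along with module-level names such as scenarios_dir, topo_prefix, es_prefix and internal_link_delay, is defined elsewhere in this project. A plausible sketch of such a helper, built on fnss.poisson_process_event_schedule with Zipf-distributed content identifiers (the Zipf sampling, time units, and warmup handling below are assumptions, not the project's actual implementation):

import bisect
import random
import fnss

def gen_req_schedule(receivers, rate, warmup, duration, n_contents, alpha):
    # Zipf CDF over content IDs 1..n_contents
    pdf = [1.0 / i ** alpha for i in range(1, n_contents + 1)]
    norm = sum(pdf)
    cdf, acc = [], 0.0
    for p in pdf:
        acc += p / norm
        cdf.append(acc)

    def req_event(receivers):
        # draw a content ID by inverting the Zipf CDF
        content = bisect.bisect(cdf, random.random()) + 1
        return {'receiver': random.choice(receivers), 'content': content}

    # requests arrive as a Poisson process with average interval 1/rate;
    # warmup is simply prepended to the measured duration in this sketch
    return fnss.poisson_process_event_schedule(1.0 / rate, 0,
                                               warmup + duration, 'ms',
                                               req_event, receivers)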
Example #6
def scenario_tiscali(net_cache=[0.05], n_contents=100000, alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on Tiscali topology, parsed from RocketFuel dataset
    
    Parameters
    ----------
    scenario_id : str
        String identifying the scenario (will be in the filename)
    net_cache : float
        Size of network cache (sum of all caches) normalized by size of content
        population
    n_contents : int
        Size of content population
    alpha : float
        List of alpha of Zipf content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000
    
    T = 'TISCALI' # name of the topology
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(path.join(scenarios_dir, 'resources/3257.r0.cch')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    
    deg = nx.degree(topology)
    onedeg = [v for v in topology.nodes() if deg[v] == 1] # they are 80
    
    # we select as caches the nodes with the highest degrees
    # we use 6 as the minimum degree --> 36 nodes
    # If we changed the minimum degree, these would be the numbers of caches:
    # Min degree    N caches
    #  2               160
    #  3               102
    #  4                75
    #  5                50
    #  6                36
    #  7                30
    #  8                26
    #  9                19
    # 10                16
    # 11                12
    # 12                11
    # 13                 7
    # 14                 3
    # 15                 3
    # 16                 2
    caches = [v for v in topology.nodes() if deg[v] >= 6] # 36 nodes
    
    # sources are nodes with degree 1 whose neighbour has degree at least 5
    # we assume that sources are nodes connected to a hub
    # they are 44
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5]

    # receivers are nodes with degree 1 whose neighbour has degree at most 4
    # we assume that receivers are nodes not well connected to the network
    # they are 36
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]

    # we set router stacks because some strategies will fail if no stacks
    # are deployed 
    routers = [v for v in topology.nodes() if v not in caches + sources + receivers]

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')

    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)
    
    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    for v in routers:
        fnss.add_stack(topology, v, 'router', {})

    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
            
            
    for nc in net_cache:
        size = (float(nc)*n_contents)/len(caches) # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(topology, path.join(scenarios_dir, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration, n_contents, a)
        fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' % (T, str(a), len(event_schedule)))
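The min-degree table in the comments above can be reproduced with a short sketch like the following (assuming the parsed Tiscali topology and the networkx import from the example):

deg = nx.degree(topology)
for min_degree in range(2, 17):
    n_caches = len([v for v in topology.nodes() if deg[v] >= min_degree])
    print(min_degree, n_caches)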
Example #7
def scenario_geant(net_cache=[0.05], n_contents=100000, alpha=[0.6, 0.8, 1.0]):
    """
    Return a scenario based on GARR topology
    
    Parameters
    ----------
    scenario_id : str
        String identifying the scenario (will be in the filename)
    net_cache : float
        Size of network cache (sum of all caches) normalized by size of content
        population
    n_contents : int
        Size of content population
    alpha : float
        List of alpha of Zipf content distribution
    """
    rate = 12.0
    warmup = 9000
    duration = 36000
    
    T = 'GEANT' # name of the topology
    topology = fnss.parse_topology_zoo(path.join(scenarios_dir, 'resources/Geant2012.graphml')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    
    deg = nx.degree(topology)

    receivers = [v for v in topology.nodes() if deg[v] == 1] # 8 nodes
    
    caches = [v for v in topology.nodes() if deg[v] > 2] # 19 nodes
    
    # attach sources to topology
    source_attachments = [v for v in topology.nodes() if deg[v] == 2] # 13 nodes
    sources = []
    for v in source_attachments:
        u = v + 1000 # node ID of source
        topology.add_edge(v, u)
        sources.append(u)
    
    routers = [v for v in topology.nodes() if v not in caches + sources + receivers]
    
    # randomly allocate contents to sources
    contents = dict([(v, []) for v in sources])
    for c in range(1, n_contents + 1):
        s = choice(sources)
        contents[s].append(c)
    
    for v in sources:
        fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver', {})
    for v in routers:
        fnss.add_stack(topology, v, 'router', {})

    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')
    
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, external_link_delay, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
            
            
    for nc in net_cache:
        size = (float(nc)*n_contents)/len(caches) # size of a single cache
        C = str(nc)
        for v in caches:
            fnss.add_stack(topology, v, 'cache', {'size': size})
        fnss.write_topology(topology, path.join(scenarios_dir, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration, n_contents, a)
        fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' % (T, str(a), len(event_schedule)))
Example #8
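# note: node_types, drones, rand_mobility, rand_request, topology and
# traffic_matrix are defined earlier in the original script (not shown here)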
cloud = [0]
edges = [
    nodes for nodes in node_types
    if node_types[nodes] == 'switch' and nodes != 0
]

event_schedule_mob = fnss.poisson_process_event_schedule(
    avg_interval=5,
    t_start=0,
    duration=5000,
    t_unit='ms',
    event_generator=rand_mobility,  # event gen function
    nodes=drones,  # 'nodes' argument
)

event_schedule2 = fnss.poisson_process_event_schedule(
    avg_interval=10,
    t_start=0,
    duration=5000,
    t_unit='ms',
    event_generator=rand_request,  # event gen function
    source_nodes=cloud,  # rand_request argument
    receiver_nodes=drones  # rand_request argument
)

# save topology to a file
fnss.write_topology(topology, 'topology.xml')
fnss.write_event_schedule(event_schedule, 'event_schedule.xml')
fnss.write_event_schedule(event_schedule2, 'event_schedule2.xml')
fnss.write_event_schedule(event_schedule_mob, 'event_schedule_mob.xml')
fnss.write_traffic_matrix(traffic_matrix, 'traffic_matrix.xml')
Example #9
def scenario_single_cache(net_cache=[0.01], n_contents=100000, alpha=[1.0]):
    """
        Parameters
        ----------
        scenario_id : str
        String identifying the scenario (will be in the filename)
        net_cache : float
        Size of network cache (sum of all caches) normalized by size of content
        population
        n_contents : int
        Size of content population
        alpha : float
        List of alpha of Zipf content distribution
        """
    rate = 4.0
    warmup = 0
    duration = 25000
    numnodes = 2
    
    T = 'SINGLE_CACHE' # name of the topology
    topology = fnss.topologies.simplemodels.line_topology(numnodes)
    #topology = fnss.parse_topology_zoo(path.join(scenarios_dir, 'resources/Geant2012.graphml')).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    
    deg = nx.degree(topology)
    
    nodes = topology.nodes()
    
    #receivers = [v for v in topology.nodes() if deg[v] == 1] # 8 nodes
    receiver = nodes[0]
    
    #caches = [v for v in topology.nodes() if deg[v] > 2] # 19 nodes
    cache = nodes[1]
    
    # attach sources to topology
    #source_attachments = [v for v in topology.nodes() if deg[v] == 2] # 13 nodes
    source_attachment = cache

    #sources = []
    #for v in source_attachments:
    #    u = v + 1000 # node ID of source
    #    topology.add_edge(v, u)
    #    sources.append(u)
    source = source_attachment + 1000
    topology.add_edge(source_attachment, source)
    
    
    #routers = [v for v in topology.nodes() if v not in caches + sources + receivers]
    #router = nodes[1]
    
    # randomly allocate contents to sources
    #contents = dict([(v, []) for v in sources])
    #for c in range(1, n_contents + 1):
    #    s = choice(sources)
    #    contents[s].append(c)
    contents = dict([(source, [])])
    for c in range(1, n_contents + 1):
        contents[source].append(c)
    
    #for v in sources:
    #    fnss.add_stack(topology, v, 'source', {'contents': contents[v]})
    #for v in receivers:
    #    fnss.add_stack(topology, v, 'receiver', {})
    #for v in routers:
    #    fnss.add_stack(topology, v, 'router', {})

    fnss.add_stack(topology, source, 'source', {'contents': contents[source]})
    fnss.add_stack(topology, receiver, 'receiver', {})
    #fnss.add_stack(topology, router, 'router', {})


    
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, internal_link_delay, 'ms')
    
    # label links as internal or external
    #for u, v in topology.edges():
    #    if u in sources or v in sources:
    #        topology.edge[u][v]['type'] = 'external'
            # this prevents sources from being used to route traffic
            #fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            #fnss.set_delays_constant(topology, external_link_delay, 'ms', [(u, v)])
        #else:
        #    topology.edge[u][v]['type'] = 'internal'
    
    topology.edge[source_attachment][source]['type'] = 'external'
    fnss.set_weights_constant(topology, 1000.0, [(source_attachment, source)])
    fnss.set_delays_constant(topology, external_link_delay, 'ms', [(source_attachment, source)])
    topology.edge[receiver][cache]['type'] = 'internal'
    #topology.edge[router][cache]['type'] = 'internal'
    
    for nc in net_cache:
        #size = (float(nc)*n_contents)/len(caches) # size of a single cache
        size = (float(nc)*n_contents)
        C = str(nc)
        fnss.add_stack(topology, cache, 'cache', {'size': size})
        fnss.write_topology(topology, path.join(scenarios_dir, topo_prefix + 'T=%s@C=%s' % (T, C)  + '.xml'))
        print('[WROTE TOPOLOGY] T: %s, C: %s' % (T, C))
    
    receivers = [receiver]
    for a in alpha:
        event_schedule = gen_req_schedule(receivers, rate, warmup, duration, n_contents, a)
        fnss.write_event_schedule(event_schedule, path.join(scenarios_dir, es_prefix + 'T=%s@A=%s' % (T, str(a)) + '.xml'))
        print('[WROTE SCHEDULE] T: %s, Alpha: %s, Events: %d' % (T, str(a), len(event_schedule)))
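To sanity-check a written scenario, the topology can be read back and its stacks inspected. A hedged sketch, assuming fnss.read_topology and fnss.get_stack as the read-side counterparts of the calls used above (the file name is illustrative):

topo = fnss.read_topology(path.join(scenarios_dir,
                                    topo_prefix + 'T=SINGLE_CACHE@C=0.01.xml'))
for v in topo.nodes():
    stack = fnss.get_stack(topo, v)
    if stack is not None:
        name, props = stack  # e.g. ('cache', {'size': 1000.0})
        print(v, name, props)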
Example #10
event_schedule = fnss.poisson_process_event_schedule(
    avg_interval=0.5,  # 0.5 min = 30 sec
    t_start=0,  # starts at 0
    duration=60,  # 60 min = 1 hour
    t_unit="min",  # minutes
    event_generator=rand_failure,  # event gen function
    links=topology.edges(),  # 'links' argument
)

# Now let's create a schedule with link restoration events
# We assume that the duration of a failure is exponentially distributed with
# average 1 minute.
restore_schedule = fnss.EventSchedule(t_start=0, t_unit="min")
for failure_time, event in event_schedule:
    link = event["link"]
    restore_time = failure_time + random.expovariate(1)
    restore_schedule.add(time=restore_time, event={"link": link, "action": "up"}, absolute_time=True)

# Now merge failure and restoration schedules
# After merging events are still chronologically sorted
event_schedule.add_schedule(restore_schedule)

# Note: there are several ways to create this link failure-restoration schedule
# This method has been used to illustrate a variety of functions and methods
# that FNSS provides to manipulate event schedules

# Write topology, event schedule and traffic matrix to files
fnss.write_topology(topology, "topology.xml")
fnss.write_event_schedule(event_schedule, "event_schedule.xml")
fnss.write_traffic_matrix(traffic_matrix, "traffic_matrix.xml")
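
Because each failure spawns exactly one restoration event, the merged schedule is twice as long as the restoration schedule, and add_schedule keeps events chronologically ordered. A quick, illustrative check of both properties:

times = [t for t, _ in event_schedule]
assert times == sorted(times)  # still chronologically sorted after the merge
assert len(event_schedule) == 2 * len(restore_schedule)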