Beispiel #1
0
def __calc_nfur(topology, fast, parallelize=True):
    """
    Calculate the Number of Flows under Failure (NFUR) for all nodes of a
    topology.

    Parameters
    ----------
    topology : Topology or DirectedTopology
        The topology
    fast : bool
        If True returns betweenness centrality instead of NFUR
    parallelize : bool
        If True, spawns as many processes as the number of cores of the machine
        using the map-reduce algorithm. It is always recommended unless the
        topology is very small. If *fast* parameter is True, this option is
        ignored, as betweenness centrality calculation cannot be parallelized.

    Returns
    -------
    nfur : dict
        A dictionary of the NFURs of the topology keyed by node. If fast is
        True, returns betweenness centrality instead.

    Notes
    -----
    The time complexity of the calculation of NFUR grows linearly with the
    number of links. A topology with thousands of links is likely to take hours
    to compute on commodity hardware. For this reason, this function can spawn
    as many processes as the number of cores of the machine on which it runs
    and parallelizes the task with a map-reduce algorithm.
    """
    # NFUR does not scale: betweenness must be recomputed once for each
    # removed edge. Parallelization reduces wall-clock time, but runs on
    # large topologies can still be very long.
    betw = nx.betweenness_centrality(topology,
                                     normalized=False,
                                     weight='weight')
    if fast:
        return betw
    # Materialize the edge list: in recent NetworkX versions edges() returns
    # a view object which cannot be sliced/chunked by util.split_list.
    edges = list(topology.edges())
    if not parallelize:
        # Execute the NFUR calculation in one single process.
        # Recommended only if the topology is so small that the overhead of
        # creating new processes outweighs the parallel speedup.
        return __nfur_func(topology, edges, betw)
    try:
        processes = mp.cpu_count()
    except NotImplementedError:
        processes = 32  # upper bound of number of cores on a commodity server
    # Guard against a zero chunk size when there are fewer edges than cores.
    chunk_size = max(1, len(edges) // processes)
    pool = mp.Pool(processes)
    try:
        # map operation: one task per chunk of edges
        edges_chunks = util.split_list(edges, chunk_size)
        args = [(__nfur_func, (topology, chunk, betw))
                for chunk in edges_chunks]
        result = pool.map(util.map_func, args)
    finally:
        # Always release the worker processes (the original code leaked them).
        pool.close()
        pool.join()
    # reduce operation: per node, take the worst-case (max) NFUR over chunks
    return {v: max(r[v] for r in result) for v in betw}
Beispiel #2
0
def __calc_nfur(topology, fast, parallelize=True):
    """
    Calculate the Number of Flows under Failure (NFUR) for all nodes of a
    topology.

    Parameters
    ----------
    topology : Topology or DirectedTopology
        The topology
    fast : bool
        If True returns betweenness centrality instead of NFUR
    parallelize : bool
        If True, spawns as many processes as the number of cores of the machine
        using the map-reduce algorithm. It is always recommended unless the
        topology is very small. If *fast* parameter is True, this option is
        ignored, as betweenness centrality calculation cannot be parallelized.

    Returns
    -------
    nfur : dict
        A dictionary of the NFURs of the topology keyed by node. If fast is
        True, returns betweenness centrality instead.

    Notes
    -----
    The time complexity of the calculation of NFUR grows linearly with the
    number of links. A topology with thousands of links is likely to take hours
    to compute on commodity hardware. For this reason, this function can spawn
    as many processes as the number of cores of the machine on which it runs
    and parallelizes the task with a map-reduce algorithm.
    """
    # NFUR does not scale: betweenness must be recomputed once for each
    # removed edge. Parallelization reduces wall-clock time, but runs on
    # large topologies can still be very long.
    betw = nx.betweenness_centrality(topology, normalized=False,
                                     weight='weight')
    if fast:
        return betw
    # Materialize the edge list: in recent NetworkX versions edges() returns
    # a view object which cannot be sliced/chunked by util.split_list.
    edges = list(topology.edges())
    if not parallelize:
        # Execute the NFUR calculation in one single process.
        # Recommended only if the topology is so small that the overhead of
        # creating new processes outweighs the parallel speedup.
        return __nfur_func(topology, edges, betw)
    try:
        processes = mp.cpu_count()
    except NotImplementedError:
        processes = 32  # upper bound of number of cores on a commodity server
    # Guard against a zero chunk size when there are fewer edges than cores.
    chunk_size = max(1, len(edges) // processes)
    pool = mp.Pool(processes)
    try:
        # map operation: one task per chunk of edges
        edges_chunks = util.split_list(edges, chunk_size)
        args = [(__nfur_func, (topology, chunk, betw))
                for chunk in edges_chunks]
        result = pool.map(util.map_func, args)
    finally:
        # Always release the worker processes (the original code leaked them).
        pool.close()
        pool.join()
    # reduce operation: per node, take the worst-case (max) NFUR over chunks
    return {v: max(r[v] for r in result) for v in betw}
Beispiel #3
0
 def test_remainder(self):
     """Leftover elements that do not fill a chunk form a final short chunk."""
     chunks = util.split_list([1, 2, 3], 2)
     self.assertEqual([[1, 2], [3]], chunks)
Beispiel #4
0
 def test_no_remainder(self):
     """A list whose length is a multiple of the chunk size splits evenly."""
     chunks = util.split_list([1, 2, 3, 4, 5, 6], 2)
     self.assertEqual([[1, 2], [3, 4], [5, 6]], chunks)
Beispiel #5
0
 def test_no_remainder(self):
     """Splitting six elements into chunks of two yields three full chunks."""
     result = util.split_list([1, 2, 3, 4, 5, 6], 2)
     expected = [[1, 2], [3, 4], [5, 6]]
     self.assertEqual(result, expected)
Beispiel #6
0
 def test_remainder(self):
     """Splitting three elements into chunks of two leaves a trailing chunk."""
     result = util.split_list([1, 2, 3], 2)
     expected = [[1, 2], [3]]
     self.assertEqual(result, expected)