Example #1
File: test_sw.py Project: rik0/pynetsym
    def testRun(self):
        sim = watts_strogatz.WS()
        sim.run(
            steps=self.starting_network_size,
            rewiring_probability=self.rewiring_probability,
            lattice_connections=self.lattice_connections,
            starting_network_size=self.starting_network_size)

        with sim.graph.handle as graph:
            self.assertEqual(
                self.comparison_graph.number_of_nodes(),
                graph.number_of_nodes())
            self.assertEqual(
                self.comparison_graph.number_of_edges(),
                graph.number_of_edges())

            if False:
                self.assertAlmostEqual(
                    nx.diameter(self.comparison_graph),
                    nx.diameter(graph),
                    delta=1.
                )
                self.assertAlmostEqual(
                    nx.average_shortest_path_length(self.comparison_graph),
                    nx.average_shortest_path_length(graph),
                    delta=1.
                )
def compare_graphs(graph):
    n = nx.number_of_nodes(graph)
    m = nx.number_of_edges(graph)
    k = np.mean(list(nx.degree(graph).values()))
    erdos = nx.erdos_renyi_graph(n, p=m/float(n*(n-1)/2))
    barabasi = nx.barabasi_albert_graph(n, m=int(k)-7)
    small_world = nx.watts_strogatz_graph(n, int(k), p=0.04)
    print(' ')
    print('Compare the number of edges')
    print(' ')
    print('My network: ' + str(nx.number_of_edges(graph)))
    print('Erdos: ' + str(nx.number_of_edges(erdos)))
    print('Barabasi: ' + str(nx.number_of_edges(barabasi)))
    print('SW: ' + str(nx.number_of_edges(small_world)))
    print(' ')
    print('Compare average clustering coefficients')
    print(' ')
    print('My network: ' + str(nx.average_clustering(graph)))
    print('Erdos: ' + str(nx.average_clustering(erdos)))
    print('Barabasi: ' + str(nx.average_clustering(barabasi)))
    print('SW: ' + str(nx.average_clustering(small_world)))
    print(' ')
    print('Compare average path length')
    print(' ')
    print('My network: ' + str(nx.average_shortest_path_length(graph)))
    print('Erdos: ' + str(nx.average_shortest_path_length(erdos)))
    print('Barabasi: ' + str(nx.average_shortest_path_length(barabasi)))
    print('SW: ' + str(nx.average_shortest_path_length(small_world)))
    print(' ')
    print('Compare graph diameter')
    print(' ')
    print('My network: ' + str(nx.diameter(graph)))
    print('Erdos: ' + str(nx.diameter(erdos)))
    print('Barabasi: ' + str(nx.diameter(barabasi)))
    print('SW: ' + str(nx.diameter(small_world)))
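
A minimal usage sketch for compare_graphs, assuming NetworkX 1.x (where nx.degree returns a dict) and a connected input graph whose mean degree is above 7, so that the Barabasi-Albert parameter int(k) - 7 stays positive; the test graph below is purely illustrative:

import networkx as nx

# connected 10-regular graph, mean degree k = 10, so int(k) - 7 = 3
G = nx.random_regular_graph(10, 100)
compare_graphs(G)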
Example #3
 def make_graph(self,save_graph=True):
     graph = nx.DiGraph()
     all_tweets = [tweet for page in self.results for tweet in page['results']]
     for tweet in all_tweets:
         rt_sources = self.get_rt_sources(tweet["text"])
         if not rt_sources: continue 
         for rt_source in rt_sources:
             graph.add_edge(rt_source, tweet["from_user"], {"tweet_id": tweet["id"]})
     #--Calculate graph summary statistics
     if nx.is_connected(graph.to_undirected()):
         diameter  = nx.diameter(graph.to_undirected())         
         average_shortest_path = nx.average_shortest_path_length(graph.to_undirected())
         print 'Diameter: ', diameter
         print 'Average Shortest Path: ',average_shortest_path
     else:
          print "Graph is not connected so calculating the diameter and average shortest path length on all connected components."
          diameter = []
          average_shortest_path = []
          for subgraph in nx.connected_component_subgraphs(graph.to_undirected()):
              diameter.append(nx.diameter(subgraph))
              average_shortest_path.append(nx.average_shortest_path_length(subgraph))
          from numpy import median
          from scipy.stats import scoreatpercentile
          print 'Diameter: ',median(diameter),u'\xB1',str(scoreatpercentile(diameter,75)-scoreatpercentile(diameter,25))
          print 'Average Path Length :',median(average_shortest_path),u'\xB1',str(scoreatpercentile(average_shortest_path,75)-scoreatpercentile(average_shortest_path,25))
     degree_sequence=sorted(nx.degree(graph).values(),reverse=True) # degree sequence
        
     import matplotlib.pyplot as plt
     plt.loglog(degree_sequence,'b-',marker='o')
     plt.title("Distribution of Degrees for %s tweets" %(self.drug_name), fontsize=20)
     plt.ylabel("Degree", fontsize=20)
     plt.xlabel("Rank", fontsize=20)
     
     # draw graph in inset
     ax = plt.axes([0.35,0.25,0.55,0.55])
     plt.axis('off')
     nx.draw(graph, ax=ax, alpha=0.8, with_labels=False)
     
     plt.savefig("degree_distribution_%s.png"%(self.drug_name.replace(' ','_')), dpi=300)
     plt.close()
     if save_graph:
         output_file = self.drug_name.replace(' ','_') + '.dot'
         try:
             nx.drawing.write_dot(graph,output_file)
             print 'Graph saved as ',output_file
         except (ImportError, UnicodeEncodeError) as e:
             dot = ['"%s" -> "%s" [tweetid=%s]' % (node1,node2,graph[node1][node2]['tweet_id']) 
                     for node1,node2, in graph.edges()]
             with codecs.open(output_file,'w', encoding='utf-8') as f:
                 f.write('strict digraph G{\n%s\n}' % (';\n'.join(dot),))
                 print 'Saved ',output_file,' by brute force'
     return diameter, average_shortest_path
                 
Example #4
 def getDiameter(self):
     graph = self.getGraph()
     try:
         diameter = nx.diameter(graph)
     # NetworkX will throw an exception if the graph is not connected (~ infinite diameter)
     except nx.NetworkXError:
         return -1
     return diameter
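
The comment above relies on nx.diameter raising nx.NetworkXError for disconnected graphs. A minimal sketch of an alternative fallback, assuming you would rather report the diameter of the largest connected component than the sentinel value -1 (the helper name is hypothetical):

import networkx as nx

def diameter_of_largest_component(graph):
    # diameter of the whole graph when connected, otherwise of its largest component
    if nx.is_connected(graph):
        return nx.diameter(graph)
    largest = max(nx.connected_components(graph), key=len)
    return nx.diameter(graph.subgraph(largest))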
Example #5
def diameter(g, weighted):
    if not weighted:
        return nx.diameter(g)
    else:
        ret = nx.all_pairs_dijkstra_path_length(g)
        return max(map(lambda perSourceDists: max(perSourceDists.values()), ret.values()))
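
The weighted branch above assumes the NetworkX 1.x API, where nx.all_pairs_dijkstra_path_length returns a dict of dicts. In NetworkX 2.x it yields (source, distances) pairs instead, so a rough sketch of the same computation would be:

import networkx as nx

def weighted_diameter_nx2(g, weight="weight"):
    # all_pairs_dijkstra_path_length yields (source, {target: distance}) pairs in NetworkX 2.x
    return max(max(dists.values())
               for _, dists in nx.all_pairs_dijkstra_path_length(g, weight=weight))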
def main():

    calendar = pd.read_csv("calendar.csv")
    G=nx.Graph()


    for r in calendar.index:

        G.add_edge(calendar.ix[r].team_a, calendar.ix[r].team_b)
        #print calendar.ix[r].team_a, calendar.ix[r].team_b

    nx.draw_networkx(G, node_size=100, node_color='b', label='World cup past matches')
    plt.show()


    print 'Diameter:', nx.diameter(G)

    print 'Degree Centrality '
    degree_centrality = nx.degree_centrality(G)
    for w in sorted(degree_centrality, key=degree_centrality.get, reverse=True)[:10]:
        print w, degree_centrality[w]

    print
    print 'Betweenness Centrality'
    betweenness_centrality = nx.betweenness_centrality(G)
    for w in sorted(betweenness_centrality, key=betweenness_centrality.get, reverse=True)[:10]:
        print w, betweenness_centrality[w]
def failure(compagnia):
    adiacenzaFinal = numpy.genfromtxt(("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_{0}.csv".format(compagnia)),delimiter=',',dtype='int')
    grafoFinal = networkx.Graph(adiacenzaFinal)

    graphSize = networkx.number_of_nodes(grafoFinal)
    steps = graphSize
    passo = 1
    i = 0
    ascisse.append(i)
    aziendaFinal.append(compagnia)
    diametro.append(2)
    relSizeGC.append(1)
    
    while (networkx.number_of_nodes(grafoFinal) > passo):
        gradiFinal = pandas.DataFrame(grafoFinal.degree().items(), columns=['index', 'grado'])
        randomante = gradiFinal['index'].values
        randomante = numpy.random.permutation(randomante)

        grafoFinal.remove_node(randomante[0])
    
        giantCluster = max(networkx.connected_component_subgraphs(grafoFinal), key = len)
                            
        i += 100/steps
        ascisse.append(i)
        aziendaFinal.append(compagnia)

        graphSize = networkx.number_of_nodes(grafoFinal)
        diametro.append(networkx.diameter(giantCluster, e=None))
        relSizeGC.append((networkx.number_of_nodes(giantCluster))/(float(graphSize)))
def attaccoPercent(compagnia, steps):
    adiacenzaFinal = numpy.genfromtxt(("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_{0}.csv".format(compagnia)),delimiter=',',dtype='int')
    grafoFinal = networkx.Graph(adiacenzaFinal)

    graphSize = networkx.number_of_nodes(grafoFinal)
    passo = networkx.number_of_nodes(grafoFinal)/steps

    i = 0
    ascisse.append(i)
    aziendaFinal.append(compagnia)
    diametro.append(2)
    relSizeGC.append(1)

    while (networkx.number_of_nodes(grafoFinal) > passo):
        gradiFinal = pandas.DataFrame(grafoFinal.degree().items(), columns=['index', 'grado'])
        gradiFinal.sort(["grado"], ascending=[False], inplace=True)
        sortedIDnode = gradiFinal['index'].values

#        grafoFinal.remove_nodes_from(sortedIDnode[0:passo])
        for identificativo in sortedIDnode:
            if (networkx.number_of_nodes(grafoFinal) > len(sortedIDnode) - passo):
                   grafoFinal.remove_node(identificativo)

        giantCluster = max(networkx.connected_component_subgraphs(grafoFinal), key = len)
        
        i += 100/float(steps)
        ascisse.append(i)
        aziendaFinal.append(compagnia)

        graphSize = networkx.number_of_nodes(grafoFinal)
        diametro.append(networkx.diameter(giantCluster, e=None))
        relSizeGC.append((networkx.number_of_nodes(giantCluster))/(float(graphSize)))
Example #9
def randomFailure(compagnia, steps):
    adiacenzaFinal = numpy.genfromtxt(
        ("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_{0}.csv".format(compagnia)),
        delimiter=",",
        dtype="int",
    )
    grafoFinal = networkx.Graph(adiacenzaFinal)

    graphSize = networkx.number_of_nodes(grafoFinal)
    passo = networkx.number_of_nodes(grafoFinal) / float(steps)

    i = 0
    ascisse.append(i)
    aziendaFinal.append(compagnia)
    diametro.append(2)
    relSizeGC.append(1)

    while networkx.number_of_nodes(grafoFinal) > passo:
        gradiFinal = pandas.DataFrame(grafoFinal.degree().items(), columns=["index", "grado"])
        randomante = gradiFinal["index"].values
        randomante = numpy.random.permutation(randomante)

        for identificativo in randomante:
            if networkx.number_of_nodes(grafoFinal) > len(randomante) - passo:
                grafoFinal.remove_node(identificativo)

        giantCluster = max(networkx.connected_component_subgraphs(grafoFinal), key=len)

        i += 100 / steps
        ascisse.append(i)
        aziendaFinal.append(compagnia)

        graphSize = networkx.number_of_nodes(grafoFinal)
        diametro.append(networkx.diameter(giantCluster, e=None))
        relSizeGC.append((networkx.number_of_nodes(giantCluster)) / (float(graphSize)))
Example #10
File: __init__.py Project: CSB-IG/NinNX
def NetStats(G):
    return { 'radius': nx.radius(G),
             'diameter': nx.diameter(G),
             'connected_components': nx.number_connected_components(G),
             'density' : nx.density(G),
             'shortest_path_length': nx.shortest_path_length(G),
             'clustering': nx.clustering(G)}
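
A quick usage sketch for the statistics dictionary above, assuming a small connected graph (note that nx.shortest_path_length(G) with no source or target returns all-pairs lengths, which can be large):

import networkx as nx

stats = NetStats(nx.petersen_graph())
print(stats['radius'], stats['diameter'], stats['density'])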
Example #11
def write_network(network, time, targets, seed, filename="network.dat"):
    print("\tDetermining network type.")
    if isinstance(network,nx.DiGraph):
        network_type = "Directed"
    else:
        network_type = "Undirected"

    print("\tCalculaing edges and vertices.")
    edges = network.number_of_edges()
    vertices = network.number_of_nodes()
    undirected = network.to_undirected()

    print("\tFinding subgraphs.")
    subgraphs = nx.connected_component_subgraphs(undirected)

    print("\tFinding network diameter.")
    diameter = nx.diameter(subgraphs[0])

    print("\tStoring network parameters")

    out = open(filename, "w")
    out.write("Simulation name: {0}\n\n".format(time))
    out.write("Network properties\n===============\n")
    out.write("Network type: {0}\n".format(network_type))
    out.write("Number of vertices: {0}\n".format(vertices))
    out.write("Number of edges: {0}\n".format(edges))
    out.write("Diameter: {0}\n".format(diameter))

    out.close()
Example #12
    def write_network_characteristics(g):
        nodes = len(g.nodes())
        edges = len(g.edges())
        avg_degree = float(2*edges)/nodes
        max_conn = (nodes*(nodes-1))/2
        clustering = nx.average_clustering(g)
        density = nx.density(g)
        diameter = nx.diameter(g)
        a_p_l = nx.average_shortest_path_length(g)
        conn = nx.is_connected(g)
        n_comp_con = nx.number_connected_components(g)
        # write them on file
        out = open("statistics_giant.csv", "w")
        out.write("#Nodes,#Edges,Avg_Degree, Max Connection, Clustering Coefficient, Density, Diameter , Average Shortest Path ,  Is Connected? , Number Connected Component\n")
        out.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % (nodes, edges, avg_degree , max_conn, clustering, density ,diameter ,a_p_l, conn , n_comp_con))

        g = read_graph("dataset/cutted_graph(0.15).csv")

        #Extract max Giant component
        cc = sorted(nx.connected_component_subgraphs(g), key=len, reverse=True)
        g0 = cc[0]

        degree_distribution(g0)

        write_network_characteristics(g0)
Example #13
def init_tracks(T, root, node_to_tm, edge_to_rate,
        primary_to_tol, Q_primary,
        primary_track, tolerance_tracks, interaction_map):
    """
    Initialize trajectories of all tracks.

    """
    # Initialize blink history and events.
    for track in tolerance_tracks:
        init_blink_history(T, track)
        init_complete_blink_events(T, node_to_tm, edge_to_rate, track)
        track.remove_self_transitions()

    # Initialize the primary trajectory with many incomplete events.
    diameter = nx.diameter(Q_primary)
    ev_to_P_nx = init_incomplete_primary_events(T, node_to_tm, edge_to_rate,
            primary_track, diameter)

    # Sample the state of the primary track.
    sample_primary_transitions(T, root, node_to_tm, edge_to_rate,
            primary_to_tol, ev_to_P_nx,
            primary_track, tolerance_tracks)

    # Remove self-transition events from the primary track.
    primary_track.remove_self_transitions()
Example #14
File: ltDecomp3.py Project: joeyh321/ORCA
def ltDecomposeTestBatFull(dsName, path, outfile, cd, wccOnly, revEdges, undir, diaF, fillF):
    origNet = loadNw(dsName, path, cd, wccOnly, revEdges, undir)
    prodNet = origNet
    # prodNet = copy.deepcopy(origNet)
    # print("dc")
    outfile = open(path + outfile + ".csv", "w")
    intFlag = False
    print("NW-WIDE MEASURES:\n")

    nodeStr = str(origNet.number_of_nodes())
    edgeStr = str(origNet.number_of_edges())
    avgDeg = str(float(origNet.number_of_edges()) / float(origNet.number_of_nodes()))
    dens = str(nx.density(origNet))
    avgCl = "--"
    # avgCl = str(nx.average_clustering(origNet))

    if diaF:
        print("  Starting dia calc")
        diameter = str(nx.diameter(origNet))
        print("  --> done w. dia calc")
    else:
        diameter = "---"

        # outfile.write("Dataset,NumNodes,NumEdges,avgDeg,dens,avgCl,diameter\n")
        # outfile.write(dsName+","+nodeStr+","+edgeStr+","+avgDeg+","+dens+","+avgCl+","+diameter+"\n")
        # if fillF:
        # 	print("FULL THRESH TEST\n")
        # outfile.write("Dataset,ThreshType,ThreshVal,PercSize,NumNodes,NumEdges,TimeAlg,TimeAlgAndSetup,Check\n")
        # thresh=1.0
        # outfile.write(ltDecomposeNoSetWithCheck(prodNet,thresh,dsName,intFlag,origNet))

    outfile.close()
    print("Done.")
Example #15
def print_graph_info(graph):
  e = nx.eccentricity(graph)
  print 'graph with %u nodes, %u edges' % (len(graph.nodes()), len(graph.edges()))
  print 'radius: %s' %  nx.radius(graph, e) # min e
  print 'diameter: %s' % nx.diameter(graph, e) # max e
  print 'len(center): %s' % len(nx.center(graph, e)) # e == radius
  print 'len(periphery): %s' % len(nx.periphery(graph, e)) # e == diameter
def strongly_connected_components():
    conn = sqlite3.connect("zhihu.db")     
    #following_data = pd.read_sql('select user_url, followee_url from Following where followee_url in (select user_url from User where agree_num > 50000) and user_url in (select user_url from User where agree_num > 50000)', conn)        
    following_data = pd.read_sql('select user_url, followee_url from Following where followee_url in (select user_url from User where agree_num > 10000) and user_url in (select user_url from User where agree_num > 10000)', conn)        
    conn.close()
    
    G = nx.DiGraph()
    cnt = 0
    for d in following_data.iterrows():
        G.add_edge(d[1][0],d[1][1])
        cnt += 1
    print 'links number:', cnt

    scompgraphs = nx.strongly_connected_component_subgraphs(G)
    scomponents = sorted(nx.strongly_connected_components(G), key=len, reverse=True)
    print 'components nodes distribution:', [len(c) for c in scomponents]
    
    #plot graph of each component; calculate average_shortest_path_length of components that have more than 1 node
    index = 0
    print 'average_shortest_path_length of components that have more than 1 node:'
    for tempg in scompgraphs:
        index += 1
        if len(tempg.nodes()) != 1:
            print nx.average_shortest_path_length(tempg)
            print 'diameter', nx.diameter(tempg)
            print 'radius', nx.radius(tempg)
        pylab.figure(index)
        nx.draw_networkx(tempg)
        pylab.show()

    # Components-as-nodes Graph
    cG = nx.condensation(G)
    pylab.figure('Components-as-nodes Graph')
    nx.draw_networkx(cG)
    pylab.show()    
Example #17
def printStats(filename):
	'''
	Converts json adjacency list into networkx to calculate and print the
	graph's
	  - average clustering coefficient
	  - overall clustering coefficient
	  - maximum diameter
	  - average diameter
	  - number of partitions using community.best_partition
	  - modularity of community.best_partition
	'''
	g = makeGraphFromJSON(filename)
	
	print "Average Clustering Coefficient: %f" % nx.average_clustering(g)
	print "Overall Clustering Coefficient: %f" % nx.transitivity(g)
	
	connected_subgraphs = list(nx.connected_component_subgraphs(g))
	largest = max(nx.connected_component_subgraphs(g), key=len)
	print "# Connected Components: %d" % len(connected_subgraphs)
	print "    Maximal Diameter: %d" % nx.diameter(largest)
	print "    Average Diameter: %f" % nx.average_shortest_path_length(largest)

	# Find partition that maximizes modularity using Louvain's algorithm
	part = community.best_partition(g)	
	print "# Paritions: %d" % (max(part.values()) + 1)
	print "Louvain Modularity: %f" % community.modularity(part, g)
Example #18
    def __init__(self,graph,num_hashes,ident_bits):

        # The network graph we are going to use:
        self.graph = graph

        # Assert that the graph is connected:
        assert nx.is_connected(self.graph)

        # Max layer will be the diameter of the graph:
        self.max_distance = nx.diameter(self.graph)

        # Amount of nodes:
        self.num_nodes = self.graph.number_of_nodes()

        # Amount of bits in identity:
        self.ident_bits = ident_bits

        # Maximum size of identity:
        self.max_ident = 2**self.ident_bits

        # Evade the birthday paradox:
        assert (self.num_nodes ** 2.5) <= self.max_ident

        # Amount of cryptographic hash functions to be used:
        self.num_hashes = num_hashes

        # Generate nodes and neighbours links:
        self.gen_nodes()
        self.install_neighbours()
Example #19
 def min_connectivity(self, graph):
     apnc = nx.all_pairs_node_connectivity(graph)
     # start with graph diameter; minimum won't be larger than this
     mc = nx.diameter(graph)
     for targets in apnc.itervalues():
         mc = min(min(targets.itervalues()), mc)
     return mc
Example #20
def NetStats(G,name):
    
    s=0
    d = nx.degree(G)    
    for i in d.values():
        s = s + i
    
    n = len(G.nodes())
    m = len(G.edges())
    k = float(s)/float(n)
    #k = nx.average_node_connectivity(G)
        
    C = nx.average_clustering(G)
    l = nx.average_shortest_path_length(G)
    Cc = nx.closeness_centrality(G)
    d = nx.diameter(G) #The diameter is the maximum eccentricity.
    r = nx.radius(G) #The radius is the minimum eccentricity.


    
    output = "ESTADISITICOS_"+name
    SALIDA = open(output,"w")
    
    SALIDA.write(("Numero de nodos n = %s \n") %  n)
    SALIDA.write(("Numero de aristas m = %s \n") %  m)
    SALIDA.write(("Grado promedio <k> = %s \n") %  k)
        
    SALIDA.write(("Clustering Coeficient = %s \n") %  C)
    SALIDA.write(("Shortest Path Length = %s \n") %  l)
    #SALIDA.write(("Closeness = %s \n") %  Cc)
    SALIDA.write(("Diameter (maximum eccentricity) = %d \n") %  d)
    SALIDA.write(("Radius (minimum eccentricity) = %d \n") %  r)
Example #21
    def run(self, graph, all_disconnected_nodes,a1,a2,node=None):
        '''
        Assign weight to node depending on degree
        '''
        weights = {}
        oweights={}
        
        
        #use the graph diameter (or the node count if the graph is
        #disconnected) to normalise shortest-path distances below
        
        d=len(graph.nodes())
        if nx.is_connected(graph):
            d=nx.diameter(graph)
        
       
        #calculate similarity of nodes based on their shortest-path distance to the target node
        for nodes in all_disconnected_nodes:
            if nx.has_path(graph,node,nodes):
                metric=nx.shortest_path_length(graph, node, nodes)/(d)
            else:
                metric=1

            weights[nodes]=a1*(1-metric)
            oweights[nodes]=a2*(metric)
            
        return Weights(weights,oweights)   
Example #22
File: views.py Project: freyley/nets
def netstats_simple(graph):
    G = graph
    if nx.is_connected(G): 
        d = nx.diameter(G)
        r = nx.radius(G)
    else: 
        d = 'NA - graph is not connected' #should be calculable on an unconnected graph - see example code for hack
        r = 'NA - graph is not connected'
   
#using a dictionary to pack values and variables
    result = {#"""single value measures"""  
              'nn': G.number_of_nodes(),
              'ne': G.number_of_edges(),
              'd': d,
              'r': r,
              'conn': nx.number_connected_components(G),
              'asp': nx.average_shortest_path_length(G), 
#              """number of the largest clique"""
              'cn': nx.graph_clique_number(G),
#              """number of maximal cliques"""
              'mcn': nx.graph_number_of_cliques(G),
#              """transitivity - """
              'tr': nx.transitivity(G),
              #cc = nx.clustering(G) """clustering coefficient"""
              'avgcc': nx.average_clustering(G) } 
#    result['d'] = nx.diameter(G)
    print result
    return result
Example #23
def diameter(connected_component_graphs, file_name):       
    max_diameter = 0
    for connected_component in connected_component_graphs:
        diameter  = nx.diameter(connected_component)
        if max_diameter < diameter:
            max_diameter = diameter
    file_name.write("Diameter: " + str(max_diameter) + "\n")
def calculate(network):
    try:
        n = nx.diameter(network)
    except:
        return 0
 
    return round(n, 7) 
Example #25
def regular_D(n,d,D):
  while True:
    G=nx.random_regular_graph(d,n)
    if nx.is_connected(G):
      diameter = nx.diameter(G)
      if diameter == D:
        return G
def obca(g):
    diameter = nx.diameter(g)
    lb_max = diameter + 1

    # Rank the nodes according to their degree
    results = nx.degree_centrality(g)
    nodes = next(zip(*sorted(results.items(), key=operator.itemgetter(1))))
    results = dict()

    for lb in range(2, lb_max):
        covered_frequency = [0] * len(g.nodes())
        boxes = list()

        for i in range(0, len(nodes)):
            node = nodes[i]

            if covered_frequency[i] > 0:
                continue

            box = list(nx.single_source_shortest_path_length(g, node, lb-1).keys())

            # Verify that all paths within the box have length less than lb
            index = 0
            while True:
                node = box[index]
                # iterate over a slice copy so removals do not shift indices mid-loop
                for neighbor in box[index+1:]:
                    if nx.shortest_path_length(g, node, neighbor) >= lb:
                        box.remove(neighbor)

                index += 1
                if index >= len(box):
                    break

            for node in box:
                node_index = nodes.index(node)
                covered_frequency[node_index] += 1

            boxes.append(box)

        # iterate over a copy because redundant boxes are removed from the list
        for box in list(boxes):
            redundant_box = True

            for node in box:
                node_index = nodes.index(node)
                if covered_frequency[node_index] == 1:
                    redundant_box = False
                    break

            if redundant_box:
                for node in box:
                    node_index = nodes.index(node)
                    covered_frequency[node_index] -= 1
                boxes.remove(box)

        print("lb: {}, boxes: {}, cf: {}".format(lb, boxes, covered_frequency))
        results[lb] = boxes

    return results
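
A minimal usage sketch for the box-covering routine above, assuming the obca definition and its imports (networkx as nx, operator) are in scope; the test graph is just an illustration:

import networkx as nx

g = nx.karate_club_graph()
boxes_per_lb = obca(g)            # boxes for every box size lb in [2, diameter]
for lb, boxes in boxes_per_lb.items():
    print(lb, len(boxes))         # number of boxes needed at each box size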
Example #27
def calc_D(G):
    '''
    A function to calculate the diameter
    
    input parameters:
          G:      A graph in networkX format.
    
    returns:
          D:      The diameter, which is the largest diameter among all the sub
                  components.
    '''

    subD = []
    subGs = list(nx.connected_component_subgraphs(G))
    indsubG = 1
    for H in subGs:
        if len(H.nodes())>1:
            print('D: Subgraph '+str(indsubG)+' with '+str(len(H.nodes()))+' nodes')
            subD.append(nx.diameter(H))
        else:
            subD.append(0)
        indsubG += 1
    # returning the maximum diameter among all the sub components
    D = np.max(subD)
    return D
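
A short usage sketch for calc_D, assuming the function above (and its numpy/networkx imports) is in scope and a NetworkX version that still provides connected_component_subgraphs (removed in 2.4); the disconnected example graph is hypothetical:

import networkx as nx

# two components: a path on 4 nodes (diameter 3) and a single edge (diameter 1)
G = nx.path_graph(4)
G.add_edge(10, 11)
print(calc_D(G))   # expected output: 3, the largest diameter over all components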
def topologyNetx(gestore):
    adiacenza = numpy.genfromtxt("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_{0}.csv".format(gestore),delimiter=',',dtype='int') 
    grafo = networkx.Graph(adiacenza)
    c = networkx.average_clustering(grafo)
    d = networkx.diameter(grafo)
    l = networkx.average_shortest_path_length(grafo)
    return c, d, l
Example #29
def get_diameters(graph):
    connected_components = nx.connected_component_subgraphs(graph)
    print "number of connected components: ", len(connected_components)
    diameters = []
    for subgraph in connected_components:
        diameters.append((len(subgraph), nx.diameter(subgraph)))
    print "diameters: ", diameters
Example #30
def get_nations_network_by_year(year):
  cursor = get_db().cursor()
  cursor.execute("""SELECT reporting, reporting_slug, partner, partner_slug, Flow, expimp,
                    reporting_continent, partner_continent,reporting_type,partner_type
                    FROM flow_joined
                    WHERE reporting NOT LIKE "Worl%%"
                    AND partner NOT LIKE "Worl%%"
                    AND Flow != "null"
                    AND year = %s
                    """%(year)
              )

  table = [list(r) for r in cursor]

  json_sql_response=[]

  for row in table:
    json_sql_response.append({
      "reporting": row[0],
      "reporting_id": row[1],
      "partner": row[2],
      "partner_id": row[3],
      "flow": row[4],
      "expimp": row[5],
      "reporting_continent": row[6],
      "partner_continent": row[7],
      "reporting_type": row[8],
      "partner_type": row[9]
      })


  # Create a graph instance
  G=nx.Graph()

  nodes = []
  for row in table:
    nodes.append(row[1])
    nodes.append(row[3])
    # add edge to the graph
    G.add_edge(row[1], row[3])

  nodes = set(nodes)

  # add nodes to graph
  G.add_nodes_from(nodes)
  if len(G.nodes())>0:
    stats = {
      "average_clustering": nx.average_clustering(G),
      "center": nx.center(G),
      "diameter": nx.diameter(G),
      "eccentricity": nx.eccentricity(G)
    }
  else:
    stats=[]
  json_response = {}
  json_response["stats"] = stats
  json_response["network"] = json_sql_response

  return json.dumps(json_response,encoding="UTF8")
Example #31
    def makeMeasures(self, network, exclude):
        """Make the network measures"""
        # add a conditional for each measure: if it is not in exclude[],
        # time the measurement and store it as a tuple in

        g = network.g
        gu = network.gu
        timings = []

        T = t.time()
        self.N = network.g.number_of_nodes()
        self.E = network.g.number_of_edges()
        self.E_ = network.gu.number_of_edges()
        self.edges = g.edges(data=True)
        self.nodes = g.nodes(data=True)
        timings.append((t.time() - T, "edges and nodes"))

        T = t.time()
        self.degrees = g.degree()
        self.nodes_ = sorted(g.nodes(), key=lambda x: self.degrees[x])
        self.degrees_ = [self.degrees[i] for i in self.nodes_]
        self.in_degrees = g.in_degree()
        self.in_degrees_ = [self.in_degrees[i] for i in self.nodes_]
        self.out_degrees = g.out_degree()
        self.out_degrees_ = [self.out_degrees[i] for i in self.nodes_]
        timings.append((t.time() - T, "in_out_total_degrees"))

        T = t.time()
        self.strengths = g.degree(weight="weight")
        self.nodes__ = sorted(g.nodes(), key=lambda x: self.strengths[x])
        self.strengths_ = [self.strengths[i] for i in self.nodes_]
        self.in_strengths = g.in_degree(weight="weight")
        self.in_strengths_ = [self.in_strengths[i] for i in self.nodes_]
        self.out_strengths = g.out_degree(weight="weight")
        self.out_strengths_ = [self.out_strengths[i] for i in self.nodes_]
        timings.append((t.time() - T, "in_out_total_strengths"))

        # symmetry measures
        self.asymmetries = asymmetries = []
        self.disequilibrium = disequilibriums = []
        self.asymmetries_edge_mean = asymmetries_edge_mean = []
        self.asymmetries_edge_std = asymmetries_edge_std = []
        self.disequilibrium_edge_mean = disequilibrium_edge_mean = []
        self.disequilibrium_edge_std = disequilibrium_edge_std = []
        for node in self.nodes_:
            if not self.degrees[node]:
                asymmetries.append(0.)
                disequilibriums.append(0.)
                asymmetries_edge_mean.append(0.)
                asymmetries_edge_std.append(0.)
                disequilibrium_edge_mean.append(0.)
                disequilibrium_edge_std.append(0.)
            else:
                asymmetries.append(
                    (self.in_degrees[node] - self.out_degrees[node]) /
                    self.degrees[node])
                disequilibriums.append(
                    (self.in_strengths[node] - self.out_strengths[node]) /
                    self.strengths[node])
                edge_asymmetries = ea = []
                edge_disequilibriums = ed = []
                predecessors = g.predecessors(node)
                successors = g.successors(node)
                for pred in predecessors:
                    if pred in successors:
                        ea.append(0.)
                        ed.append(
                            (g[pred][node]['weight'] - g[node][pred]['weight'])
                            / self.strengths[node])
                    else:
                        ea.append(1.)
                        ed.append(g[pred][node]['weight'] /
                                  self.strengths[node])
                for suc in successors:
                    if suc in predecessors:
                        pass
                    else:
                        ea.append(-1.)
                        ed.append(-g[node][suc]['weight'] /
                                  self.strengths[node])
                asymmetries_edge_mean.append(n.mean(ea))
                asymmetries_edge_std.append(n.std(ea))
                disequilibrium_edge_mean.append(n.mean(ed))
                disequilibrium_edge_std.append(n.std(ed))

        if "weighted_directed_betweenness" not in exclude:
            T = t.time()
            self.weighted_directed_betweenness = x.betweenness_centrality(
                g, weight="weight")
            self.weighted_directed_betweenness_ = [
                self.weighted_directed_betweenness[i] for i in self.nodes_
            ]
            timings.append((t.time() - T, "weighted_directed_betweenness"))
        if "unweighted_directed_betweenness" not in exclude:
            T = t.time()
            self.unweighted_directed_betweenness = x.betweenness_centrality(g)
            timings.append((t.time() - T, "unweighted_directed_betweenness"))
        if "weighted_undirected_betweenness" not in exclude:
            T = t.time()
            self.weighted_undirected_betweenness = x.betweenness_centrality(
                gu, weight="weight")
            timings.append((t.time() - T, "weighted_undirected_betweenness"))
        if "unweighted_undirected_betweenness" not in exclude:
            T = t.time()
            self.unweighted_undirected_betweenness = x.betweenness_centrality(gu)
            timings.append((t.time() - T, "unweighted_undirected_betweenness"))
        if "weiner" not in exclude:
            T = t.time()
            self.weiner = x.vitality.weiner_index(g, weight="weight")
            timings.append((t.time() - T, "weiner"))
        if "closeness" not in exclude:
            T = t.time()
            self.closeness = x.vitality.closeness_vitality(g, weight="weight")
            timings.append((t.time() - T, "closeness"))
        if "transitivity" not in exclude:
            T = t.time()
            self.transitivity = x.transitivity(g)
            timings.append((t.time() - T, "transitivity"))
        if "rich_club" not in exclude:
            T = t.time()
            self.rich_club = x.rich_club_coefficient(gu)
            timings.append((t.time() - T, "rich_club"))

        if "weighted_clustering" not in exclude:
            T = t.time()
            self.weighted_clusterings = x.clustering(network.gu,
                                                     weight="weight")
            self.weighted_clusterings_ = [
                self.weighted_clusterings[i] for i in self.nodes_
            ]
            timings.append((t.time() - T, "weighted_clustering"))
        if "clustering" not in exclude:
            T = t.time()
            self.clusterings = x.clustering(network.gu)
            self.clusterings_ = [self.clusterings[i] for i in self.nodes_]
            timings.append((t.time() - T, "clustering"))
        if "triangles" not in exclude:
            T = t.time()
            self.triangles = x.triangles(gu)
            timings.append((t.time() - T, "clustering"))
        if "n_weakly_connected_components" not in exclude:
            T = t.time()
            self.n_weakly_connected_components = x.number_weakly_connected_components(
                g)
            timings.append((t.time() - T, "n_weakly_connected_components"))
        if "n_strongly_connected_components" not in exclude:
            T = t.time()
            self.n_strongly_connected_components = x.number_strongly_connected_components(
                g)
            timings.append((t.time() - T, "n_strongly_connected_components"))
        T = t.time()
        foo = [i for i in x.connected_component_subgraphs(gu)]
        bar = sorted(foo, key=lambda x: x.number_of_nodes(), reverse=True)
        self.component = c = bar[0]
        timings.append((t.time() - T, "component"))
        T = t.time()
        self.diameter = x.diameter(c)
        self.radius = x.radius(c)
        self.center = x.center(c)
        self.periphery = x.periphery(c)
        timings.append((t.time() - T, "radius_diameter_center_periphery"))
        self.timings = timings

        T = t.time()
        self.n_connected_components = x.number_connected_components(gu)
        nodes = []
        nodes_components = [
            foo.nodes() for foo in x.connected_component_subgraphs(gu)
        ][:1]
        for nodes_ in nodes_components:
            nodes += nodes_
        self.periphery_ = nodes
        self.timings = timings
Example #32
# shortest path (demo)
print("########### shortest path (demo)")
sample_shortest_path = nx.shortest_path(G, source="jus", target="doctor")
print("Shortest path between jus and something:", sample_shortest_path)
print("Length of that shortest path: ", len(sample_shortest_path)-1)
# there are other shortest path methods; see Ladd et al tutorial
# diameter # only works on connected graphs

# connected components
print("########### connectedness #####")
print(nx.is_connected(G)) # learn if graph is connected
components = nx.connected_components(G) # get list of components
largest_component = max(components, key=len) # use max() command to find largest one
subgraph = G.subgraph(largest_component) # create subgraph of just the largest component
diameter = nx.diameter(subgraph) # then calculate diameter of subgraph
print("Network diameter of largest component:", diameter)

# triads
print("########### triads ############")
triadic_closure = nx.transitivity(G)
print("Triadic closure:", triadic_closure)
print("for each node, the number of triangles which include it")
for node in G.nodes:
	print(node,":",nx.triangles(G, node)) # gives the number of triangles which include node n as a vertex

# triangle weight
"""
pseudo code
for a given node
if there are 2+ edges
Example #33
#nx.draw_circular(G)

nx.degree(G)

#if we wanted to create another df showing each node and its number of connections, we would use:
conexion = {}
for x in G.nodes:
    conexion[x] = len(G[x])
s = pd.Series(conexion, name='Conexiones')
df2 = s.to_frame().sort_values('Conexiones', ascending=False)
#%%
# Density
nx.density(G)

# Clustering
nx.clustering(G)
# Similar to the previous command
for i in nx.clustering(G).items():
    print(i)

# Average clustering
nx.average_clustering(G)

# Diameter
nx.diameter(G)
diameter = nx.diameter(net.to_undirected())
# %%

# %%
# https://www.youtube.com/watch?v=px7ff2_Jeqw
#  Rob Chew, Peter Baumgartner | Connected: A Social Network Analysis Tutorial with NetworkX
        nodes = list(range(n*n)) # uncomment for 2D lattice network
        
        for i in range(0, n*n):
            for j in range(i, n*n):
                if G.has_edge(i, j):
                    if random.random() < p_rew:
                        w = random.choice(nodes)
                        while w == i or G.has_edge(i, w):
                            w = random.choice(nodes)
                        G.remove_edge(i,j)
                        G.add_edge(i,w)

    # periphery of the graph    
    per = nx.periphery(G)
    # diameter of the graph
    diam = nx.diameter(G)
    
    # pick two nodes in the periphery of the graph
    input_nodes = []
    output_nodes = []    
    for i in range(0,n*n):
        for j in range(0,n*n):
            if nx.shortest_path_length(G,i,j) == diam:
                input_nodes.append(i)
                output_nodes.append(j)
                        
    input_nodes = [input_nodes[0]]
    output_nodes = [output_nodes[0]] 
    
    # Define output matrix C
    C = I
Example #35
    def extract(self, key, g, giant=True):
        '''
        Extract network statistics from a social network, g.
        '''
        if giant:
            g = g.subgraph(max(nx.connected_components(g), key=len)).copy()

        self.num_nodes.append(nx.number_of_nodes(g))

        self.key_list.append(key)

        n = nx.number_of_nodes(g)

        # Social network
        # Extract basic network stats
        self.num_edges.append(nx.number_of_edges(g))
        self.ave_deg.append(2 * nx.number_of_edges(g) / n)
        self.density.append(nx.density(g))
        self.ave_clust.append(nx.average_clustering(g))

        # Calculate network stats that require G is connected.
        if nx.is_connected(g):
            self.ave_dist.append(nx.average_shortest_path_length(g))
            self.diameter.append(nx.diameter(g))
        else:
            self.ave_dist.append(np.nan)
            self.diameter.append(np.nan)

        # Network with only supported edges (ex-post IC)
        g_supp = removeUnsupportedEdges(g)

        self.links_supported.append(
            nx.number_of_edges(g_supp) / nx.number_of_edges(g))

        # Bipartite network to maximize strategyproof information
        # Just records the nodes

        if (n <= 20) & giant:
            g_bi = max_bi_information(g)
        else:
            g_bi = {'max_bi_info': np.nan, 'max_bi_info_nodes': {}}

        # Information measures
        self.info_total.append(InformationMeasure(g))
        self.info_total_friend_only.append(
            InformationMeasure(g, include_self_comparisons=False))
        self.info_expostIC.append(
            InformationMeasure(g_supp, include_self_comparisons=False))
        self.info_SP.append(g_bi['max_bi_info'])

        # Classify comparisons
        comp = classifyComparisons(g)
        self.comp_total.append(comp["Total"] / (n * (n - 1)))
        self.comp_supp.append(comp["Supported"] / (n * (n - 1)))
        self.comp_trans.append(comp["Transitive"] / (n * (n - 1)))
        self.comp_by_three.append(comp["By three"] / (n * (n - 1)))

        # Save the network in a dictionary so we can plot it later
        self.networks[str(key)] = {'g': g, 'g_supp': g_supp, 'g_bi': g_bi}

        print("Loading network #" + str(key),
              end="\r")  # Counter to see progress on network extraction
Example #36
def topographic_metrics(wn):
    # Get the WaterNetworkModel graph
    G = wn.get_graph()

    # Print general topographic information
    print(nx.info(G))

    # Plot node and edge attributes.
    junction_attr = wn.query_node_attribute('elevation',
                                            node_type=wntr.network.Junction)
    pipe_attr = wn.query_link_attribute('length', link_type=wntr.network.Pipe)
    wntr.graphics.plot_network(wn,
                               node_attribute=junction_attr,
                               link_attribute=pipe_attr,
                               title='Node elevation and pipe length',
                               node_size=40,
                               link_width=2)

    # Compute link density
    print("Link density: " + str(nx.density(G)))

    # Compute node degree
    node_degree = dict(G.degree())
    wntr.graphics.plot_network(wn,
                               node_attribute=node_degree,
                               title='Node Degree',
                               node_size=40,
                               node_range=[1, 5])

    # Compute number of terminal nodes
    terminal_nodes = G.terminal_nodes()
    wntr.graphics.plot_network(wn,
                               node_attribute=terminal_nodes,
                               title='Terminal nodes',
                               node_size=40,
                               node_range=[0, 1])
    print("Number of terminal nodes: " + str(len(terminal_nodes)))
    print("   " + str(terminal_nodes))

    # Compute pipes with diameter > threshold
    diameter = 0.508  # m (20 inches)
    pipes = wn.query_link_attribute('diameter', np.greater, diameter)
    wntr.graphics.plot_network(wn,
                               link_attribute=list(pipes.keys()),
                               title='Pipes > 20 inches',
                               link_width=2,
                               link_range=[0, 1])
    print("Number of pipes > 20 inches: " + str(len(pipes)))
    print("   " + str(pipes))

    # Compute nodes with elevation <= threshold
    elevation = 1.524  # m (5 feet)
    nodes = wn.query_node_attribute('elevation', np.less_equal, elevation)
    wntr.graphics.plot_network(wn,
                               node_attribute=list(nodes.keys()),
                               title='Nodes <= 5 ft elevation',
                               node_size=40,
                               node_range=[0, 1])
    print("Number of nodes <= 5 ft elevation: " + str(len(nodes)))
    print("   " + str(nodes))

    # Compute eccentricity, diameter, and average shortest path length
    # These all use an undirected graph
    uG = G.to_undirected()  # undirected graph
    if nx.is_connected(uG):
        ecc = nx.eccentricity(uG)
        wntr.graphics.plot_network(wn,
                                   node_attribute=ecc,
                                   title='Eccentricity',
                                   node_size=40,
                                   node_range=[15, 30])

        print("Diameter: " + str(nx.diameter(uG)))

        ASPL = nx.average_shortest_path_length(uG)
        print("Average shortest path length: " + str(ASPL))

    # Compute cluster coefficient
    clust_coefficients = nx.clustering(nx.Graph(G))
    wntr.graphics.plot_network(wn,
                               node_attribute=clust_coefficients,
                               title='Clustering Coefficient',
                               node_size=40)

    # Compute betweenness centrality
    bet_cen = nx.betweenness_centrality(G)
    wntr.graphics.plot_network(wn,
                               node_attribute=bet_cen,
                               title='Betweenness Centrality',
                               node_size=40,
                               node_range=[0, 0.4])
    central_pt_dom = G.central_point_dominance()
    print("Central point dominance: " + str(central_pt_dom))

    # Compute articulation points
    Nap = list(nx.articulation_points(uG))
    Nap = list(set(Nap))  # get the unique nodes in Nap
    Nap_density = float(len(Nap)) / uG.number_of_nodes()
    print("Density of articulation points: " + str(Nap_density))
    wntr.graphics.plot_network(wn,
                               node_attribute=Nap,
                               title='Articulation Point',
                               node_size=40,
                               node_range=[0, 1])

    # Compute bridges
    bridges = G.bridges()
    wntr.graphics.plot_network(wn,
                               link_attribute=bridges,
                               title='Bridges',
                               link_width=2,
                               link_range=[0, 1])
    Nbr_density = float(len(bridges)) / G.number_of_edges()
    print("Density of bridges: " + str(Nbr_density))

    # Compute spectral gap
    spectral_gap = G.spectral_gap()
    print("Spectral gap: " + str(spectral_gap))

    # Compute algebraic connectivity
    alg_con = G.algebraic_connectivity()
    print("Algebraic connectivity: " + str(alg_con))

    # Critical ratio of defragmentation
    fc = G.critical_ratio_defrag()
    print("Critical ratio of defragmentation: " + str(fc))

    # Compute closeness centrality
    clo_cen = nx.closeness_centrality(G)
    wntr.graphics.plot_network(wn,
                               node_attribute=clo_cen,
                               title='Closeness Centrality',
                               node_size=40)
Example #37
def computeIndicators(committerGraphs, repoReferences, useWeights=False):
   
    projectNames = [] # to host project names
    committersNumbers = [] # to host numbers of committers 
    completenessIndex = [] # to host graph completeness values
    centralizationIndex = [] # to host the graph centrality values 
    clusteringIndex = [] # to host the graph modularity values 
    averagePLs = [] # to host average path lengths
    diameters = [] # to host diameters
    
    #########################################################################################
    ## Analyse the number of edges in respect to the number of committers ###################
    #########################################################################################
    #indicators are only available for undirected graphs
    
    for committerGraph, file in zip(committerGraphs, repoReferences):

        # convert directed graph into undirected graph
        if nx.is_directed(committerGraph):
            committerGraph_undirected = nx.to_undirected(committerGraph) 
            committerGraph_undirected = nx.Graph(committerGraph_undirected)
        else: 
            committerGraph_undirected = committerGraph
        
        # add project to the list and get the corresponding number of committers 
        projectNames.append(file)
        numberOfCommitters = nx.number_of_nodes(committerGraph_undirected) # number of committers in this project
        committersNumbers.append(numberOfCommitters)

        # Completeness
        # ----------
        # if there is more than one committer, we can calculate a completeness
        if numberOfCommitters > 1:
            # calculate the completeness of the graph, that is, the position in the scale between:
            #  - 0 edge (the graph is entirely disconnected)
            #  - n*(n-1)/2, where n is the number of nodes (each node is connected with all other nodes)
            scaleMin = 0
            scaleMax = len(committerGraph_undirected)*(len(committerGraph_undirected)-1)/2
            completenessIndex.append((nx.number_of_edges(committerGraph_undirected)-scaleMin)/(scaleMax-scaleMin))
        else:
            completenessIndex.append(float('nan')) # no Completeness value can be calculated
        
        '''
        Centrality
        ----------
        after "Social Network Analysis: Methods and Applications", Stanley Wasserman, Katherine Faust
        --> Degree is the number of nodes that a focal node is connected to, 
            and measures the involvement of the node in the network
        https://books.google.de/books?id=CAm2DpIqRUIC&printsec=frontcover&redir_esc=y#v=onepage&q=Centrality&f=false
        https://cs.brynmawr.edu/Courses/cs380/spring2013/section02/slides/05_Centrality.pdf
        '''
        
        # get degree of every node
        degree = nx.degree(committerGraph_undirected)
        
        # By Freeman (1977) definition:
        if ((numberOfCommitters-1)*(numberOfCommitters-2)) != 0 and len(degree) != 0:
            # maximum degree
            c_star = max([x[1] for x in degree])
            av = sum([c_star-abs(c_a) for c_a in [x[1] for x in degree]])/((numberOfCommitters-1)*(numberOfCommitters-2))
            centralizationIndex.append(av)
        else:
            centralizationIndex.append(float('nan')) # no Centrality value can be calculated
            
        # Modularity / Clustering
        # ----------
        # after "Generalizations of the clustering coefficient to weighted complex networks", 
        # J. Saramäki, M. Kivelä, J.-P. Onnela, K. Kaski, and J. Kertész,
        # Physical Review E, 75 027105 (2007). 
        # http://jponnela.com/web_documents/a9.pdf
        # Corresponding Networkx function:
        # https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.cluster.clustering.html?highlight=clustering#networkx.algorithms.cluster.clustering
        
        # Note: Self loops are ignored in nx.clustering function!
        if useWeights:
            clustering_coefficients = nx.clustering(committerGraph_undirected, weight='weight')
        else:
            clustering_coefficients = nx.clustering(committerGraph_undirected)
            
        # average over all clustering_coefficients
        if len(clustering_coefficients) != 0:
            clusteringIndex.append(
                sum(clustering_coefficients.values())/len(clustering_coefficients))
        else:
            clusteringIndex.append(float('nan')) # no Modularity value can be calculated

        try:
            averagePLs.append(nx.average_shortest_path_length(committerGraph_undirected))
        except nx.exception.NetworkXError as err:
            averagePLs.append('nan')
        except nx.exception.NetworkXPointlessConcept as err:
            averagePLs.append('nan')
            
        try:
            diameters.append(nx.diameter(committerGraph_undirected))
        except nx.exception.NetworkXError as err:
            diameters.append('nan')
        except ValueError as err:
            diameters.append('nan')
            
        
    return committersNumbers, completenessIndex, centralizationIndex, clusteringIndex, averagePLs, diameters
Example #38
# #### Layout of the social network graph
nx.draw(nG,pos=nx.circular_layout(nG), with_labels=True)
nx.draw(nG,pos=nx.kamada_kawai_layout(nG), with_labels=True)
nx.draw(nG,pos=nx.random_layout(nG), with_labels=True)
nx.draw(nG,pos=nx.spectral_layout(nG), with_labels=True)

### Network statistics
#### Network summary
nx.info(nG)

# #### Density
nx.density(nG)

# #### Diameter
nx.diameter(nG)

#### Clustering coefficient and neighboring nodes
nx.transitivity(nG)
nx.clustering(nG)
list(nG.neighbors('ATL'))

# #### Centrality
nx.degree_centrality(nG)
nx.betweenness_centrality(nG)
nx.closeness_centrality(nG)

# #### Shortest path
len(nx.shortest_path(nG,'ATL','SFO'))

# ### Knowledge graph application
def answer_eight():
    G_sc = answer_six()

    return nx.diameter(G_sc)  # Your Answer Here
Example #40
	G = G.to_undirected()
	print "Year "+str(x)+":"
	print "Number of nodes:", G.number_of_nodes()
	print "Number of isolates:", len(nx.isolates(G))
	G.remove_nodes_from(nx.isolates(G))
	print "Number of nodes after removing isolates:", G.number_of_nodes()
	#print "Graph connected?:", nx.is_connected(G)
	#print "Number of connected components:", nx.number_connected_components(G)
	components = sorted(nx.connected_components(G), key = len, reverse=True)
	#print "Number of connected components:", len(components)
	component1 = G.subgraph(components[0])
	component2 = G.subgraph(components[1])
	component3 = G.subgraph(components[2])
	component4 = G.subgraph(components[3])
	component5 = G.subgraph(components[4])
	component1diameter.append(nx.diameter(component1))
	component2diameter.append(nx.diameter(component2))
	component3diameter.append(nx.diameter(component3))
	component4diameter.append(nx.diameter(component4))
	component5diameter.append(nx.diameter(component5))
	year.append(x)
print component1diameter
print component2diameter
print component3diameter
print component4diameter
print component5diameter
print

plt.figure()
plt.ylim(0,50)
plt.xlim(1974,2006)
Example #41
  ds = [4] + [3 for _ in range(8)]
elif n == 10:
  ds = [3 for _ in range(10)]
elif n == 11:
  ds = [4 for _ in range(3)] + [3 for _ in range(8)]
elif n == 12:
  ds = [4 for _ in range(10)] + [3 for _ in range(2)]
elif n == 13:
  ds = [4 for _ in range(9)] + [3 for _ in range(4)]

edges = sum(ds)/2
  

while 1:
  G=nx.gnm_random_graph(n,edges)
  if nx.is_connected(G):
    degrees = list(G.degree().values())
    degrees.sort(reverse=True)
    flag = True
    for i in range(len(ds)):
      if degrees[i] != ds[i]:
        flag = False
        break
    if flag:
      diameter = nx.diameter(G)
      if diameter == 2:
        print(G)
        nx.draw(G)
        plt.show()
        exit()
Example #42
def _compute_random_network_properties(network_name, n, p):
    G = nx.gnp_random_graph(n, p)
    expected_no_of_edges = random_network_analyzer.calculate_no_of_edges(n, p)
    expected_average_degree = random_network_analyzer.calculate_average_degree(
        n, p)
    expected_degree_distribution_plot_file_name = random_network_analyzer.calculate_degree_prob_distribution(
        network_name, n, p)

    distance_distribution = get_distance_distribution(G)
    distance_prob_distribution = calculate_distance_prob_distribution(
        distance_distribution)
    # distance_prob_distribution_plot_file = plot_and_store_distance_prob_distribution("Random ",distance_prob_distribution)
    # degree_centrality_interactive_graph = random_network_analyzer.plot_degree_centrality(G)
    # betweeness_interactive_graph = random_network_analyzer.plot_betweeness(G)
    # closeness_interactive_graph = random_network_analyzer.plot_closeness(G)
    expected_regime_type = random_network_analyzer.get_regime_type(n, p)
    expected_clustering_coefficient = random_network_analyzer.calculate_clustering_coefficient(
        p)
    # interactive_network_plot = plot_random_interactive_network(n, p)
    print('p ' + str(p))
    print('expected_no_of_nodes ' + str(n))
    print('expected_no_of_edges ' + str(expected_no_of_edges))
    print('expected_average_degree ' + str(expected_average_degree))
    print('expected_regime_type ' + str(expected_regime_type))
    print('expected_clustering_coefficient ' +
          str(expected_clustering_coefficient))

    if (nx.is_connected(G)):
        diameter = nx.diameter(G)
        average_distance = nx.average_shortest_path_length(G)

        print('p ' + str(p))
        print('expected_no_of_nodes ' + str(n))
        print('expected_no_of_edges ' + str(expected_no_of_edges))
        print('expected_average_degree ' + str(expected_average_degree))
        print('expected_regime_type ' + str(expected_regime_type))
        print('expected_clustering_coefficient ' +
              str(expected_clustering_coefficient))
        print('expected_diameter ' + str(diameter))
        print('expected_average_distance ' + str(average_distance))

        return {
            'p': p,
            'expected_no_of_nodes': n,
            'expected_no_of_edges': expected_no_of_edges,
            'expected_average_degree': expected_average_degree,
            'expected_regime_type': expected_regime_type,
            'expected_clustering_coefficient': expected_clustering_coefficient,
            'expected_diameter': diameter,
            'expected_average_distance': average_distance,
            'expected_degree_distribution_plot_file_name':
            expected_degree_distribution_plot_file_name,
            # the distance-distribution plot and the interactive centrality/network
            # plots are produced by the calls commented out above, so their entries
            # are omitted here to avoid referencing undefined names
        }
    else:

        print('p ' + str(p))
        print('expected_no_of_nodes ' + str(n))
        print('expected_no_of_edges ' + str(expected_no_of_edges))
        print('expected_average_degree ' + str(expected_average_degree))
        print('expected_regime_type ' + str(expected_regime_type))
        print('expected_clustering_coefficient ' +
              str(expected_clustering_coefficient))

        return {
            'p': p,
            'expected_no_of_nodes': n,
            'expected_no_of_edges': expected_no_of_edges,
            'expected_average_degree': expected_average_degree,
            'expected_regime_type': expected_regime_type,
            'expected_clustering_coefficient': expected_clustering_coefficient,
            'expected_degree_distribution_plot_file_name':
            expected_degree_distribution_plot_file_name,
            # as above, the plot-related entries are omitted because the calls that
            # would define them are commented out
        }
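The random_network_analyzer helpers used above are not shown here; a minimal sketch of the standard G(n, p) expectations they presumably compute, using only networkx and the textbook formulas (n and p below are arbitrary illustration values):

import math

import networkx as nx

n, p = 200, 0.05
expected_no_of_edges = p * n * (n - 1) / 2      # E[m] for G(n, p)
expected_average_degree = p * (n - 1)           # <k> = p(n - 1)
expected_clustering_coefficient = p             # C = p for G(n, p)
# subcritical if <k> < 1, supercritical if <k> > 1, connected regime if <k> > ln n
expected_regime_type = ('connected' if expected_average_degree > math.log(n)
                        else 'supercritical' if expected_average_degree > 1
                        else 'subcritical')

G = nx.gnp_random_graph(n, p)
print(expected_no_of_edges, G.number_of_edges())
if nx.is_connected(G):
    print(nx.diameter(G), nx.average_shortest_path_length(G))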
Example #43
0
            link1 = random.choice(range(len(A[k])))
            link2 = random.choice(range(link1 + 2,
                                        link1 + 2 + len(A[k]) - 3)) % len(A[k])
            #if flatten(A).count(A[k][link1])<6 and flatten(A).count(A[k][link2])<6:
            #H.add_edge(A[k][link1],A[k][link2])
            H.add_edge(A[k][link1], A[k][link2])

            Als = sum([len(x) * (len(x) - 3) / 2 for x in A])
            #print alen
            ll = sum([(-1.5 + np.sqrt(1.5**2 + 2 * ii) + 3) * alen.count(ii) *
                      ii / float(Als) for ii in looplens])
            #print ll
            #ll=sum([len(x)*len(x)*(len(x)-3) for x in A])//Als
            p[i][j] = (4 + ll) / float(3 * ll)

        Diam[j] = nx.diameter(H)
        D = nx.Graph()
        if len(A) > 1:
            D.add_nodes_from(range(len(A)))
            for l in range(len(A)):
                for i in range(l + 1, len(A)):
                    #if len([val for val in A[l] if val in A[i]])>0:
                    if len([val for val in A[l] if val in A[i]]) > 1:
                        D.add_edge(l, i)
            Diam2[j] = nx.diameter(D)
            Vol[j] = diameter_size(D)

    #meandias[nrange-mini,0]=n
    #meandias[nrange-mini,1]=np.mean(Diam)
    #meandias2[nrange-mini,0]=n
    #meandias2[nrange-mini,1]=np.mean(Diam2)
Example #44
0
def extract_activity_graph(G, activity_uri, activity_id):
    emit(u"Extracting ego graph for {}".format(activity_id))
    app.logger.info(u"Extracting graph for {} ({})".format(
        activity_uri, activity_id))

    try:
        sG = extract_ego_graph(G, activity_uri)
    except Exception as e:
        emit(
            u"Could not extract ego graph for {}/{} (Bug in NetworkX?)".format(
                activity_id, activity_uri))
        app.logger.warning(
            u"Could not extract ego graph for {}/{} (Bug in NetworkX?)".format(
                activity_id, activity_uri))
        app.logger.warning(e)
        return

    app.logger.debug("Original graph: {} nodes\nEgo graph: {} nodes".format(
        len(G.nodes()), len(sG.nodes())))

    # Set node type for the activity_uri to 'origin'
    sG.nodes[activity_uri]['type'] = 'origin'

    app.logger.debug(u"Assigning weights to edges")
    emit("Assigning weights to edges")

    # Get start and end nodes (those without incoming or outgoing edges, respectively)
    start_nodes = [n for n in sG.nodes() if sG.in_degree(n) == 0]
    end_nodes = [n for n in sG.nodes() if sG.out_degree(n) == 0]

    edge_weights = {}
    try:
        # Walk all edges, and assign weights
        edge_weights = walk_weights(graph=sG,
                                    pending_nodes=set(start_nodes),
                                    edge_weights={},
                                    visited=set())
    except Exception as e:
        emit("ERROR: Provenance trace contains cycles: {}".format(e.message))
        app.logger.error("ERROR: Provenance trace contains cycles: {}".format(
            e.message))
        raise e

    # Check to make sure that the edge weights dictionary has the same number of keys as edges in the ego graph
    app.logger.debug("Check {}/{}".format(len(edge_weights.keys()),
                                          len(sG.edges())))
    nx.set_edge_attributes(sG, edge_weights, 'value')
    del edge_weights
    # nx.set_node_attributes(sG,'value',node_weights)

    # Convert to JSON
    g_json = json_graph.node_link_data(sG)  # node-link format to serialize

    # Get number of start and end nodes to determine ideal width of the viewport
    start_nodes = len(start_nodes)
    end_nodes = len(end_nodes)
    max_degree = 1
    for n, d in sG.nodes(data=True):
        if sG.in_degree(n) > max_degree:
            max_degree = sG.in_degree(n)
        if sG.out_degree(n) > max_degree:
            max_degree = sG.out_degree(n)

    # Set width to the largest of # end nodes, # start nodes, or the maximum degree
    width = max([end_nodes, start_nodes, max_degree])

    app.logger.debug(u"Computing graph diameter {} ({})".format(
        activity_uri, activity_id))
    try:
        diameter = nx.diameter(sG.to_undirected())
    except Exception:
        app.logger.warning(
            "Could not determine diameter, setting to arbitrary value of 25")
        emit("Could not determine diameter, setting to arbitrary value of 25")
        diameter = 25

    types = len(set(nx.get_node_attributes(sG, 'type').values()))

    if types > 11:
        types = 11
    elif types < 3:
        types = 3

    app.logger.debug(u"Done extracting graph for {} ({})".format(
        activity_uri, activity_id))
    return g_json, width, types, diameter
Example #45
0
def obca(g):
    diameter = nx.diameter(g)
    lb_max = diameter + 1

    # Rank the nodes according to their degree
    results = nx.degree_centrality(g)
    nodes = next(zip(*sorted(results.items(), key=operator.itemgetter(1))))
    results = dict()

    results[1] = [[node] for node in g.nodes()]
    for lb in range(2, lb_max):
        covered_frequency = [0] * len(g.nodes())
        boxes = list()

        for i in range(0, len(nodes)):
            node = nodes[i]

            if covered_frequency[i] > 0:
                continue

            box = list(nx.single_source_shortest_path_length(g, node, lb-1).keys())

            # Verify that all paths within the box have the length less then lb
            index = 0
            while True:
                node = box[index]
                j = index+1

                while j < len(box):
                    neighbor = box[j]

                    if nx.shortest_path_length(g, node, neighbor) >= lb:
                        # removing shifts later items left, so do not advance j
                        box.remove(neighbor)
                    else:
                        j += 1

                index += 1
                if index >= len(box):
                    break

            for node in box:
                node_index = nodes.index(node)
                covered_frequency[node_index] += 1

            boxes.append(box)

        # iterate over a copy so redundant boxes can be removed from the list safely
        for box in list(boxes):
            redundant_box = True

            for node in box:
                node_index = nodes.index(node)
                if covered_frequency[node_index] == 1:
                    redundant_box = False
                    break

            if redundant_box:
                for node in box:
                    node_index = nodes.index(node)
                    covered_frequency[node_index] -= 1
                boxes.remove(box)

        # print("lb: {}, boxes: {}, cf: {}".format(lb, boxes, covered_frequency))
        results[lb] = boxes

    # at lb_max a single box containing every node covers the whole graph
    results[lb_max] = [list(g.nodes())]

    # print(results)
    return results
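A minimal usage sketch for the box-covering routine above, assuming obca and its imports (networkx as nx, operator) are already in scope; the karate-club graph is just a convenient small, connected test case:

import networkx as nx

G = nx.karate_club_graph()
boxes_per_lb = obca(G)
for lb, boxes in sorted(boxes_per_lb.items()):
    # number of boxes needed to cover the graph at box size lb
    print(lb, len(boxes))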
Example #46
0
def extended_stats(G,
                   connectivity=False,
                   anc=False,
                   ecc=False,
                   bc=False,
                   cc=False):
    """
    Calculate extended topological stats and metrics for a graph.

    Many of these algorithms have an inherently high time complexity. Global
    topological analysis of large complex networks is extremely time consuming
    and may exhaust computer memory. Consider using function arguments to not
    run metrics that require computation of a full matrix of paths if they
    will not be needed.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    connectivity : bool
        if True, calculate node and edge connectivity
    anc : bool
        if True, calculate average node connectivity
    ecc : bool
        if True, calculate shortest paths, eccentricity, and topological
        metrics that use eccentricity
    bc : bool
        if True, calculate node betweenness centrality
    cc : bool
        if True, calculate node closeness centrality

    Returns
    -------
    stats : dict
        dictionary of network measures containing the following elements (some
        only calculated/returned optionally, based on passed parameters):

          - avg_neighbor_degree
          - avg_neighbor_degree_avg
          - avg_weighted_neighbor_degree
          - avg_weighted_neighbor_degree_avg
          - degree_centrality
          - degree_centrality_avg
          - clustering_coefficient
          - clustering_coefficient_avg
          - clustering_coefficient_weighted
          - clustering_coefficient_weighted_avg
          - pagerank
          - pagerank_max_node
          - pagerank_max
          - pagerank_min_node
          - pagerank_min
          - node_connectivity
          - node_connectivity_avg
          - edge_connectivity
          - eccentricity
          - diameter
          - radius
          - center
          - periphery
          - closeness_centrality
          - closeness_centrality_avg
          - betweenness_centrality
          - betweenness_centrality_avg

    """
    stats = dict()

    # create DiGraph from the MultiDiGraph, for those metrics that need it
    D = utils_graph.get_digraph(G, weight="length")

    # create undirected Graph from the DiGraph, for those metrics that need it
    Gu = nx.Graph(D)

    # get largest strongly connected component, for those metrics that require
    # strongly connected graphs
    Gs = utils_graph.get_largest_component(G, strongly=True)

    # average degree of the neighborhood of each node, and average for the graph
    avg_neighbor_degree = nx.average_neighbor_degree(G)
    stats["avg_neighbor_degree"] = avg_neighbor_degree
    stats["avg_neighbor_degree_avg"] = sum(
        avg_neighbor_degree.values()) / len(avg_neighbor_degree)

    # avg weighted degree of neighborhood of each node, and average for graph
    avg_wtd_nbr_deg = nx.average_neighbor_degree(G, weight="length")
    stats["avg_weighted_neighbor_degree"] = avg_wtd_nbr_deg
    stats["avg_weighted_neighbor_degree_avg"] = sum(
        avg_wtd_nbr_deg.values()) / len(avg_wtd_nbr_deg)

    # degree centrality for a node is the fraction of nodes it is connected to
    degree_centrality = nx.degree_centrality(G)
    stats["degree_centrality"] = degree_centrality
    stats["degree_centrality_avg"] = sum(
        degree_centrality.values()) / len(degree_centrality)

    # calculate clustering coefficient for the nodes
    stats["clustering_coefficient"] = nx.clustering(Gu)

    # average clustering coefficient for the graph
    stats["clustering_coefficient_avg"] = nx.average_clustering(Gu)

    # calculate weighted clustering coefficient for the nodes
    stats["clustering_coefficient_weighted"] = nx.clustering(Gu,
                                                             weight="length")

    # average clustering coefficient (weighted) for the graph
    stats["clustering_coefficient_weighted_avg"] = nx.average_clustering(
        Gu, weight="length")

    # pagerank: a ranking of the nodes in the graph based on the structure of
    # the incoming links
    pagerank = nx.pagerank(D, weight="length")
    stats["pagerank"] = pagerank

    # node with the highest page rank, and its value
    pagerank_max_node = max(pagerank, key=lambda x: pagerank[x])
    stats["pagerank_max_node"] = pagerank_max_node
    stats["pagerank_max"] = pagerank[pagerank_max_node]

    # node with the lowest page rank, and its value
    pagerank_min_node = min(pagerank, key=lambda x: pagerank[x])
    stats["pagerank_min_node"] = pagerank_min_node
    stats["pagerank_min"] = pagerank[pagerank_min_node]

    # if True, calculate node and edge connectivity
    if connectivity:

        # node connectivity is the minimum number of nodes that must be removed
        # to disconnect G or render it trivial
        stats["node_connectivity"] = nx.node_connectivity(Gs)

        # edge connectivity is equal to the minimum number of edges that must be
        # removed to disconnect G or render it trivial
        stats["edge_connectivity"] = nx.edge_connectivity(Gs)
        utils.log("Calculated node and edge connectivity")

    # if True, calculate average node connectivity
    if anc:
        # mean number of internally node-disjoint paths between each pair of
        # nodes in G, i.e., the expected number of nodes that must be removed to
        # disconnect a randomly selected pair of non-adjacent nodes
        stats["node_connectivity_avg"] = nx.average_node_connectivity(G)
        utils.log("Calculated average node connectivity")

    # if True, calculate shortest paths, eccentricity, and topological metrics
    # that use eccentricity
    if ecc:
        # precompute shortest paths between all nodes for eccentricity-based
        # stats
        sp = {
            source: dict(
                nx.single_source_dijkstra_path_length(Gs,
                                                      source,
                                                      weight="length"))
            for source in Gs.nodes()
        }

        utils.log("Calculated shortest path lengths")

        # eccentricity of a node v is the maximum distance from v to all other
        # nodes in G
        eccentricity = nx.eccentricity(Gs, sp=sp)
        stats["eccentricity"] = eccentricity

        # diameter is the maximum eccentricity
        diameter = nx.diameter(Gs, e=eccentricity)
        stats["diameter"] = diameter

        # radius is the minimum eccentricity
        radius = nx.radius(Gs, e=eccentricity)
        stats["radius"] = radius

        # center is the set of nodes with eccentricity equal to radius
        center = nx.center(Gs, e=eccentricity)
        stats["center"] = center

        # periphery is the set of nodes with eccentricity equal to the diameter
        periphery = nx.periphery(Gs, e=eccentricity)
        stats["periphery"] = periphery

    # if True, calculate node closeness centrality
    if cc:
        # closeness centrality of a node is the reciprocal of the sum of the
        # shortest path distances from u to all other nodes
        closeness_centrality = nx.closeness_centrality(G, distance="length")
        stats["closeness_centrality"] = closeness_centrality
        stats["closeness_centrality_avg"] = sum(
            closeness_centrality.values()) / len(closeness_centrality)
        utils.log("Calculated closeness centrality")

    # if True, calculate node betweenness centrality
    if bc:
        # betweenness centrality of a node is the sum of the fraction of
        # all-pairs shortest paths that pass through node
        # networkx 2.4+ implementation cannot run on Multi(Di)Graphs, so use DiGraph
        betweenness_centrality = nx.betweenness_centrality(D, weight="length")
        stats["betweenness_centrality"] = betweenness_centrality
        stats["betweenness_centrality_avg"] = sum(
            betweenness_centrality.values()) / len(betweenness_centrality)
        utils.log("Calculated betweenness centrality")

    utils.log("Calculated extended stats")
    return stats
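The ecc block above reuses a single all-pairs shortest-path computation for several eccentricity-based metrics; a minimal standalone sketch of that same pattern with plain networkx (the weighted example graph is only for illustration):

import networkx as nx

G = nx.les_miserables_graph()  # small connected graph with 'weight' edge attributes
sp = {source: dict(nx.single_source_dijkstra_path_length(G, source, weight="weight"))
      for source in G.nodes()}
eccentricity = nx.eccentricity(G, sp=sp)     # max distance from each node
print(nx.diameter(G, e=eccentricity))        # maximum eccentricity
print(nx.radius(G, e=eccentricity))          # minimum eccentricity
print(nx.center(G, e=eccentricity))          # nodes whose eccentricity equals the radius
print(nx.periphery(G, e=eccentricity))       # nodes whose eccentricity equals the diameter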
Example #47
0
File: stats.py Project: qiaofei32/osmnx
def extended_stats(G,
                   connectivity=False,
                   anc=False,
                   ecc=False,
                   bc=False,
                   cc=False):
    """
    Calculate extended topological stats and metrics for a graph. Global topological analysis of large complex networks is extremely 
    time consuming and may exhaust computer memory. Consider using function arguments to not run metrics that require computation of
    a full matrix of paths if they will not be needed.
    
    Parameters
    ----------
    G : graph
    connectivity : bool, if True, calculate node and edge connectivity
    anc : bool, if True, calculate average node connectivity
    ecc : bool, if True, calculate shortest paths, eccentricity, and topological metrics that use eccentricity
    bc : bool, if True, calculate node betweenness centrality
    cc : bool, if True, calculate node closeness centrality
    
    Returns
    -------
    stats : dict, containing the following elements (some only calculated/returned optionally, based on passed parameters):
        avg_neighbor_degree
        avg_neighbor_degree_avg
        avg_weighted_neighbor_degree
        avg_weighted_neighbor_degree_avg
        degree_centrality
        degree_centrality_avg
        clustering_coefficient
        clustering_coefficient_avg
        clustering_coefficient_weighted
        clustering_coefficient_weighted_avg
        pagerank
        pagerank_max_node
        pagerank_max
        pagerank_min_node
        pagerank_min
        node_connectivity
        node_connectivity_avg
        edge_connectivity
        eccentricity
        diameter
        radius
        center
        periphery
        closeness_centrality
        closeness_centrality_avg
        betweenness_centrality
        betweenness_centrality_avg
    """

    stats = {}
    full_start_time = time.time()

    # create a DiGraph from the MultiDiGraph, for those metrics that require it
    G_dir = nx.DiGraph(G)

    # create an undirected Graph from the MultiDiGraph, for those metrics that require it
    G_undir = nx.Graph(G)

    # get the largest strongly connected component, for those metrics that require strongly connected graphs
    G_strong = get_largest_component(G, strongly=True)

    # average degree of the neighborhood of each node, and average for the graph
    avg_neighbor_degree = nx.average_neighbor_degree(G)
    stats['avg_neighbor_degree'] = avg_neighbor_degree
    stats['avg_neighbor_degree_avg'] = sum(
        avg_neighbor_degree.values()) / len(avg_neighbor_degree)

    # average weighted degree of the neighborhood of each node, and average for the graph
    avg_weighted_neighbor_degree = nx.average_neighbor_degree(G,
                                                              weight='length')
    stats['avg_weighted_neighbor_degree'] = avg_weighted_neighbor_degree
    stats['avg_weighted_neighbor_degree_avg'] = sum(
        avg_weighted_neighbor_degree.values()) / len(
            avg_weighted_neighbor_degree)

    # degree centrality for a node is the fraction of nodes it is connected to
    degree_centrality = nx.degree_centrality(G)
    stats['degree_centrality'] = degree_centrality
    stats['degree_centrality_avg'] = sum(
        degree_centrality.values()) / len(degree_centrality)

    # calculate clustering coefficient for the nodes
    stats['clustering_coefficient'] = nx.clustering(G_undir)

    # average clustering coefficient for the graph
    stats['clustering_coefficient_avg'] = nx.average_clustering(G_undir)

    # calculate weighted clustering coefficient for the nodes
    stats['clustering_coefficient_weighted'] = nx.clustering(G_undir,
                                                             weight='length')

    # average clustering coefficient (weighted) for the graph
    stats['clustering_coefficient_weighted_avg'] = nx.average_clustering(
        G_undir, weight='length')

    # pagerank: a ranking of the nodes in the graph based on the structure of the incoming links
    pagerank = nx.pagerank(G_dir, weight='length')
    stats['pagerank'] = pagerank

    # node with the highest page rank, and its value
    pagerank_max_node = max(pagerank, key=lambda x: pagerank[x])
    stats['pagerank_max_node'] = pagerank_max_node
    stats['pagerank_max'] = pagerank[pagerank_max_node]

    # node with the lowest page rank, and its value
    pagerank_min_node = min(pagerank, key=lambda x: pagerank[x])
    stats['pagerank_min_node'] = pagerank_min_node
    stats['pagerank_min'] = pagerank[pagerank_min_node]

    # if True, calculate node and edge connectivity
    if connectivity:
        start_time = time.time()

        # node connectivity is the minimum number of nodes that must be removed to disconnect G or render it trivial
        stats['node_connectivity'] = nx.node_connectivity(G_strong)

        # edge connectivity is equal to the minimum number of edges that must be removed to disconnect G or render it trivial
        stats['edge_connectivity'] = nx.edge_connectivity(G_strong)
        log('Calculated node and edge connectivity in {:,.2f} seconds'.format(
            time.time() - start_time))

    # if True, calculate average node connectivity
    if anc:
        # mean number of internally node-disjoint paths between each pair of nodes in G
        # i.e., the expected number of nodes that must be removed to disconnect a randomly selected pair of non-adjacent nodes
        start_time = time.time()
        stats['node_connectivity_avg'] = nx.average_node_connectivity(G)
        log('Calculated average node connectivity in {:,.2f} seconds'.format(
            time.time() - start_time))

    # if True, calculate shortest paths, eccentricity, and topological metrics that use eccentricity
    if ecc:
        # precompute shortest paths between all nodes for eccentricity-based stats
        start_time = time.time()
        sp = {
            source: nx.single_source_dijkstra_path_length(G_strong,
                                                          source,
                                                          weight='length')
            for source in G_strong.nodes()
        }
        log('Calculated shortest path lengths in {:,.2f} seconds'.format(
            time.time() - start_time))

        # eccentricity of a node v is the maximum distance from v to all other nodes in G
        eccentricity = nx.eccentricity(G_strong, sp=sp)
        stats['eccentricity'] = eccentricity

        # diameter is the maximum eccentricity
        diameter = nx.diameter(G_strong, e=eccentricity)
        stats['diameter'] = diameter

        # radius is the minimum eccentricity
        radius = nx.radius(G_strong, e=eccentricity)
        stats['radius'] = radius

        # center is the set of nodes with eccentricity equal to radius
        center = nx.center(G_strong, e=eccentricity)
        stats['center'] = center

        # periphery is the set of nodes with eccentricity equal to the diameter
        periphery = nx.periphery(G_strong, e=eccentricity)
        stats['periphery'] = periphery

    # if True, calculate node closeness centrality
    if cc:
        # closeness centrality of a node is the reciprocal of the sum of the shortest path distances from u to all other nodes
        start_time = time.time()
        closeness_centrality = nx.closeness_centrality(G, distance='length')
        stats['closeness_centrality'] = closeness_centrality
        stats['closeness_centrality_avg'] = sum(
            closeness_centrality.values()) / len(closeness_centrality)
        log('Calculated closeness centrality in {:,.2f} seconds'.format(
            time.time() - start_time))

    # if True, calculate node betweenness centrality
    if bc:
        # betweenness centrality of a node is the sum of the fraction of all-pairs shortest paths that pass through node
        start_time = time.time()
        betweenness_centrality = nx.betweenness_centrality(G, weight='length')
        stats['betweenness_centrality'] = betweenness_centrality
        stats['betweenness_centrality_avg'] = sum(
            betweenness_centrality.values()) / len(betweenness_centrality)
        log('Calculated betweenness centrality in {:,.2f} seconds'.format(
            time.time() - start_time))

    log('Calculated extended stats in {:,.2f} seconds'.format(time.time() -
                                                              full_start_time))
    return stats
import networkx as nx
import network_attack as na

graph = nx.erdos_renyi_graph(n=300, p=0.1)
print("the total number of edges:")
n_edges = nx.number_of_edges(graph)
print(n_edges)
print("the total number of nodes:")
n_nodes = nx.number_of_nodes(graph)
print(n_nodes)
n_cc = nx.number_connected_components(graph)
print("the total number of connected components:")
print(n_cc)
print("the density of the graph:")
print(nx.diameter(graph))
avg_deg = sum([d for (n, d) in nx.degree(graph)]) / float(graph.number_of_nodes())
print("the average degree is " + str(avg_deg))

closeness_centrality = nx.closeness_centrality
pagerank_centrality = nx.pagerank
betweenness_centrality = nx.betweenness_centrality

#   GCC ATTACK
clo_gcc_att = na.gcc_attack(graph, closeness_centrality)
pgr_gcc_att = na.gcc_attack(graph, pagerank_centrality)
bet_gcc_att = na.gcc_attack(graph, betweenness_centrality)
rnd_gcc = na.rnd_gcc_attack(graph, 1)
na.attack_measures_plot("THe Giant Component Component Size", clo_gcc_att, pgr_gcc_att, bet_gcc_att, rnd_gcc)

#   Diameter ATTACK
clo_dia_att = na.diameter_attack(graph, closeness_centrality)
Example #49
0
File: strategy.py Project: Jeswang/icarus
 def __init__(self, view, controller, max_stretch=0.2):
     super(HashroutingHybridAM, self).__init__(view, controller)
     self.max_stretch = nx.diameter(view.topology()) * max_stretch
# density is (number of actual edges) / (number of potential edges)
# for an undirected graph with n nodes and m edges
#     number potential edges = n(n-1)/2
#     number actual edges = m
#     density = 2m/[n*(n-1)]
# for a directed graph with n nodes and m edges
#     number potential edges = n(n-1)
#     number actual edges = m
#     density = m/[n*(n-1)]
print("Density:")
print(networkx.density(G))
print()
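# A tiny worked check of the density formula above (illustration only, not part of
# the original script): a path graph on 4 nodes has m = 3 actual edges and
# n(n-1)/2 = 6 potential edges, so its density is 2*3 / (4*3) = 0.5.
P = networkx.path_graph(4)
print(networkx.density(P))  # 0.5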

# diameter is the longest distance between any two nodes in the graph
print("Diameter:")
print(networkx.diameter(G))
print()

# Adjacency Matrix - ex: a 1 in cell A-B means there’s an edge between them
#      A B C D E
#    A 0 1 0 1 1
#    B 1 0 0 1 0
#    C 0 0 0 1 1
#    D 1 1 1 0 0
#    E 1 0 1 0 0
print("Adjacency Matrix:")
# G.adjacency_list() is gone in NetworkX 2.x; iterate the adjacency view instead
for n, neighbors in G.adjacency():
    print(n, list(neighbors))

pos = networkx.spring_layout(G)
def answer_eight():
    G = answer_six()
    return nx.diameter(G)
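Several examples in this collection guard nx.diameter() against disconnected input, since it raises NetworkXError when the graph is not connected; a minimal helper sketch of that recurring pattern (safe_diameter is a made-up name for illustration):

import networkx as nx


def safe_diameter(G):
    """Diameter of G, falling back to the largest connected component if needed."""
    und = G.to_undirected() if G.is_directed() else G
    if nx.is_connected(und):
        return nx.diameter(und)
    largest = max(nx.connected_components(und), key=len)
    return nx.diameter(und.subgraph(largest))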
Example #52
0
def get_imn_features(imn_list, event_traj2evntlist):

    nbr_locations = list()
    nbr_movements = list()
    nbr_reg_locations = list()
    nbr_reg_movements = list()
    radius_of_gyration = list()
    regular_radius_of_gyration = list()
    entropy = list()
    rentropy = list()
    avg_mov_length = list()
    std_mov_length = list()
    avg_mov_duration = list()
    std_mov_duration = list()
    avg_reg_mov_length = list()
    std_reg_mov_length = list()
    avg_reg_mov_duration = list()
    std_reg_mov_duration = list()

    density = list()
    triangles = list()
    clustering_coefficient = list()
    degree = list()
    indegree = list()
    outdegree = list()
    diameter = list()
    eccentricity = list()
    assortativity = list()

    l1_count = list()
    l2_count = list()
    l3_count = list()
    l1_indegree = list()
    l2_indegree = list()
    l3_indegree = list()
    l1_outdegree = list()
    l2_outdegree = list()
    l3_outdegree = list()
    l1_dcentrality = list()
    l2_dcentrality = list()
    l3_dcentrality = list()
    l1_bcentrality = list()
    l2_bcentrality = list()
    l3_bcentrality = list()
    l1_events = defaultdict(list)
    l2_events = defaultdict(list)
    l3_events = defaultdict(list)

    l1l2_count = list()
    l2l1_count = list()
    l1l3_count = list()
    l3l1_count = list()
    l2l3_count = list()
    l3l2_count = list()
    l1l2_betweenness = list()
    l2l1_betweenness = list()
    l1l3_betweenness = list()
    l3l1_betweenness = list()
    l2l3_betweenness = list()
    l3l2_betweenness = list()
    l1l2_events = defaultdict(list)
    l2l1_events = defaultdict(list)
    l1l3_events = defaultdict(list)
    l3l1_events = defaultdict(list)
    l2l3_events = defaultdict(list)
    l3l2_events = defaultdict(list)

    mov_event_entropy = defaultdict(list)

    for m0m1, imn in imn_list.items():
        if imn is None:
            continue
        # print(m0m1, imn.keys())
        # print(json.dumps(clear_tuples4json(imn), default=agenda_converter))
        nbr_locations.append(imn['n_locs'])
        nbr_movements.append(imn['n_movs'])
        nbr_reg_locations.append(imn['n_reg_locs'])
        nbr_reg_movements.append(imn['n_reg_movs'])
        radius_of_gyration.append(imn['rg'])
        regular_radius_of_gyration.append(imn['rrg'])
        entropy.append(imn['entropy'])
        rentropy.append(imn['rentropy'])
        avg_mov_length.append(imn['avg_mov_length'])
        std_mov_length.append(imn['std_mov_length'])
        avg_mov_duration.append(string2timedelta(imn['avg_mov_duration']).total_seconds())
        std_mov_duration.append(string2timedelta(imn['std_mov_duration']).total_seconds())
        avg_reg_mov_length.append(imn['avg_reg_mov_length'])
        std_reg_mov_length.append(imn['std_reg_mov_length'])
        avg_reg_mov_duration.append(string2timedelta(imn['avg_reg_mov_duration']).total_seconds())
        std_reg_mov_duration.append(string2timedelta(imn['std_reg_mov_duration']).total_seconds())

        graph = imn['graph']
        if not isinstance(graph, nx.DiGraph):
            graph = json_graph.node_link_graph(imn['graph'], directed=True, multigraph=False,
                                               attrs={'link': 'edges', 'source': 'from', 'target': 'to'})
        density.append(nx.density(graph))
        triangles.append(np.mean(list(nx.triangles(nx.to_undirected(graph)).values())))
        clustering_coefficient.append(nx.average_clustering(graph))
        degree.append(np.mean(list(dict(nx.to_undirected(graph).degree()).values())))
        indegree.append(np.mean(list(dict(graph.in_degree()).values())))
        outdegree.append(np.mean(list(dict(graph.out_degree()).values())))
        if nx.is_connected(nx.to_undirected(graph)):
            diameter.append(nx.diameter(nx.to_undirected(graph)))
            eccentricity.append(np.mean(list(nx.eccentricity(nx.to_undirected(graph)).values())))
            assortativity.append(nx.degree_assortativity_coefficient(nx.to_undirected(graph)))
        else:
            und = nx.to_undirected(graph)
            # nx.connected_component_subgraphs was removed in NetworkX 2.4;
            # build the largest connected component's subgraph directly
            Gc = und.subgraph(max(nx.connected_components(und), key=len))
            diameter.append(nx.diameter(Gc))
            eccentricity.append(np.mean(list(nx.eccentricity(Gc).values())))
            assortativity.append(nx.degree_assortativity_coefficient(Gc))

        # print(imn['location_features'].keys())
        # print(list(imn['location_features'].keys())[0], type(list(imn['location_features'].keys())[0]))
        if isinstance(list(imn['location_features'].keys())[0], int):
            l1, l2, l3 = 0, 1, 2
        else:
            l1, l2, l3 = '0', '1', '2'

        l1_count.append(imn['location_features'][l1]['loc_support'])
        l2_count.append(imn['location_features'][l2]['loc_support'])
        if l3 in imn['location_features']:
            l3_count.append(imn['location_features'][l3]['loc_support'])
        in_degree = dict(graph.in_degree())
        out_degree = dict(graph.out_degree())
        dcentrality = nx.degree_centrality(graph)
        bcentrality = nx.betweenness_centrality(graph)
        l1_indegree.append(in_degree[0])
        l2_indegree.append(in_degree[1])
        if 2 in in_degree:
            l3_indegree.append(in_degree[2])
        l1_outdegree.append(out_degree[0])
        l2_outdegree.append(out_degree[1])
        if 2 in out_degree:
            l3_outdegree.append(out_degree[2])
        l1_dcentrality.append(dcentrality[0])
        l2_dcentrality.append(dcentrality[1])
        if 2 in dcentrality:
            l3_dcentrality.append(dcentrality[2])
        l1_bcentrality.append(bcentrality[0])
        l2_bcentrality.append(bcentrality[1])
        if 2 in bcentrality:
            l3_bcentrality.append(bcentrality[2])

        l1_nbr_events_type = defaultdict(int)
        l2_nbr_events_type = defaultdict(int)
        l3_nbr_events_type = defaultdict(int)

        l1l2_nbr_events_type = defaultdict(int)
        l2l1_nbr_events_type = defaultdict(int)
        l1l3_nbr_events_type = defaultdict(int)
        l3l1_nbr_events_type = defaultdict(int)
        l2l3_nbr_events_type = defaultdict(int)
        l3l2_nbr_events_type = defaultdict(int)

        mov_event_count = defaultdict(lambda: defaultdict(int))

        for tid in imn['traj_location_from_to']:
            for evnt in event_traj2evntlist[tid]:
                if imn['traj_location_from_to'][tid][1] == 0:
                    l1_nbr_events_type[evnt['event_type']] += 1
                elif imn['traj_location_from_to'][tid][1] == 1:
                    l2_nbr_events_type[evnt['event_type']] += 1
                elif imn['traj_location_from_to'][tid][1] == 2:
                    l3_nbr_events_type[evnt['event_type']] += 1

                if imn['traj_location_from_to'][tid][0] == 0 and imn['traj_location_from_to'][tid][1] == 1:
                    l1l2_nbr_events_type[evnt['event_type']] += 1
                elif imn['traj_location_from_to'][tid][0] == 1 and imn['traj_location_from_to'][tid][1] == 0:
                    l2l1_nbr_events_type[evnt['event_type']] += 1
                elif imn['traj_location_from_to'][tid][0] == 0 and imn['traj_location_from_to'][tid][1] == 2:
                    l1l3_nbr_events_type[evnt['event_type']] += 1
                elif imn['traj_location_from_to'][tid][0] == 2 and imn['traj_location_from_to'][tid][1] == 0:
                    l3l1_nbr_events_type[evnt['event_type']] += 1
                elif imn['traj_location_from_to'][tid][0] == 1 and imn['traj_location_from_to'][tid][1] == 2:
                    l2l3_nbr_events_type[evnt['event_type']] += 1
                elif imn['traj_location_from_to'][tid][0] == 2 and imn['traj_location_from_to'][tid][1] == 1:
                    l3l2_nbr_events_type[evnt['event_type']] += 1

                lft = imn['traj_location_from_to'][tid][1]
                mov_event_count[evnt['event_type']][lft] += 1

        for event_type in ['Q', 'B', 'A', 'C', 'stop', 'start']:
            if event_type in l1_nbr_events_type:
                l1_events[event_type].append(l1_nbr_events_type[event_type])
            else:
                l1_events[event_type].append(0)
            if event_type in l2_nbr_events_type:
                l2_events[event_type].append(l2_nbr_events_type[event_type])
            else:
                l2_events[event_type].append(0)
            if event_type in l3_nbr_events_type:
                l3_events[event_type].append(l3_nbr_events_type[event_type])
            else:
                l3_events[event_type].append(0)

            if event_type in l1l2_nbr_events_type:
                l1l2_events[event_type].append(l1l2_nbr_events_type[event_type])
            else:
                l1l2_events[event_type].append(0)
            if event_type in l2l1_nbr_events_type:
                l2l1_events[event_type].append(l2l1_nbr_events_type[event_type])
            else:
                l2l1_events[event_type].append(0)

            if event_type in l1l3_nbr_events_type:
                l1l3_events[event_type].append(l1l3_nbr_events_type[event_type])
            else:
                l1l3_events[event_type].append(0)
            if event_type in l3l1_nbr_events_type:
                l3l1_events[event_type].append(l3l1_nbr_events_type[event_type])
            else:
                l3l1_events[event_type].append(0)

            if event_type in l2l3_nbr_events_type:
                l2l3_events[event_type].append(l2l3_nbr_events_type[event_type])
            else:
                l2l3_events[event_type].append(0)
            if event_type in l3l2_nbr_events_type:
                l3l2_events[event_type].append(l3l2_nbr_events_type[event_type])
            else:
                l3l2_events[event_type].append(0)

            if event_type in mov_event_count:
                vals = list(mov_event_count[event_type].values())
                mov_event_entropy[event_type].append(calculate_entropy(vals, classes=len(vals)))
            else:
                mov_event_entropy[event_type].append(0.0)

        l1l2_count.append(imn['location_nextlocs'][l1].get(l2, 0))
        l2l1_count.append(imn['location_nextlocs'][l2].get(l1, 0))
        l1l3_count.append(imn['location_nextlocs'][l1].get(l3, 0))
        if l3 in imn['location_nextlocs']:
            l3l1_count.append(imn['location_nextlocs'][l3].get(l1, 0))
            l2l3_count.append(imn['location_nextlocs'][l2].get(l3, 0))
            l3l2_count.append(imn['location_nextlocs'][l3].get(l2, 0))
        else:
            l3l1_count.append(0)
            l2l3_count.append(0)
            l3l2_count.append(0)
        edge_betweenness = nx.edge_betweenness_centrality(graph)
        l1l2_betweenness.append(edge_betweenness.get((0, 1), 0))
        l2l1_betweenness.append(edge_betweenness.get((1, 0), 0))
        l1l3_betweenness.append(edge_betweenness.get((0, 2), 0))
        l3l1_betweenness.append(edge_betweenness.get((2, 0), 0))
        l2l3_betweenness.append(edge_betweenness.get((1, 2), 0))
        l3l2_betweenness.append(edge_betweenness.get((2, 1), 0))

    imn_temporal_features = get_imn_temporal_features(imn_list)

    features = {
        'nbr_locations': np.mean(nbr_locations),
        'nbr_movements': np.mean(nbr_movements),
        'nbr_reg_locations': np.mean(nbr_reg_locations),
        'nbr_reg_movements': np.mean(nbr_reg_movements),
        'radius_of_gyration': np.mean(radius_of_gyration),
        'regular_radius_of_gyration': np.mean(regular_radius_of_gyration),
        'entropy': np.mean(entropy),
        'rentropy': np.mean(rentropy),
        'avg_mov_length': np.mean(avg_mov_length),
        'std_mov_length': np.mean(std_mov_length),
        'avg_mov_duration': np.mean(avg_mov_duration),
        'std_mov_duration': np.mean(std_mov_duration),
        # 'avg_reg_mov_length': np.mean(avg_reg_mov_length),
        # 'std_reg_mov_length': np.mean(std_reg_mov_length),
        'avg_reg_mov_duration': np.mean(avg_reg_mov_duration),
        'std_reg_mov_duration': np.mean(std_reg_mov_duration),

        'density': np.mean(density),
        'triangles': np.mean(triangles),
        'clustering_coefficient': np.mean(clustering_coefficient),
        'avg_degree': np.mean(degree),
        'avg_indegree': np.mean(indegree),
        'avg_outdegree': np.mean(outdegree),
        'diameter': np.mean(diameter),
        'eccentricity': np.mean(eccentricity),
        'assortativity': np.mean(assortativity),

        'l1_count': np.mean(l1_count),
        'l2_count': np.mean(l2_count),
        'l3_count': np.mean(l3_count),
        'l1_indegree': np.mean(l1_indegree),
        'l2_indegree': np.mean(l2_indegree),
        'l3_indegree': np.mean(l3_indegree),
        'l1_outdegree': np.mean(l1_outdegree),
        'l2_outdegree': np.mean(l2_outdegree),
        'l3_outdegree': np.mean(l3_outdegree),
        'l1_dcentrality': np.mean(l1_dcentrality),
        'l2_dcentrality': np.mean(l2_dcentrality),
        'l3_dcentrality': np.mean(l3_dcentrality),
        'l1_bcentrality': np.mean(l1_bcentrality),
        'l2_bcentrality': np.mean(l2_bcentrality),
        'l3_bcentrality': np.mean(l3_bcentrality),

        'l1l2_count': np.mean(l1l2_count),
        'l2l1_count': np.mean(l2l1_count),
        'l1l3_count': np.mean(l1l3_count),
        'l3l1_count': np.mean(l3l1_count),
        'l2l3_count': np.mean(l2l3_count),
        'l3l2_count': np.mean(l3l2_count),
        'l1l2_betweenness': np.mean(l1l2_betweenness),
        'l2l1_betweenness': np.mean(l2l1_betweenness),
        'l1l3_betweenness': np.mean(l1l3_betweenness),
        'l3l1_betweenness': np.mean(l3l1_betweenness),
        'l2l3_betweenness': np.mean(l2l3_betweenness),
        'l3l2_betweenness': np.mean(l3l2_betweenness),
    }

    features.update(imn_temporal_features)

    for event_type in ['Q', 'B', 'A', 'C', 'stop', 'start']:
        features['l1_%s' % event_type] = np.mean(l1_events[event_type])
        features['l2_%s' % event_type] = np.mean(l2_events[event_type])
        features['l3_%s' % event_type] = np.mean(l3_events[event_type])
        features['l1l2_%s' % event_type] = np.mean(l1l2_events[event_type])
        features['l2l1_%s' % event_type] = np.mean(l2l1_events[event_type])
        features['l1l3_%s' % event_type] = np.mean(l1l3_events[event_type])
        features['l3l1_%s' % event_type] = np.mean(l3l1_events[event_type])
        features['l2l3_%s' % event_type] = np.mean(l2l3_events[event_type])
        features['l3l2_%s' % event_type] = np.mean(l3l2_events[event_type])
        features['mov_entropy_%s' % event_type] = np.mean(mov_event_entropy[event_type])

    for k, v in features.items():
        if np.isnan(v):
            features[k] = -1

    return features