def augmentNodes(g):
    r1 = nx.eigenvector_centrality_numpy(g)
    r2 = nx.degree_centrality(g)
    r3 = nx.betweenness_centrality(g)
    # Weighted load centrality; see M. E. J. Newman, "Scientific collaboration networks: II. Shortest paths, weighted networks, and centrality", Phys. Rev. E 64, 016132 (2001).
    r5 = nx.load_centrality(g, weight='weight')
    r6 = nx.pagerank(g, alpha=0.85, personalization=None, max_iter=100, tol=1e-08, nstart=None, weight='weight')
    
    if g.is_directed():
        r8 = nx.in_degree_centrality(g)
        r9 = nx.out_degree_centrality(g)
#        r10 = nx.hits(g, max_iter=100, tol=1e-08, nstart=None)
    else:
        r4 = nx.communicability_centrality(g)
        r7 = nx.clustering(g, weight='weight')
        
    for x in g.nodes():
        g.node[x]['eigenvector_centrality_numpy'] = r1[x]
        g.node[x]['degree_centrality'] = r2[x]  
        g.node[x]['betweenness_centrality'] = r3[x]
        g.node[x]['load_centrality'] = r5[x]  
        g.node[x]['pagerank'] = r6[x]

        if g.is_directed():
            g.node[x]['in_degree_centrality'] = r8[x]
            g.node[x]['out_degree_centrality'] = r9[x]
#            g.node[x]['hits'] = r10[x]
        else:
            g.node[x]['communicability_centrality'] = r4[x]
            g.node[x]['clustering'] = r7[x]
    return g        
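
A minimal usage sketch for augmentNodes (karate club graph as a stand-in input; it relies on the same networkx 1.x g.node attribute API as the function above):

import networkx as nx

g = augmentNodes(nx.karate_club_graph())
print(g.node[0]['pagerank'], g.node[0]['betweenness_centrality'])
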
def eval_proximity_importance(network,graph_xml):
    '''Returns the proximity between the importance (eigenvector centrality) score distributions of the synthetic network (test) and the real network (goal).'''
    # Reverse the network so that a node's importance reflects the importance of the nodes that point towards it.
    
    if network.is_directed():
        importance_test = nx.eigenvector_centrality_numpy(network.reverse()).values()
    else:
        importance_test = nx.eigenvector_centrality_numpy(network).values()
    
    importance_goal = eval(graph_xml.find('importance').get('value'))
    
    proximity = proximity_distributions_different_size(importance_goal,importance_test)
    return proximity
Example #3
 def calculate_eigenvector(self):
     eigen_attack = []
     G = nx.Graph()
     G.add_nodes_from(range(self.node_num))
     G.add_weighted_edges_from(self.aggregated_list)
     eigen = nx.eigenvector_centrality_numpy(G)
     eigen_sort = sorted(eigen, key=eigen.__getitem__, reverse=True)
     eigen_attack.append(eigen_sort[0])
     # repeatedly delete the currently most eigenvector-central node (targeted attack)
     for num_of_deletion in range(self.node_num // 2 - 1):
         G.remove_node(eigen_sort[0])
         eigen = nx.eigenvector_centrality_numpy(G)
         eigen_sort = sorted(eigen, key=eigen.__getitem__, reverse=True)
         eigen_attack.append(eigen_sort[0])
     return eigen_attack
 def test_eigenvector_v_katz_random(self):
     G = nx.gnp_random_graph(10,0.5, seed=1234)
     l = float(max(eigvals(nx.adjacency_matrix(G).todense())))
     e = nx.eigenvector_centrality_numpy(G)
     k = nx.katz_centrality_numpy(G, 1.0/l)
     for n in G:
         assert_almost_equal(e[n], k[n])
def randomEigenvectorSampling(G_, keptNodes):
    sumEigen = 0.0
    eigenvector = nx.eigenvector_centrality_numpy(G_)
    for node in G_.nodes():
        sumEigen = sumEigen+eigenvector[node]
    probs = []
    picked = []
    for node in G_.nodes():
        probs.append(eigenvector[node]/sumEigen)
    cumEigenProbs = cumulative_sum(probs)
    cumEigenProbs[-1] = 1.0  # guard against floating-point drift in the last bin
    num = 0
    while num < keptNodes:
        number = random.random()  # reseeding from the clock each draw is unnecessary, and time.clock() is gone in Python 3.8+
        for node in range(len(G_.nodes())):
            if number <= cumEigenProbs[node]:
                if G_.nodes()[node] not in picked:
                    print("Adding node " + str(G_.nodes()[node]))
                    picked.append(G_.nodes()[node])
                    num = num + 1
                    break
                else:
                    # print("Collision")
                    break
    return picked
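
Where numpy is available, the cumulative-probability loop above collapses into a single call; a sketch (the function name eigenvector_sample is ours) that samples keptNodes distinct nodes with probability proportional to eigenvector centrality:

import networkx as nx
import numpy as np

def eigenvector_sample(G, kept_nodes):
    centrality = nx.eigenvector_centrality_numpy(G)
    nodes = list(G.nodes())
    weights = np.array([centrality[n] for n in nodes])
    # numpy renormalizes after each draw, which matches the
    # rejection-sampling behaviour of the loop above
    return list(np.random.choice(nodes, size=kept_nodes, replace=False,
                                 p=weights / weights.sum()))
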
    def concepts(self, terms):
        paths = self._synset_paths(terms)
        root = _path_root(paths).split('.')[0]
        self.graph = _create_subgraph(paths, root)

        return sorted(nx.eigenvector_centrality_numpy(self.graph, weight='w').items(),
                      key=lambda x: x[1], reverse=True)
def centrality(net):
    values = {}
    close = nx.closeness_centrality(net, normalized=True)
    eigen = nx.eigenvector_centrality_numpy(net)
    page = nx.pagerank(net)
    bet = nx.betweenness_centrality(net, normalized=True)
    flow_c = nx.current_flow_closeness_centrality(net, normalized=True)
    flow_b = nx.current_flow_betweenness_centrality(net, normalized=True)
    load = nx.load_centrality(net, normalized=True)
    com_c = nx.communicability_centrality(net)
    com_b = nx.communicability_betweenness_centrality(net, normalized=True)
    degree = net.degree()
    
    file3 = open("bl.csv", 'w')
    # write the pairwise correlation of every measure against every other
    for xt in [bet, load, degree, page, flow_b, com_c, com_b, eigen, close, flow_c]:
        for yt in [bet, load, degree, page, flow_b, com_c, com_b, eigen, close, flow_c]:
            corr(xt.values(), yt.values(), file3)
        print()
        file3.write("\n")
    file3.close()
    #plt.plot(x,y, 'o')
    #plt.plot(x, m*x + c, 'r', label='Fitted line')
    #plt.show()
    # for key, item in close.items():
    #     values[key] = [impo.get(key), bet.get(key), flow_b.get(key), load.get(key), com_c.get(key), com_b.get(key)]

    return values  # note: `values` is only populated by the commented-out block above
def analyze_graph(G):    
    #centralities and node metrics
    out_degrees = G.out_degree()
    in_degrees = G.in_degree()
    betweenness = nx.betweenness_centrality(G)
    eigenvector = nx.eigenvector_centrality_numpy(G)
    closeness = nx.closeness_centrality(G)
    pagerank = nx.pagerank(G)
    avg_neighbour_degree = nx.average_neighbor_degree(G)
    redundancy = bipartite.node_redundancy(G)
    load = nx.load_centrality(G)
    hubs, authorities = nx.hits(G)  # nx.hits returns a (hubs, authorities) pair
    vitality = nx.closeness_vitality(G)
    
    for name in G.nodes():
        G.node[name]['out_degree'] = out_degrees[name]
        G.node[name]['in_degree'] = in_degrees[name]
        G.node[name]['betweenness'] = betweenness[name]
        G.node[name]['eigenvector'] = eigenvector[name]
        G.node[name]['closeness'] = closeness[name]
        G.node[name]['pagerank'] = pagerank[name]
        G.node[name]['avg-neigh-degree'] = avg_neighbour_degree[name]
        G.node[name]['redundancy'] = redundancy[name]
        G.node[name]['load'] = load[name]
        G.node[name]['hits-hub'] = hubs[name]
        G.node[name]['hits-authority'] = authorities[name]
        G.node[name]['vitality'] = vitality[name]
        
    #communities
    partitions = community.best_partition(G)
    for member, c in partitions.items():
        G.node[member]['community'] = c   
    
    return G
Example #9
def Centrality(Au):
    """docstring for Centrality"""
    b = nx.betweenness_centrality(Au)
    e = nx.eigenvector_centrality_numpy(Au)
    c = nx.closeness_centrality(Au)
    d = nx.degree_centrality(Au)
    return b, e, c, d
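
A typical call unpacks the four dictionaries (karate club graph as a stand-in):

b, e, c, d = Centrality(nx.karate_club_graph())
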
 def test_P3_unweighted(self):
     """Eigenvector centrality: P3"""
     G=nx.path_graph(3)
     b_answer={0: 0.5, 1: 0.7071, 2: 0.5}
     b=nx.eigenvector_centrality_numpy(G, weight=None)
     for n in sorted(G):
         assert_almost_equal(b[n],b_answer[n],places=4)
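
The expected values are the normalized leading eigenvector of the P3 adjacency matrix, (1, sqrt(2), 1)/2; a quick numpy cross-check (ours, not part of the test suite):

import numpy as np

A = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])  # adjacency matrix of P3
w, v = np.linalg.eigh(A)
x = np.abs(v[:, np.argmax(w)])  # leading eigenvector, made non-negative
print(x / np.linalg.norm(x))    # [0.5, 0.7071..., 0.5]
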
def get_sna(path):
    sna_data = {}
    print('Building relations graph')
    G = nx.read_gexf(path)
    print('Nodes:', len(G.nodes()))
    print('Edges:', len(G.edges()))

    print('Calculating centralities:')
    print('    -degrees')
    degrees = G.degree()    
    for c in degrees:
        sna_data[c] = { 'degree':degrees[c],
                            'betweenness':0,
                            'closeness':0,
                            'eigenvector':0}
        
    print('    -betweenness')
    betweenness = nx.betweenness_centrality(G)
    for c in betweenness:
        sna_data[c]['betweenness'] = betweenness[c]
        
    print('    -closeness')
    closeness = nx.closeness_centrality(G)
    for c in closeness:
        sna_data[c]['closeness'] = closeness[c]
        
    print('    -eigenvector')
    eigenvector = nx.eigenvector_centrality_numpy(G)
    for c in eigenvector:
        sna_data[c]['eigenvector'] = eigenvector[c]
        
    return sna_data
Example #12
def centrality_scores(vote_matrix, season_graph):
    deg = nx.degree(season_graph)
    deg = {k: round(v, 1) for k, v in deg.items()}

    close = nx.closeness_centrality(season_graph)
    close = {k: round(v, 3) for k, v in close.items()}

    btw = nx.betweenness_centrality(season_graph)
    btw = {k: round(v, 3) for k, v in btw.items()}

    eig = nx.eigenvector_centrality_numpy(season_graph)
    eig = {k: round(v, 3) for k, v in eig.items()}

    page = nx.pagerank(season_graph)
    page = {k: round(v, 3) for k, v in page.items()}

    # Add contestant placement (rank)
    order = list(vote_matrix.index)
    place_num = list(range(len(order)))
    place = {order[i]:i+1 for i in place_num}
    
    names = season_graph.nodes()

    # Build a table with centralities 
    table=[[name, deg[name], close[name], btw[name], eig[name], page[name], place[name]] for name in names]

    # Convert table to pandas df
    headers = ['name', 'deg', 'close', 'btw', 'eig', 'page', 'place']
    df = pd.DataFrame(table, columns=headers)
    df = df.sort_values(['page', 'eig', 'deg'], ascending=False)
    
    return df
def set_evaluation_datas(graph,graph_xml,**kwargs) :
    '''If no precise evaluation method is given, we compute every possible measure (which is wrong!)'''
    
    evaluation_method = kwargs.get('evaluation_method','')
    
    def add_sub(name,value):
        sub = xml.SubElement(graph_xml,name)
        sub.attrib['value'] = str(value)
        
    # The first relevant pieces of information are the numbers of nodes and edges.
    # These should depend on the method used to develop the network, but for now
    # they are necessary and always stored.
    add_sub('number_of_nodes',nx.number_of_nodes(graph))
    add_sub('number_of_edges',nx.number_of_edges(graph))    
    
    #number of nodes
    nodes = nx.number_of_nodes(graph)
    
    #should be replaced by getattr(graph, variable) loop
    if graph.is_directed():
        if 'vertices' in evaluation_method :
            add_sub('vertices',nx.number_of_edges(graph)/(nodes*(nodes-1)))
        if 'degrees' in evaluation_method :
            add_sub('degree_in',graph.in_degree().values())
            add_sub('degree_out', graph.out_degree().values())
        if 'importance' in evaluation_method :
            add_sub('importance',nx.eigenvector_centrality_numpy(graph.reverse()).values())
        if 'clustering' in evaluation_method or 'heterogeneity' in evaluation_method :
            add_sub('clustering',nx.clustering(graph.to_undirected()).values())
        if 'community_structure' in evaluation_method :
            add_sub('degree',graph.degree().values())
    else:
        if 'vertices' in evaluation_method :
            add_sub('vertices',2*nx.number_of_edges(graph)/(nodes*(nodes-1)))
        if 'communities' in evaluation_method :
            add_sub('communities',get_communities(graph))
        if 'degrees' in evaluation_method or 'community_structure' in evaluation_method :
            add_sub('degrees',graph.degree().values())
        if 'clustering' in evaluation_method or 'heterogeneity' in evaluation_method :
            add_sub('clustering',nx.clustering(graph).values())
        if 'importance' in evaluation_method :
            add_sub('importance',nx.eigenvector_centrality_numpy(graph).values())
    
    if 'distances' in evaluation_method:
        add_sub('distances',list(it.chain.from_iterable([ dict_of_length.values() for dict_of_length in nx.shortest_path_length(graph).values()])))
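
For reference, a sketch of the XML that add_sub produces (assuming the xml alias is xml.etree.ElementTree, as the SubElement call suggests):

import xml.etree.ElementTree as xml

graph_xml = xml.Element('graph')
sub = xml.SubElement(graph_xml, 'number_of_nodes')
sub.attrib['value'] = str(34)
print(xml.tostring(graph_xml))  # b'<graph><number_of_nodes value="34" /></graph>'
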
Example #14
def betweenness_centrality(graph):
    # NOTE: despite the function name, this computes eigenvector centrality;
    # the betweenness/closeness variants are left commented out.
    #centrality = nx.betweenness_centrality(graph, normalized=True)
    #centrality = nx.closeness_centrality(graph)
    centrality = nx.eigenvector_centrality_numpy(graph)
    nx.set_node_attributes(graph, 'centrality', centrality)  # networkx 1.x argument order
    degrees = sorted(centrality.items(), key=itemgetter(1), reverse=True)
    for idx, item in enumerate(degrees[0:10]):
        item = (idx+1,) + item
        print "%i. %s: %0.3f" % item
Example #15
def analyze_graph(G):
    betweenness = nx.betweenness_centrality(G)
    eigenvector = nx.eigenvector_centrality_numpy(G)
    closeness = nx.closeness_centrality(G)
    pagerank = nx.pagerank(G)
    degrees = G.degree()

    for name in G.nodes():
        G.node[name]['betweenness'] = betweenness[name]
        G.node[name]['eigenvector'] = eigenvector[name]
        G.node[name]['closeness'] = closeness[name]
        G.node[name]['pagerank'] = pagerank[name]
        G.node[name]['degree'] = degrees[name]
        
    components = nx.connected_component_subgraphs(G)
    i = 0    
    for cc in components:            
        #Set the connected component for each group
        for node in cc:
            G.node[node]['component'] = i
        i += 1
        
        cent_betweenness = nx.betweenness_centrality(cc)              
        cent_eigenvector = nx.eigenvector_centrality_numpy(cc)
        cent_closeness = nx.closeness_centrality(cc)
        
        for name in cc.nodes():
            G.node[name]['cc-betweenness'] = cent_betweenness[name]
            G.node[name]['cc-eigenvector'] = cent_eigenvector[name]
            G.node[name]['cc-closeness'] = cent_closeness[name]
    
    #Assign each person to their biggest clique
    cliques = list(nx.find_cliques(G))
    j = 0
    for clique in cliques:
        clique_size = len(clique)
        for member in clique:
            # default of 0 avoids a KeyError the first time a node is seen
            if G.node[member].get('clique-size', 0) < clique_size:
                G.node[member]['clique-size'] = clique_size
                G.node[member]['clique'] = j
        j += 1
    
    
    return G
Example #16
def perform_GA(Graphs, commGraphs, gtoidict, itogdict, genedict):
    '''
    Perform the GA algorithm here, Graphs has the original graphs with all
    Nodes in both top and bottom networks, commGraphs contains only the
    specific communities needed for the third part of the equation
    '''
    EVCTop = NX.eigenvector_centrality_numpy(Graphs['Top'])
    EVCBot = NX.eigenvector_centrality_numpy(Graphs['Bot'])

    randPop = produce_population(genedict)

    communities = split_graph_into_communities(commGraphs['Top'],
                                        C.best_partition(commGraphs['Top']))

    # `phi` is presumably defined at module level in the original source
    while phi > 0:
        geneList = calc_fitness_x(randPop, EVCTop, EVCBot, gtoidict, itogdict,
                                  commGraphs['Top'], communities)
        filterList = filter_genes(geneList.values())
        break
Example #17
def centrailtyM(A,num=5):
    G = nx.DiGraph(A)
    ranks = np.zeros((num, 8))
    # wrap the dict views in list() so numpy gets 1-D arrays; relies on dict
    # insertion order matching node order
    ranks[:, 0] = np.argsort(list(nx.in_degree_centrality(G).values()))[::-1][:num]
    ranks[:, 1] = np.argsort(list(nx.closeness_centrality(G).values()))[::-1][:num]
    ranks[:, 2] = np.argsort(list(nx.betweenness_centrality(G).values()))[::-1][:num]
    ranks[:, 3] = np.argsort(list(nx.eigenvector_centrality_numpy(G).values()))[::-1][:num]
    ranks[:, 4] = np.argsort(list(nx.katz_centrality_numpy(G, weight=None).values()))[::-1][:num]
    ranks[:, 5] = np.argsort(list(nx.pagerank_numpy(G, weight=None).values()))[::-1][:num]
    return ranks
    def forUndirected(G):

        myList = [nx.eigenvector_centrality_numpy(G), 
                  nx.degree_centrality(G),
                  nx.betweenness_centrality(G),
                  nx.communicability_centrality(G), 
                  nx.load_centrality(G),   
                  nx.pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1e-08, nstart=None, weight='weight'),
                  nx.clustering(G, weight='weight')]
        return myList
def calculateEigenCentrality_numpy(userConnectedGraph, counter):
    """
    calculates the eigenVector Centrality for given graph and writes the output to file
    parameters:
    userConnectedGraph - graph
    counter - int value for maintaining unique file names
    """
    eigenCentrality = nx.eigenvector_centrality_numpy(userConnectedGraph)
    writeCentralityOutput(eigenCentrality, path + 'eigenCentrality' + str(counter))
    plotgraph(conn, path, 'eigenCentrality' + str(counter))  # `conn` and `path` presumably come from the enclosing module
 def test_K5(self):
     """Eigenvector centrality: K5"""
     G=networkx.complete_graph(5)
     b=networkx.eigenvector_centrality(G)
     v=math.sqrt(1/5.0)
     b_answer=dict.fromkeys(G,v)
     for n in sorted(G):
         assert_almost_equal(b[n],b_answer[n])
     b=networkx.eigenvector_centrality_numpy(G)
     for n in sorted(G):
         assert_almost_equal(b[n],b_answer[n],places=3)
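
These uniform values follow from symmetry: for K_n the adjacency matrix is J - I, whose leading eigenvector is constant, so after normalization every node scores 1/sqrt(n), here 1/sqrt(5) ≈ 0.4472.
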
Example #21
def run_metric(metric_name, G, domain, topic, metric_weight, use_norm, fileout, top_x):
    print('\n>> ' + 'Calculating ' + metric_name + ' for ' + domain + " - " + topic)
    start_time = datetime.now()
    if metric_name == 'Degree':
        graph_metric = G.degree(nbunch=None, weight=metric_weight)
        normalize_metric(G, graph_metric, metric_weight)
    elif metric_name == 'In Degree':
        graph_metric = G.in_degree(nbunch=None, weight=metric_weight)
        normalize_metric(G, graph_metric, metric_weight)
    elif metric_name == 'Out Degree':
        graph_metric = G.out_degree(nbunch=None, weight=metric_weight)
        normalize_metric(G, graph_metric, metric_weight)
    elif metric_name == 'Closeness Centrality':
        graph_metric = nx.closeness_centrality(G, distance=None, normalized=use_norm)
        # use distance as weight? to make higher weights mean closer, set distance = 1/weight
    elif metric_name == 'Betweenness Centrality':
        graph_metric = nx.betweenness_centrality(G, normalized=use_norm, weight=metric_weight)
    elif metric_name == 'Eigenvector Centrality':
        try:
            graph_metric = nx.eigenvector_centrality(G, max_iter=1000)
            normalize_metric(G, graph_metric, metric_weight)
        except nx.exception.NetworkXError:
            # use numpy eigenvector if fail to converge
            print "power method for calculating eigenvector did not converge, using numpy"
            graph_metric = nx.eigenvector_centrality_numpy(G)
            normalize_metric(G, graph_metric, metric_weight)
    elif metric_name == 'Pagerank':
        try:
            graph_metric = nx.pagerank(G, weight=metric_weight)
            normalize_metric(G, graph_metric, metric_weight)
        except nx.exception.NetworkXError:
            # use numpy if the power method fails to converge
            print("power method for calculating pagerank did not converge, using numpy")
            graph_metric = nx.pagerank_numpy(G, weight=metric_weight)
            normalize_metric(G, graph_metric, metric_weight)
    end_time = datetime.now()
    print "Calculation completed in: " + str(end_time - start_time)
    # append the entire list to the output file
    append_to_file(graph_metric, fileout, domain, topic, metric_name)
    '''
    ### output to screen the top x results
    # convert to a list of tuples
    graph_metric = graph_metric.items()
    # sort
    graph_metric.sort(key=lambda tup: -tup[1])
    # get and print the top X
    # print metric_results(graph_metric)
    top_list = take(top_x, graph_metric)
    for item in top_list:
        print((item[0]) + "," + str(item[1]))
    '''
    return graph_metric
	def eigen_vector_centrality(self):

		#Compute the eigenvector centrality of the graph
		logging.info("Inside eigenvector centrality module")
		eigenvector_centrality_dict = nx.eigenvector_centrality_numpy(self.G)		
		logging.info("Eigen vector centrality dict count is %s " % (len(eigenvector_centrality_dict.keys())))
		
		eigen_vector_sorted_list = sorted(eigenvector_centrality_dict.items(), key=lambda x:x[1], reverse=True)[:3]
	
		for a,b in eigen_vector_sorted_list:
			logging.info("Eigen vector cent for %s is %s" % (a,b))
		
		eigenvector_centrality_dict = {}
Example #23
 def test_K5_unweighted(self):
     """Katz centrality: K5"""
     G = nx.complete_graph(5)
     alpha = 0.1
     b = nx.katz_centrality(G, alpha, weight=None)
     v = math.sqrt(1 / 5.0)
     b_answer = dict.fromkeys(G, v)
     for n in sorted(G):
         assert_almost_equal(b[n], b_answer[n])
     nstart = dict([(n, 1) for n in G])  # unused below; apparently a leftover from the power-iteration variant of this test
     b = nx.eigenvector_centrality_numpy(G)
     for n in sorted(G):
         assert_almost_equal(b[n], b_answer[n], places=3)
def output_eigenvector_centrality_info (graph, path, nodes_dict):
    """Output Eigenvector centrality information about the graph.
       graph : (networkx.Graph)
       path: (String) contains the path to the output file
       nodes_dict: (dictionary) maps node id to node name
    """
    eigen_dict = nx.eigenvector_centrality_numpy(graph, weight='weight')
    eigen_dict = dict((nodes_dict[key], eigen_dict[key]) for key in nodes_dict if key in eigen_dict)
    eigen_list = dict_to_sorted_list(eigen_dict)

    with open(path, 'w') as out:
        out.write('***Eigenvector Centrality***\n')
        out.write('Node\tLayer\tEigenvector centrality\n')
        for element in eigen_list:
            out.write('%d\t%d\t%f\n' % (element[0][0], element[0][1], element[1]))
def network_value_distribution(orig_g_M, otherModel_M, name):
  eig_cents = [nx.eigenvector_centrality_numpy(g) for g in orig_g_M]  # nodes with eigencentrality
  net_vals = []
  for cntr in eig_cents:
      net_vals.append(sorted(cntr.values(), reverse=True))
  df = pd.DataFrame(net_vals)
  # Per-node eigenvector centrality (node -> score) of the first original
  # graph; note this overwrites the per-graph frame built just above.
  df = pd.DataFrame(list(eig_cents[0].items()))
  df.columns = ['v', 'eig']  # eig: eigenvector centrality
  df['eig'].to_csv('Results/{}NetValue.tsv'.format(name), sep='\t', header=True,
            #float_format='%4.18f ',
            encoding='utf-8', index=True)

  print "orig"
  l = list(df.mean())
  zz = float(len(l))
  if not zz == 0:
      sa =  int(math.ceil(zz/75))
      for i in range(0, len(l), sa):
          print "(" + str(i) + "," + str(l[i]) + ")"

  eig_cents = [nx.eigenvector_centrality_numpy(g) for g in otherModel_M]  # nodes with eigencentrality
  net_vals = []
  for cntr in eig_cents:
      net_vals.append(sorted(cntr.values(), reverse=True))
  df = pd.DataFrame(net_vals)

  print "other model"
  l = list(df.mean())
  zz = float(len(l))
  if not zz == 0:
      sa =  int(math.ceil(zz/75))
      for i in range(0, len(l), sa):
          print "(" + str(i) + "," + str(l[i]) + ")"
Example #26
 def detectCenters(self):
     for community in self.communities:
         if len(self.Labels[community]) < 1:
             self.centers1.append(-1)
             self.centers2.append(-1)
             self.centers3.append(-1)
             self.centers4.append(-1)
             self.centers5.append(-1)
             self.centers6.append(-1)
         else:
             self.G1 = nx.MultiGraph()
             self.G1.add_nodes_from(self.Labels[community])
             for agent in self.Labels[community]:
                 r = np.where(self.trusts[agent, :] > 0)[0]
                 self.G1.add_edges_from(zip(r, [agent] * len(r)))
                 r = np.where(self.trusts[:, agent] > 0)[0]
                 self.G1.add_edges_from(zip(r, [agent] * len(r)))
             eigen = nx.eigenvector_centrality_numpy(self.G1)
             betweenness = nx.betweenness_centrality(self.G1)
             center1 = center2 = center3 = center4 = center5 = center6 = self.Labels[community][0]
             max_betweenness = betweenness[center1]
             max_degree = self.agents[center2].deg
             max_trustee = self.agents[center3].trustee
             max_trustor = self.agents[center4].trustor
             max_eigen = eigen[center6]
             for agent in self.Labels[community]:
                 if betweenness[agent] >= max_betweenness:
                     center1 = agent
                     max_betweenness = betweenness[agent]
                 if self.agents[agent].deg >= max_degree:
                     center2 = agent
                     max_degree = self.agents[agent].deg
                 if self.agents[agent].trustee >= max_trustee:
                     center3 = agent
                     max_trustee = self.agents[agent].trustee
                 if self.agents[agent].trustor >= max_trustor:
                     center4 = agent
                     max_trustor = self.agents[agent].trustor
                 if eigen[agent] >= max_eigen:
                     center6 = agent
                     max_eigen = eigen[agent]
             center5 = random.choice(self.Labels[community])
             self.centers1.append(center1)
             self.centers2.append(center2)
             self.centers3.append(center3)
             self.centers4.append(center4)
             self.centers5.append(center5)
             self.centers6.append(center6)
def getHugeStats(g):
    
    if g.is_directed():
        P1 = pd.DataFrame({'load_centrality': nx.load_centrality(g, weight='weight'),
                           'betweenness_centrality': nx.betweenness_centrality(g, weight='weight'),
                           
                           'pagerank': pd.Series(nx.pagerank(g, alpha=0.85, personalization=None, max_iter=100, tol=1e-08, nstart=None, weight='weight')),
                           'eigenvector_centrality': nx.eigenvector_centrality_numpy(g),
                           'degree_centrality': pd.Series(nx.degree_centrality(g)),
                           'in_degree_centrality': pd.Series(nx.in_degree_centrality(g)),
                           'out_degree_centrality': pd.Series(nx.out_degree_centrality(g))})
                           
    else:
        # note: pd.Panel was removed in pandas 0.25, so this branch needs an older pandas
        P1 = pd.Panel({'spl': pd.DataFrame(nx.shortest_path_length(g)),
                          'apdp': pd.DataFrame(nx.all_pairs_dijkstra_path(g)), 
                          'apdl': pd.DataFrame(nx.all_pairs_dijkstra_path_length(g)),
                          'c_exp': pd.DataFrame(nx.communicability_exp(g))})    
    return P1
Example #28
def testRandomCentralNode():
    df = readEdgeList('testEdgeList.csv')
    g = pandasToNetworkX(df)

    # make sure randomCentralNode is actually a node in the graph
    assert randomCentralNode(g) in g.nodes()

    # the following few lines sample 500 random nodes, getting
    # the most- and least-frequently chosen ones.
    # if the random sampling is being done right, then the
    # eigenvector centrality of the most-frequent node should be
    # larger than that of the least-frequent node (with high probability)
    smpl = Counter([randomCentralNode(g) for i in range(500)])
    top,topCount = smpl.most_common()[0]
    btm,btmCount = smpl.most_common()[-1]
    evc = nx.eigenvector_centrality_numpy(g)
    # this could fail by chance every once in a while
    # but it should be very rare
    assert evc[top] > evc[btm]
Example #29
def karate_rule(base, new):
    """
    The original karate club has two densly connected clusters, with a few critical bridges. This rule attempts 
    to simulate a growth function from this by first bringing together all components, then with a simple
    preferential attachment model to those actors with relatively high Eigenvector centrality.  The original 
    Zachary data forms two tightly clustered communities with a select few bridges. This growth rule is meant to 
    re-constrcut this in reverse, by first forming the bridges, then the polar hubs.
    """
    # If the base graph has multiple components, first attempt to unify them into a single component
    if nx.components.number_connected_components(base)>1:
        comps=nx.connected_components(base)
        # Select two random components and form bridge with new structure
        rand_comps=random_integers(low=0,high=len(comps)-1,size=2)
        # Select random node from each component
        rand0=comps[rand_comps[0]][random_integers(low=0,high=len(comps[rand_comps[0]])-1)]
        rand1=comps[rand_comps[1]][random_integers(low=0,high=len(comps[rand_comps[1]])-1)]
        while rand0==rand1:
            rand1=comps[rand_comps[1]][random_integers(low=0,high=len(comps[rand_comps[1]])-1)]
        outer_bound=[rand0,rand1]
        outer_bound.extend(range(base.number_of_nodes(),base.number_of_nodes()+((new.number_of_nodes())-1)))
        mapping=dict(zip(new.nodes(),outer_bound))
        new=nx.relabel_nodes(new,mapping)
    else:
        # Use Eigenvector centrality as pref attachment attribute
        cent=nx.eigenvector_centrality_numpy(base).items()
        # Normalize values to sum to 1.0
        norm_const=sum([(b) for (a,b) in cent])
        pref_prob=[(b/norm_const) for (a,b) in cent]
        # Step through probability mass to find a node to attach to. Same method used in 
        # gmm.algorithms.draw_structure to select a probability weighted motif from the set.
        draw=uniform()
        node_index=0
        mass_sum=pref_prob[node_index]
        while draw>mass_sum:
            node_index+=1
            mass_sum+=pref_prob[node_index]
        rand0=cent[node_index][0] # Return the appropriate node ID
        outer_bound=[rand0]
        outer_bound.extend(range(base.number_of_nodes(),base.number_of_nodes()+((new.number_of_nodes())-1)))
        mapping=dict(zip(new.nodes(),outer_bound))
        new=nx.relabel_nodes(new,mapping)
    return nx.compose(base,new)
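
The cumulative-mass walk in the else branch above is equivalent to a single weighted draw; a sketch of that step using the standard library (Python 3.6+), with cent as the (node, centrality) pairs:

nodes, weights = zip(*cent)
rand0 = random.choices(nodes, weights=weights, k=1)[0]  # choices normalizes the weights itself
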
Example #30
def get_network_average(coords, knn):
    # create all nodes
    print('\tCreating graph')
    g = nx.DiGraph()
    n_frames = coords.shape[0]
    g.add_nodes_from(range(n_frames))

    # get distances
    print('\tCalculating distance matrix')
    distances = get_dist_matrix(coords)

    # add edges
    print('\tAdding edges')
    for i in range(n_frames):
        # find knn + 1 nearest neighbors
        neighbors = numpy.argsort(distances[i, :])[:knn+1]
        for j in neighbors:
            if not i == j:
                g.add_edge(i, j)

    # keep only mutual edges, turning the graph into an undirected mutual-kNN graph
    g = g.to_undirected(reciprocal=True)

    # find the largest connected component
    connected_components = list(nx.connected_component_subgraphs(g))
    largest_connected_component = max(connected_components, key=len)
    print('\t{} connected components.'.format(len(connected_components)))
    print('\tLargest connected component contains {} nodes.'.format(len(largest_connected_component)))

    # find most central point
    print('\tFinding most central point')
    centrality = nx.eigenvector_centrality_numpy(largest_connected_component)
    index_of_max = max(centrality.items(), key=operator.itemgetter(1))[0]

    # calculate the average of the central point and all its neighbors
    neighbors = list(largest_connected_component.neighbors(index_of_max))
    neighbors.append(index_of_max)
    average = numpy.mean( coords[neighbors,:], axis=0 )

    # return it
    return average
Example #31
# %%%%%%%%%%%%%%%
print('Nodes:', len(social_network.nodes()))
print('Edges:', len(social_network.edges()))

# %%%%%%%%%%%%%%%
L = max(nx.connected_component_subgraphs(social_network), key=len)
print("Nodes in largest sub component:", len(L.nodes()))
print("Edges in largest sub component:", len(L.edges()))

avg_cluster_index = nx.average_clustering(social_network)
print(avg_cluster_index)

# %%%%%%%%%%%%%%%
##### EIGEN VECTOR CENTRALITY #####
eigen_value = nx.eigenvector_centrality_numpy(social_network)
eigen_elite_user_avg = np.mean(
    [eigen_value[node] for node in eigen_value if node.Type == elites])
eigen_regular_avg = np.mean(
    [eigen_value[node] for node in eigen_value if node.Type == user_reg])
eigen_all_avg = np.mean([
    eigen_value[node] for node in eigen_value
    if node.Type == user_reg or node.Type == elites
])

print(eigen_elite_user_avg, eigen_regular_avg, eigen_all_avg)

# %%%%%%%%%%%%%%%
deg_central = nx.degree_centrality(social_network)
degree_elite_avg = np.mean(
    [deg_central[node] for node in deg_central if node.Type == elites])
Example #32
            curVillage = int(filename[37:-4])
            mat = pd.read_csv(os.path.join(directory, filename), header=None)
            show_graph_with_labels(mat.values, curVillage)

            FG = nx.from_numpy_matrix(mat.values)
            try:
                eigveccentNWX = pd.DataFrame.from_dict(
                    nx.algorithms.eigenvector_centrality(FG), orient='index'
                ).squeeze(
                )  #convert dictionary to pandas dataframe, maybe .squeeze() into a Series?
            except nx.NetworkXException:  # covers convergence failure of the power iteration
                print(
                    "Could not converge with power iteration for the Eigenvector Centrality for village: "
                    + str(curVillage))
                eigveccentNWX = pd.DataFrame.from_dict(
                    nx.eigenvector_centrality_numpy(FG), orient='index'
                ).squeeze(
                )  #https://stackoverflow.com/questions/43208737/using-networkx-to-calculate-eigenvector-centrality
            degcentNWX = pd.DataFrame.from_dict(
                nx.algorithms.degree_centrality(FG), orient='index'
            ).squeeze(
            )  #convert dictionary to pandas dataframe, maybe .squeeze() into a Series?
            closecentNWX = pd.DataFrame.from_dict(
                nx.algorithms.closeness_centrality(FG), orient='index'
            ).squeeze(
            )  #convert dictionary to pandas dataframe, maybe .squeeze() into a Series?
            betcentNWX = pd.DataFrame.from_dict(
                nx.algorithms.betweenness_centrality(FG), orient='index'
            ).squeeze(
            )  #convert dictionary to pandas dataframe, maybe .squeeze() into a Series?
 def test_eigenvector_centrality_unweighted_numpy(self):
     G = self.H
     p = nx.eigenvector_centrality_numpy(G)
     for (a, b) in zip(list(p.values()), self.G.evc):
         assert a == pytest.approx(b, abs=1e-7)
 def test_empty_numpy(self):
     with pytest.raises(nx.NetworkXException):
         e = nx.eigenvector_centrality_numpy(nx.Graph())
def principal_eigenvector(c: np.ndarray) -> np.ndarray:
    # np.Array does not exist; the annotation and the actual return type are np.ndarray
    graph = nx.from_numpy_matrix(c.T, create_using=nx.DiGraph)
    return np.array(list(nx.eigenvector_centrality_numpy(graph, weight='weight').values()))
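
A quick smoke test with a made-up 2x2 matrix (our example, not from the source):

import numpy as np

c = np.array([[0.0, 1.0],
              [2.0, 0.0]])
print(principal_eigenvector(c))  # the node receiving the heavier edge scores higher
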
Example #36
    def __init__(self, contacts, modes, block_prob=0.1):
        self.contacts = contacts
        self.modes = modes
        self.people = np.unique(self.contacts[['p1', 'p2']])
        self.n = len(self.people)
        self.block_prob = {}
        p2id = {}
        for i, p in enumerate(self.people):
            p2id[p] = i
        self.contacts['id1'] = self.contacts['p1'].apply(lambda row: p2id[row])
        self.contacts['id2'] = self.contacts['p2'].apply(lambda row: p2id[row])

        # aggregate the contacts
        self.backbones = np.zeros([self.n, self.n])
        for i, c in self.contacts.iterrows():
            t, p1, p2 = c[['time', 'id1', 'id2']]
            self.backbones[p1, p2] += 1
            self.backbones[p2, p1] += 1

        self.agg_graph = nx.from_numpy_array(self.backbones)

        for mode in self.modes:
            if mode == 'degree product':
                adj = (self.backbones > 0)
                degree = np.sum(adj, axis=0)
                feature = np.outer(degree, degree)
            elif mode == 'r degree product':
                adj = (self.backbones > 0)
                degree = np.sum(adj, axis=0)
                feature = 1 / np.outer(degree, degree)
            elif mode == 'strength product':
                strength = np.sum(self.backbones, axis=0)
                feature = np.outer(strength, strength)
            elif mode == 'r strength product':
                strength = np.sum(self.backbones, axis=0)
                feature = 1 / np.outer(strength, strength)
            elif mode == 'betweeness':
                feature = np.zeros([self.n, self.n])
                bet = nx.algorithms.edge_betweenness_centrality(self.agg_graph)
                for k, v in bet.items():
                    feature[k] = v
                feature += feature.T
            elif mode == 'r betweeness':
                feature = np.zeros([self.n, self.n])
                bet = nx.algorithms.edge_betweenness_centrality(self.agg_graph)
                for k, v in bet.items():
                    if v > 0:
                        feature[k] = 1 / v
                feature += feature.T
            elif mode == 'link weight':
                feature = np.copy(self.backbones)
            elif mode == 'r link weight':
                feature = np.zeros([self.n, self.n])
                feature[self.backbones !=
                        0] = 1 / self.backbones[self.backbones != 0]
            elif mode == 'weighted eigen':
                eigen = nx.eigenvector_centrality_numpy(self.agg_graph,
                                                        weight='weight')
                feature = np.zeros([self.n, self.n])
                for i in range(self.n):
                    for j in range(self.n):
                        feature[i, j] = (eigen[i]) * (eigen[j])
            elif mode == 'r weighted eigen':
                eigen = nx.eigenvector_centrality_numpy(self.agg_graph,
                                                        weight='weight')
                feature = np.zeros([self.n, self.n])
                for i in range(self.n):
                    for j in range(self.n):
                        feature[i, j] = 1 / (eigen[i] * eigen[j])
            elif mode == 'unweighted eigen':
                eigen = nx.eigenvector_centrality_numpy(self.agg_graph)
                feature = np.zeros([self.n, self.n])
                for i in range(self.n):
                    for j in range(self.n):
                        feature[i, j] = eigen[i] * eigen[j]
            elif mode == 'r unweighted eigen':
                eigen = nx.eigenvector_centrality_numpy(self.agg_graph)
                feature = np.zeros([self.n, self.n])
                for i in range(self.n):
                    for j in range(self.n):
                        feature[i, j] = 1 / (eigen[i] * eigen[j])
            elif mode == 'random':
                feature = np.ones([self.n, self.n])
            else:
                print('unsupported mode')
            block = feature * np.sum(self.backbones) / np.sum(
                feature * self.backbones) * block_prob
            while True:
                block[block > 1] = 1
                new = np.sum(self.backbones) * block_prob - np.sum(
                    self.backbones[block == 1])
                old = np.sum((block * self.backbones)[block < 1])
                block[block < 1] = block[block < 1] * new / old
                block[block > 1] = 1
                if abs(
                        np.sum(self.backbones * block) -
                        np.sum(self.backbones) * block_prob) < 0.01:
                    break
            self.block_prob[mode] = block
 def test_multigraph_numpy(self):
     with pytest.raises(nx.NetworkXException):
         e = nx.eigenvector_centrality_numpy(nx.MultiGraph())
Example #38
def Eigen_Centrality(G):
    Eigen_Centrality = nx.eigenvector_centrality_numpy(G)
    #Eigen_Centrality = nx.eigenvector_centrality(G)
    #nx.eigenvector_centrality_numpy
    #print "Eigen_Centrality:", sorted(Eigen_Centrality.iteritems(), key=lambda d:d[1], reverse = True)
    return Eigen_Centrality
def get_network_analytics(data_reduced):
    #calculate average breach and average beds occupied:
    average_breach_perc = data_reduced['breach_percentage'].mean()
    average_bed_occupancy = data_reduced['Total Occupied'].mean()

    # weighted edges first
    # count the number of times a specific transfer appears to get edge weight
    transfer_counts = data_reduced.groupby(['from', 'to']).count()

    # add the old index as a column - in the above, the count became the index.
    transfer_counts = transfer_counts.reset_index()
    transfer_counts = transfer_counts[transfer_counts['ptid'] > 1]
    # Get a list of tuples that contain the values from the rows.
    # rename on a fresh copy; an in-place rename on a slice triggers
    # pandas' SettingWithCopyWarning
    edge_weight_data = transfer_counts[['from', 'to', 'ptid']].rename(
        index=str, columns={'ptid': 'weight'})
    sum_of_all_transfers = edge_weight_data['weight'].sum()
    #print(sum_of_all_transfers)
    #edge_weight_data['ptid'] = edge_weight_data['ptid']/sum_of_all_transfers
    #edge_weight_data.to_csv('edge_wdadult%s.csv' % str(i), header=True, index=False)
    weighted_edges = list(
        itertools.starmap(lambda f, t, w: (f, t, int(w)),
                          edge_weight_data.itertuples(index=False, name=None)))

    G = nx.DiGraph()
    # print(weighted_edges)
    G.add_weighted_edges_from(weighted_edges)

    en = G.number_of_edges()
    nn = G.number_of_nodes()

    # calculate the degree
    degrees = nx.classes.function.degree(G)

    degrees_list = [[n, d] for n, d in degrees]
    if b == 1000:  # `b` presumably comes from the enclosing scope in the original source
        print(degrees_list)

    degrees_data = pd.DataFrame(degrees_list, columns=['node', 'degree'])
    #degrees_data_degree = degrees_data['degree']
    degrees_data.to_csv('degrees_weadult%s.csv' % str(i),
                        header=True,
                        index=False)
    #look at degrees of the emergency department, need to change it to a dictionary to be able to look up the degree value for this node
    degrees_data.set_index('node', inplace=True)
    degrees_dict = degrees_data.to_dict()['degree']

    #check if there is data in this specific subset eg there may not be data in a weekend stress set in summer...
    if 'AE' in degrees_dict:
        emergency_degrees = degrees_dict['AE']
        #print('in dict')
        no_data = False
    else:
        #print('not in dict')
        no_data = True
        emergency_degrees = 0

    #degrees_list.append(list(degrees.values))
    #degrees_list.to_csv('degrees%s.csv' % str(i), header=True, index=False)

    #number of transfers from medical wards to theatre
    acute_to_theatre = G.get_edge_data('acute medical ward',
                                       'theatre',
                                       default={}).get('weight', 0)
    gen_to_theatre = G.get_edge_data('general medical ward',
                                     'theatre',
                                     default={}).get('weight', 0)
    card_to_theatre = G.get_edge_data('cardiology ward', 'theatre',
                                      default={}).get('weight', 0)
    rehab_to_theatre = G.get_edge_data('rehab', 'theatre',
                                       default={}).get('weight', 0)
    total_medical_to_theatre = acute_to_theatre + gen_to_theatre + card_to_theatre + rehab_to_theatre

    #number of circular or unnecessary ward transfers
    med_to_med_acute = G.get_edge_data('acute medical ward',
                                       'acute medical ward',
                                       default={}).get('weight', 0)
    med_to_med_acgen = G.get_edge_data('acute medical ward',
                                       'general medical ward',
                                       default={}).get('weight', 0)
    med_to_med_genac = G.get_edge_data('general medical ward',
                                       'acute medical ward',
                                       default={}).get('weight', 0)
    med_to_med_general = G.get_edge_data('general medical ward',
                                         'general medical ward',
                                         default={}).get('weight', 0)

    med_to_surg = G.get_edge_data('general medical ward',
                                  'general surgical ward',
                                  default={}).get('weight', 0)
    med_to_ortho = G.get_edge_data('general medical ward',
                                   ' orthopaedic ward',
                                   default={}).get('weight', 0)
    med_to_surg_acute = G.get_edge_data('acute medical ward',
                                        'general surgical ward',
                                        default={}).get('weight', 0)
    med_to_orth_acute = G.get_edge_data('acute medical ward',
                                        ' orthopaedic ward',
                                        default={}).get('weight', 0)
    acmed_to_ns = G.get_edge_data('acute medical ward', 'ns ward',
                                  default={}).get('weight', 0)
    genmed_to_ns = G.get_edge_data('general medical ward',
                                   'ns ward',
                                   default={}).get('weight', 0)
    acmed_to_atc = G.get_edge_data('acute medical ward',
                                   'ATC surgical ward',
                                   default={}).get('weight', 0)
    genmed_to_atc = G.get_edge_data('general medical ward',
                                    'ATC surgical ward',
                                    default={}).get('weight', 0)
    total_medical_ward_transfers = med_to_med_acute + med_to_med_general + med_to_med_acgen + med_to_med_genac + med_to_ortho + med_to_surg + med_to_surg_acute + med_to_orth_acute + acmed_to_ns + genmed_to_ns + acmed_to_atc + genmed_to_atc
    # print (total_medical_ward_transfers)

    ae_surg = G.get_edge_data('AE', 'general surgical ward', default={}).get(
        'weight',
        0) + G.get_edge_data('AE', 'orthopaedic ward', default={}).get(
            'weight',
            0) + G.get_edge_data('AE', 'ATC surgical ward', default={}).get(
                'weight',
                0) + G.get_edge_data('AE', 'gynae ward', default={}).get(
                    'weight', 0) + G.get_edge_data(
                        'AE', 'ns ward', default={}).get('weight', 0)
    print(ae_surg)
    ae_med = G.get_edge_data('AE', 'acute medical ward', default={}).get(
        'weight',
        0) + G.get_edge_data('AE', 'general medical ward', default={}).get(
            'weight',
            0) + G.get_edge_data('AE', 'cardiology ward', default={}).get(
                'weight', 0) + G.get_edge_data('AE', 'rehab', default={}).get(
                    'weight', 0) + G.get_edge_data(
                        'AE', 'cdu', default={}).get('weight', 0)
    if ae_surg == 0:
        ratio_wards_surg_med = 0
    else:
        ratio_wards_surg_med = ae_med / ae_surg

    # calculate the centrality of each node - fraction of nodes the incoming/outgoing edges are connected to
    incentrality = nx.algorithms.centrality.in_degree_centrality(G)
    # check if the theatre node exists in this data subset
    if 'theatre' in incentrality:
        in_theatre_centrality = incentrality['theatre']
    else:
        in_theatre_centrality = 0

    outcentrality = nx.algorithms.centrality.out_degree_centrality(G)
    if 'theatre' in outcentrality:
        out_theatre_centrality = outcentrality['theatre']
    else:
        out_theatre_centrality = 0

    if 'AE' in outcentrality:
        out_ed_centrality = outcentrality['AE']
    else:
        out_ed_centrality = 0

    # flow hierarchy - the fraction of edges not contained in any cycle (uses strongly connected components)
    if nn == 0:
        flow_hierarchy = 0
    else:
        flow_hierarchy = nx.algorithms.hierarchy.flow_hierarchy(G)

    bet_centr = nx.algorithms.centrality.betweenness_centrality(G)
    if 'theatre' in bet_centr:
        theatres_bet_centrality = bet_centr['theatre']
    else:
        theatres_bet_centrality = 0

    if en == 0:
        theatres_eigen_centr = 0
        ed_eigen_centr = 0
        assortativity_net_inout = 0
    else:
        eigen_centr = nx.eigenvector_centrality_numpy(G)
        assortativity_net_inout = nx.degree_assortativity_coefficient(
            G, x='out', y='in', weight='weight')  # the edge attribute set above is 'weight', not 'weights'
        if 'theatre' in eigen_centr:
            theatres_eigen_centr = eigen_centr['theatre']
        else:
            theatres_eigen_centr = 0

        if 'AE' in eigen_centr:
            ed_eigen_centr = eigen_centr['AE']

        else:
            ed_eigen_centr = 0

    density_net = nx.density(G)
    transitivity_net = nx.transitivity(G)

    data_list.append({
        'month': i,
        'number of transfers': len(data_reduced['transfer_day']),
        'number nodes': nn,
        'number edges': en,
        'flow hierarchy': flow_hierarchy,
        'emergency degrees': emergency_degrees,
        'outcentrality ed': out_ed_centrality,
        'incentrality theatres': in_theatre_centrality,
        'outcentrality theatres': out_theatre_centrality,
        'bet centrality theatres': theatres_bet_centrality,
        'medical to theatre': total_medical_to_theatre,
        'medical ward transfers': total_medical_ward_transfers,
        'med surg ratio': ratio_wards_surg_med,
        'eigen_centr_theatre': theatres_eigen_centr,
        'eigen_centr_ed': ed_eigen_centr,
        'density': density_net,
        'transitivity': transitivity_net,
        'average_breach_percentage': average_breach_perc,
        'average bed occupancy': average_bed_occupancy
    })

    return data_list
Example #40
print("Exportação do arquivo GraphML com a rede:")

nx.write_graphml(DG, '/home/akina/Dropbox/TCC/Resultados/Graphml/rede_financiamento.graphml')


################################################

print("-----------------------------------------------------")
print("Exportação dos resultados dos cálculos de centralidade:")


################################################
print("1 - Cálculo da centralidade de autovetor")
#eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0)

eigenvector_centrality = nx.eigenvector_centrality_numpy(DG, "weight", len(DG), 0)
#print(['{}, {:0.20f}'.format(node, eigenvector_centrality[node]) for node in eigenvector_centrality], file=open('/home/akina/Dropbox/TCC/Dados/teste_eigenvector_centrality.csv', "w"))


csv_file_eigenvector = '/home/akina/Dropbox/TCC/Resultados/Centralidades_Geral_Todos/eigenvector_centrality.csv'
with open(csv_file_eigenvector, "w") as output_eigenvector:
    writer = csv.writer(output_eigenvector)
    for node in sorted(eigenvector_centrality, key=lambda x: eigenvector_centrality[x], reverse=True):
        writer.writerow(
            ["{}".format(DG.node[node]["nome_rotulo_vertice"]), "{:0.20f}".format(eigenvector_centrality[node])])


csv_file_eigenvector_votos = '/home/akina/Dropbox/TCC/Resultados/Centralidades_Candidatos_Votos/votos_eigenvector_centrality.csv'
with open(csv_file_eigenvector_votos, "w") as output_eigenvector_votos:
    writer = csv.writer(output_eigenvector_votos)
    for node in sorted(eigenvector_centrality, key=lambda x: eigenvector_centrality[x], reverse=True):
Example #41
        print("1")
        measure_unsorted[0][j] = measure[0][j]
        measure[0][j] = sorted(measure[0][j].items(),
                               key=operator.itemgetter(1),
                               reverse=True)  ## descending

    for j in range(no_of_graphs):
        measure[1][j] = nx.closeness_centrality(graph_list[j])
        print("2")
        measure_unsorted[1][j] = measure[1][j]
        measure[1][j] = sorted(measure[1][j].items(),
                               key=operator.itemgetter(1),
                               reverse=True)  ## descending

    for j in range(no_of_graphs):
        measure[2][j] = nx.eigenvector_centrality_numpy(graph_list[j])
        print("3")
        measure_unsorted[2][j] = measure[2][j]
        measure[2][j] = sorted(measure[2][j].items(),
                               key=operator.itemgetter(1),
                               reverse=True)  ## descending

    with open(
            "/home/sukanya/Desktop/jadavpur_internship/CODES/Result_corr_coeff.odt",
            "a+") as sn:
        sn.write("\n")
        sn.write(" FOR GRAPH 6 :----")
        sn.write("\n\n")
        ##### correlation coefficient between degree and closeness centrality
        a, b = zip(*sorted(measure_unsorted[0][i].items()))
        x = b
 def test_eigenvector_centrality_unweighted_numpy(self):
     G = self.H
     p = nx.eigenvector_centrality_numpy(G)
     for (a, b) in zip(list(p.values()), self.G.evc):
         assert_almost_equal(a, b)
def centrality(cname, directed=False):

    C = np.genfromtxt('dat/' + cname + '.dat')

    if directed and np.max(C) == 2:
        C = C.T / 2.0
    # print(np.max(C))

    if directed:
        G = nx.from_numpy_matrix(C, create_using=nx.DiGraph())
        # wrap the dict views in list() so numpy sees proper 1-D arrays
        BC = np.asarray(list(nx.betweenness_centrality(G).values()))
        IC = np.asarray(list(nx.in_degree_centrality(G).values()))
        OC = np.asarray(list(nx.out_degree_centrality(G).values()))
        EC = np.asarray(list(nx.eigenvector_centrality_numpy(G).values()))
        KC = np.asarray(list(nx.katz_centrality(G).values()))
        CC = np.asarray(list(nx.closeness_centrality(G).values()))
        LC = np.asarray(list(nx.load_centrality(G).values()))

        i_BC = BC.argsort()[::-1]
        i_IC = IC.argsort()[::-1]
        i_OC = OC.argsort()[::-1]
        i_EC = EC.argsort()[::-1]
        i_KC = KC.argsort()[::-1]
        i_CC = CC.argsort()[::-1]
        i_LC = LC.argsort()[::-1]

        np.savetxt('dat/BC-' + cname + '.txt',
                   list(zip(i_BC, np.sort(BC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/IC-' + cname + '.txt',
                   list(zip(i_IC, np.sort(IC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/OC-' + cname + '.txt',
                   list(zip(i_OC, np.sort(OC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/EC-' + cname + '.txt',
                   list(zip(i_EC, np.sort(EC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/KC-' + cname + '.txt',
                   list(zip(i_KC, np.sort(KC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/CC-' + cname + '.txt',
                   list(zip(i_CC, np.sort(CC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/LC-' + cname + '.txt',
                   list(zip(i_LC, np.sort(LC)[::-1])),
                   fmt='%10d %15.9f')

    else:
        G = nx.from_numpy_matrix(C)
        BC = np.asarray(list(nx.betweenness_centrality(G).values()))
        DC = np.asarray(list(nx.degree_centrality(G).values()))
        LC = np.asarray(list(nx.load_centrality(G).values()))
        CC = np.asarray(list(nx.closeness_centrality(G).values()))
        EC = np.asarray(list(nx.eigenvector_centrality_numpy(G).values()))

        i_BC = BC.argsort()[::-1]
        i_DC = DC.argsort()[::-1]
        i_LC = LC.argsort()[::-1]
        i_CC = CC.argsort()[::-1]
        i_EC = EC.argsort()[::-1]

        np.savetxt('dat/BC-' + cname + '.txt',
                   list(zip(i_BC, np.sort(BC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/DC-' + cname + '.txt',
                   list(zip(i_DC, np.sort(DC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/LC-' + cname + '.txt',
                   list(zip(i_LC, np.sort(LC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/CC-' + cname + '.txt',
                   list(zip(i_CC, np.sort(CC)[::-1])),
                   fmt='%10d %15.9f')
        np.savetxt('dat/EC-' + cname + '.txt',
                   list(zip(i_EC, np.sort(EC)[::-1])),
                   fmt='%10d %15.9f')
Example #44
import networkx as nx
import csv
import scipy
import numpy
f = open('/Users/dukechan/Downloads/eigenvector_centrality.txt', 'w')
csv_file = "/Users/dukechan/Downloads/sms_sna_oct18_undirected-1.csv"
reader = csv.reader(open(csv_file))  # the Python 2 file() builtin is open() in Python 3
G = nx.Graph()
for line in reader:
    G.add_edge(line[4], line[5])
E = nx.eigenvector_centrality_numpy(G)
for item in E:
    f.write("Node: %s Centrality %.10f\n" % (item, E[item]))
f.close()
 def test_empty_numpy(self):
     with pytest.raises(networkx.NetworkXException):
         e = networkx.eigenvector_centrality_numpy(networkx.Graph())
Example #46
def network_equilibrium(n, d, p, graph=False):
    """
    Returns a network with the same properties as a human social network, namely high clustering, low average shortest
    distance and a skewed degree distribution. This is achieved by applying an algorithm that makes new connections
    using prestige and distance. The algorithm includes a birth-death process which prevents the network from reaching
    completness.

    Parameters
    ----------
    n : int
        Size of network, where the network has nxn nodes
    d : float
        Strength of the distance decay function
    p : float
        Probability of a node losing most of its edges at a given iteration
    graph : bool
        Whether or not the a graph for the geodesics value at each round is printed

    Notes
    -----
    The algorithm works by taking a graph and at each iteration adding an edge between nodes based on prestige
    mechanics. At every iteration there is also a chance that one node gets all but its initial four edges (to its left,
    right, up, and down) removed.
    """
    G = nx.grid_2d_graph(n, n, True)
    G = nx.convert_node_labels_to_integers(G)

    initial_nbrs = {}
    for node in list(G.nodes):
        nbrs = [nbr for nbr in G[node]]
        initial_nbrs[node] = nbrs

    nodes = list(G.nodes)
    num_nodes = len(nodes)

    with open(r'data\{0}x{0} p={1} d={2}.csv'.format(n, p, d), 'w', newline='') as file:

        fields = ['iterations', 'edges', 'geodesic', 'clustering', 'movement', 'move_avg', 'avg_degree', 'degree_skew',
                  'alpha', 'KS', 'p_KS', 'alpha1', 'alpha2', 'switch', 'KS_double', 'p_KS_double']
        writer = csv.DictWriter(file, fieldnames=fields)
        writer.writeheader()

        start = {}
        start['iterations'] = 0
        start['edges'] = nx.number_of_edges(G)
        geo = nx.average_shortest_path_length(G)
        start['geodesic'] = geo
        start['clustering'] = nx.average_clustering(G)
        start['movement'] = 'N/A'
        start['move_avg'] = 'N/A'
        degrees = [G.degree(node) for node in nodes]  # avoid shadowing the parameter n
        start['avg_degree'] = np.mean(degrees)
        start['degree_skew'] = stats.skew(degrees)

        alpha, ks, p_ks, alpha1, alpha2, switch, ks2, p_ks2 = ks_test(G)
        start['alpha'] = alpha
        start['KS'] = ks
        start['p_KS'] = p_ks
        start['alpha1'] = alpha1
        start['alpha2'] = alpha2
        start['switch'] = switch
        start['KS_double'] = ks2
        start['p_KS_double'] = p_ks2

        writer.writerow(start)

        # Necessary for graphing change in clustering and geodesic
        x_vals = [0]
        geos = [geo]

        # Used for gathering info on behaviour of removal phase
        in_a_row = 0
        iterations = 0

        # Used to measure whether the network is in equilibrium
        prev_geo = nx.average_shortest_path_length(G)
        movement = []

        while in_a_row < 3:
            for i in range(num_nodes):
                iterations += 1

                # Select random person
                node = random.choice(nodes)
                odds = []
                centrality = nx.eigenvector_centrality_numpy(G)
                nbrs = [nbr for nbr in G[node]]
                nbrs.append(node)

                for optn in nodes:
                    if optn in nbrs:
                        odds.append(0)
                    else:
                        dist = nx.shortest_path_length(G, node, optn)
                        # Set the odds of being connected to based on eigenvector centrality of the option and its
                        # distance from n
                        w = centrality[optn] * math.exp(-d*dist)
                        odds.append(w)

                # Select at random a new connection for n from the list of options given the assigned odds
                a = random.choices(nodes, weights=odds, k=1)[0]
                G.add_edge(node, a)

                # Removal only has a p probability of occurring
                if random.random() < p:
                    # count_remove_n += 1

                    # Select node for removal
                    rmv = random.choice(nodes)
                    # List all neighbours of the node being removed
                    nbrs = [nbr for nbr in G[rmv]]
                    num_nbrs = len(nbrs)
                    to_add = []

                    for nbr in nbrs:
                        # The 4 initial neighbours always stay connected
                        if nbr in initial_nbrs[rmv]:
                            to_add.append(nbr)
                            # count_keep_e += 1
                        else:
                            # Every other connection is given a probability of maintaining their connection
                            mutuals = list(nx.common_neighbors(G, rmv, nbr))
                            odd = ((len(mutuals) + 1) / num_nbrs)
                            if random.random() < odd:
                                to_add.append(nbr)

                    # Update the connections
                    G.remove_node(rmv)
                    G.add_node(rmv)
                    for choice in to_add:
                        G.add_edge(rmv, choice)

            end_of_round = {}
            end_of_round['iterations'] = iterations
            geo = nx.average_shortest_path_length(G)
            end_of_round['edges'] = nx.number_of_edges(G)
            end_of_round['geodesic'] = geo
            end_of_round['clustering'] = nx.average_clustering(G)
            move = geo - prev_geo
            prev_geo = geo
            end_of_round['movement'] = move
            degrees = [G.degree(node) for node in nodes]
            end_of_round['avg_degree'] = np.mean(degrees)
            end_of_round['degree_skew'] = stats.skew(degrees)
            movement.append(move)
            check = np.mean(movement)
            end_of_round['move_avg'] = check

            alpha, ks, p_ks, alpha1, alpha2, switch, ks2, p_ks2 = ks_test(G)
            end_of_round['alpha'] = alpha
            end_of_round['KS'] = ks
            end_of_round['p_KS'] = p_ks
            end_of_round['alpha1'] = alpha1
            end_of_round['alpha2'] = alpha2
            end_of_round['switch'] = switch
            end_of_round['KS_double'] = ks2
            end_of_round['p_KS_double'] = p_ks2

            writer.writerow(end_of_round)

            x_vals.append(iterations)
            geos.append(geo)

            if abs(check) < 0.001:
                in_a_row += 1
            else:
                in_a_row = 0

    if graph:
        plt.scatter(x_vals, geos, s=10)
        plt.title("Geodesic Equilibrium")
        plt.xlabel("# of Iterations")
        plt.ylabel("Average Geodesic")
        plt.axhline(3.4, c='black', lw=1)
        plt.savefig(r'graphs\{0}x{0} p={1} d={2} geodesic.png'.format(n, p, d))  # matplotlib infers the format from the extension; .csv is not a valid figure format
        plt.close()

    return G
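
A minimal invocation (illustrative values; assumes the data\ and graphs\ output directories exist and that the ks_test helper used above is defined elsewhere in the module):

G = network_equilibrium(10, d=1.0, p=0.1, graph=True)
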