def augmentNodes(g):
    r1 = nx.eigenvector_centrality_numpy(g)
    r2 = nx.degree_centrality(g)
    r3 = nx.betweenness_centrality(g)
    r5 = nx.load_centrality(g, weight='weight')  # M. E. J. Newman, "Scientific collaboration networks. II. Shortest paths, weighted networks, and centrality", Phys. Rev. E 64, 016132 (2001).
    r6 = nx.pagerank(g, alpha=0.85, personalization=None, max_iter=100, tol=1e-08, nstart=None, weight='weight')
    
    if g.is_directed():
        r8 = nx.in_degree_centrality(g)
        r9 = nx.out_degree_centrality(g)
#        r10 = nx.hits(g, max_iter=100, tol=1e-08, nstart=None)
    else:
        r4 = nx.communicability_centrality(g)
        r7 = nx.clustering(g, weight='weight')
        
    for x in g.nodes():
        g.node[x]['eigenvector_centrality_numpy'] = r1[x]
        g.node[x]['degree_centrality'] = r2[x]  
        g.node[x]['betweenness_centrality'] = r3[x]
        g.node[x]['load_centrality'] = r5[x]  
        g.node[x]['pagerank'] = r6[x]

        if g.is_directed():
            g.node[x]['in_degree_centrality'] = r8[x]
            g.node[x]['out_degree_centrality'] = r9[x]
#            g.node[x]['hits'] = r10[x]
        else:
            g.node[x]['communicability_centrality'] = r4[x]
            g.node[x]['clustering'] = r7[x]
    return g        
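A minimal usage sketch, assuming a NetworkX 1.x-era install (Graph.node and nx.communicability_centrality were removed in later releases):

import networkx as nx

g = nx.karate_club_graph()              # small undirected sample graph
g = augmentNodes(g)
# every node now carries the computed metrics as node attributes
print(g.node[0]['pagerank'])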
Example #2
def load_component(seed_num, graph_json_filename=None, graph_json_str=None):
  if graph_json_filename is None and graph_json_str is None:
    return []

  G = None
  if graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  # sort so the largest component comes first; the leftover seed budget
  # below is assigned to components[0]
  components = sorted(nx.connected_components(G), key=len, reverse=True)
  components = [c for c in components if len(c) > 0.1 * len(G)]
  total_size = sum(len(c) for c in components)
  total_nodes = 0
  rtn = []
  for comp in components[1:]:
    num_nodes = int(float(len(comp)) / total_size * seed_num)
    component = G.subgraph(list(comp))
    clse_cent = nx.load_centrality(component)
    collector = collections.Counter(clse_cent)
    clse_cent = collector.most_common(num_nodes)
    rtn += [node for node, _ in clse_cent]
    total_nodes += num_nodes

  num_nodes = seed_num - total_nodes
  component = G.subgraph(list(components[0]))
  clse_cent = nx.load_centrality(component)
  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(num_nodes)
  rtn += [node for node, _ in clse_cent]
  return rtn
  return rtn
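The util.load_graph helper is project-specific; a self-contained sketch of the same seed-splitting idea, assuming only networkx:

import collections
import networkx as nx

def pick_seeds(G, seed_num):
    # keep components holding more than 10% of the nodes, largest first
    comps = sorted(nx.connected_components(G), key=len, reverse=True)
    comps = [c for c in comps if len(c) > 0.1 * len(G)]
    total = sum(len(c) for c in comps)
    seeds = []
    for comp in comps:
        k = int(float(len(comp)) / total * seed_num)
        cent = nx.load_centrality(G.subgraph(comp))
        seeds += [n for n, _ in collections.Counter(cent).most_common(k)]
    return seeds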
 def test_not_strongly_connected(self):
     b = nx.load_centrality(self.D)
     result = {0: 5./12,
               1: 1./4,
               2: 1./12,
               3: 1./4,
               4: 0.000}
     for n in sorted(self.D):
         assert_almost_equal(result[n], b[n], places=3)
         assert_almost_equal(result[n], nx.load_centrality(self.D, n), places=3)
 def test_p3_load(self):
     G=self.P3
     c=nx.load_centrality(G)
     d={0: 0.000,
        1: 1.000,
        2: 0.000}
     for n in sorted(G):
         assert_almost_equal(c[n],d[n],places=3)
     c=nx.load_centrality(G,v=1)
     assert_almost_equal(c,1.0)
     c=nx.load_centrality(G,v=1,normalized=True)
     assert_almost_equal(c,1.0)
def analyze_graph(G):    
    #centralities and node metrics
    out_degrees = G.out_degree()
    in_degrees = G.in_degree()
    betweenness = nx.betweenness_centrality(G)
    eigenvector = nx.eigenvector_centrality_numpy(G)
    closeness = nx.closeness_centrality(G)
    pagerank = nx.pagerank(G)
    avg_neighbour_degree = nx.average_neighbor_degree(G)
    redundancy = bipartite.node_redundancy(G)
    load = nx.load_centrality(G)
    hubs, authorities = nx.hits(G)  # nx.hits returns a (hubs, authorities) pair
    vitality = nx.closeness_vitality(G)
    
    for name in G.nodes():
        G.node[name]['out_degree'] = out_degrees[name]
        G.node[name]['in_degree'] = in_degrees[name]
        G.node[name]['betweenness'] = betweenness[name]
        G.node[name]['eigenvector'] = eigenvector[name]
        G.node[name]['closeness'] = closeness[name]
        G.node[name]['pagerank'] = pagerank[name]
        G.node[name]['avg-neigh-degree'] = avg_neighbour_degree[name]
        G.node[name]['redundancy'] = redundancy[name]
        G.node[name]['load'] = load[name]
        G.node[name]['hub'] = hubs[name]
        G.node[name]['authority'] = authorities[name]
        G.node[name]['vitality'] = vitality[name]
        
    #communities
    partitions = community.best_partition(G)
    for member, c in partitions.items():
        G.node[member]['community'] = c   
    
    return G
Example #6
 def most_central(self,F=1,cent_type='betweenness'):
     if cent_type == 'betweenness':
         ranking = nx.betweenness_centrality(self.G).items()
     elif cent_type == 'closeness':
         ranking = nx.closeness_centrality(self.G).items()
     elif cent_type == 'eigenvector':
         ranking = nx.eigenvector_centrality(self.G).items()
     elif cent_type == 'harmonic':
         ranking = nx.harmonic_centrality(self.G).items()
     elif cent_type == 'katz':
         ranking = nx.katz_centrality(self.G).items()
     elif cent_type == 'load':
         ranking = nx.load_centrality(self.G).items()
     elif cent_type == 'degree':
         ranking = nx.degree_centrality(self.G).items()
     else:
         raise ValueError('unknown cent_type: %s' % cent_type)
     ranks = [r for n,r in ranking]
     cent_dict = dict([(self.lab[n],r) for n,r in ranking])
     m_centrality = sum(ranks)
     if len(ranks) > 0:
         m_centrality = m_centrality/len(ranks)
     #Create a graph with the nodes above the cutoff centrality- remove the low centrality nodes
     thresh = F*m_centrality
     lab = {}
     for k in self.lab:
         lab[k] = self.lab[k]
     g = Graph(self.adj.copy(),self.char_list)
     for n,r in ranking:
         if r < thresh:
             g.G.remove_node(n)
             del g.lab[n]
     return (cent_dict,thresh,g)
Example #7
def load_neighbors(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
  if graph_json_filename is None and graph_json_str is None and graph is None:
    return []

  G = None
  if graph is not None:
    G = graph
  elif graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  clse_cent = nx.get_node_attributes(G, "centrality")
  if len(clse_cent) == 0:
    clse_cent = nx.load_centrality(G)
    nx.set_node_attributes(G, "centrality", clse_cent)
    print "hi load neighbors"
  
  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(SURROUND_TOP)
  nodes = map(lambda (x, y): x, clse_cent)

  current_seed = 0
  rtn = []
  while current_seed < seed_num:
    current_node = nodes[current_seed % len(nodes)]
    current_neighbors = G.neighbors(current_node)
    rtn += random.sample(set(current_neighbors) - set(rtn) - set(nodes), 1)
    current_seed += 1

  return rtn
def centrality(net):
    values = {}
    close = nx.closeness_centrality(net, normalized=True)
    eigen = nx.eigenvector_centrality_numpy(net)
    page = nx.pagerank(net)
    bet = nx.betweenness_centrality(net, normalized=True)
    flow_c = nx.current_flow_closeness_centrality(net, normalized=True)
    flow_b = nx.current_flow_betweenness_centrality(net, normalized=True)
    load = nx.load_centrality(net, normalized=True)
    com_c = nx.communicability_centrality(net)
    com_b = nx.communicability_betweenness_centrality(net, normalized=True)
    degree = net.degree()
    
    file3 = open("bl.csv",'w')
    for xt in [bet,load,degree,page,flow_b,com_c,com_b,eigen,close,flow_c]:#[impo,bet,flow_b,load,com_c,com_b] :
        for yt in [bet,load,degree,page,flow_b,com_c,com_b,eigen,close,flow_c]:#[impo,bet,flow_b,load,com_c,com_b] :
            corr(xt.values(),yt.values(),file3)
        print
        file3.write("\n")
    file3.close()
    #plt.plot(x,y, 'o')
    #plt.plot(x, m*x + c, 'r', label='Fitted line')
    #plt.show()
    #for key,item in close.iteritems() :
        #values[key] = [impo.get(key),bet.get(key),flow_b.get(key), load.get(key),com_c.get(key),com_b.get(key)]
        
    # note: 'values' is never populated; the filling loop above is commented out
    return values
 def f36(self):
     start = 0
     s = nx.load_centrality(self.G).values()
     res = sum(s)
     stop = 0
     # self.feature_time.append(stop - start)
     return res
 def test_p2_load(self):
     G=nx.path_graph(2)
     c=nx.load_centrality(G)
     d={0: 0.000,
        1: 0.000}
     for n in sorted(G):
         assert_almost_equal(c[n],d[n],places=3)
        def load_centrality_month_airports(data):    
            df = data.copy()
            df['DateOfDeparture'] = pd.to_datetime(df['DateOfDeparture'])
            df['month'] = df['DateOfDeparture'].dt.month.astype(str)
            df['year'] = df['DateOfDeparture'].dt.year.astype(str)
            df['year_month'] = df[['month','year']].apply(lambda x: '-'.join(x),axis=1)
            df['year_month_dep'] = df[['Departure','month','year']].apply(lambda x: '-'.join(x),axis=1)
            df['year_month_arr'] = df[['Arrival','month','year']].apply(lambda x: '-'.join(x),axis=1)
            year_month = pd.unique(df['year_month'])
            # G is built once outside the loop: edges accumulate, so each month's
            # centrality is computed on the cumulative network up to that month
            G = nx.Graph()
            load_centrality = {}

            for i, item in enumerate(year_month):
                sub_df = df[df['year_month'] == item][['Departure','Arrival']]
                list_dep_arr = zip(sub_df['Departure'], sub_df['Arrival'])
                G.add_edges_from(list_dep_arr)
                #G.number_of_nodes()
                #G.number_of_edges()
                centrality_month = nx.load_centrality(G)
                centrality_month = pd.DataFrame(centrality_month.items())
                centrality_month['year_month'] = [item] * centrality_month.shape[0]
                centrality_month['airport_year_month'] = centrality_month[centrality_month.columns[[0,2]]].apply(lambda x: '-'.join(x),axis=1)
                centrality_month = dict(zip(centrality_month['airport_year_month'], centrality_month[1]))

                z = load_centrality.copy()
                z.update(centrality_month)
                load_centrality = z
            df['load_centrality_month_dep'] = df['year_month_dep'].map(load_centrality)
            df['load_centrality_month_arr'] = df['year_month_arr'].map(load_centrality)
            return df
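A quick smoke test with the columns the function expects (DateOfDeparture, Departure, Arrival); the airport codes here are arbitrary:

import pandas as pd

data = pd.DataFrame({
    'DateOfDeparture': ['2012-06-19', '2012-06-20', '2012-07-01'],
    'Departure': ['ORD', 'LAX', 'ORD'],
    'Arrival':   ['LAX', 'ORD', 'DFW'],
})
out = load_centrality_month_airports(data)
print(out[['year_month_dep', 'load_centrality_month_dep']])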
Example #12
def node_load_centrality(X):
    """
    based on networkx function: load_centrality
    """
    XX = np.zeros((X.shape[0], int(np.sqrt(X.shape[1]))))
    for i, value in enumerate(X):
        adj_mat = value.reshape((int(np.sqrt(len(value))), -1))
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat

#        th = np.mean(adj_mat) - 0.05
#        adj_mat = np.where(adj_mat < th, adj_mat, 0.)

        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.86)
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))

        g = nx.from_numpy_matrix(adj_mat)
        print "Graph Nodes = {0}, Graph Edges = {1} ".format(g.number_of_nodes(), g.number_of_edges())
        print "\nEdge kept ratio, {0}".format(float(g.number_of_edges())/((g.number_of_nodes()*(g.number_of_nodes()-1))/2))

        deg_cent = nx.load_centrality(g, weight='weight')
        node_cent = np.zeros(g.number_of_nodes())

        for k in deg_cent:
            node_cent[k] = deg_cent[k]
        XX[i] = node_cent
        print "graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i]))
#    XX = XX*100
    ss = StandardScaler()
    XX = ss.fit_transform(XX.T).T

    return XX
 def test_unnormalized_p3_load(self):
     G=self.P3
     c=nx.load_centrality(G,normalized=False)
     d={0: 0.000,
        1: 2.000,
        2: 0.000}
     for n in sorted(G):
         assert_almost_equal(c[n],d[n],places=3)
    def forUndirected(G):

        myList = [nx.eigenvector_centrality_numpy(G), 
                  nx.degree_centrality(G),
                  nx.betweenness_centrality(G),
                  nx.communicability_centrality(G), 
                  nx.load_centrality(G),   
                  nx.pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1e-08, nstart=None, weight='weight'),
                  nx.clustering(G, weight='weight')]
        return myList
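For example, on a small weighted graph (again assuming the 1.x-era nx.communicability_centrality):

G = nx.les_miserables_graph()        # weighted undirected co-occurrence graph
eig, deg, btw, comm, load, pr, clus = forUndirected(G)
print(max(load, key=load.get))       # the most load-central character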
Example #16
 def calcAndPickleLoadCentrality(self):
     """
     Calculates the load centrality for all nodes in the network and
     saves the resulting dictionary to the hard drive.
     Note: very similar to betweenness centrality
     """
     # Convert the DiGraph to normal graph
     G = self.G.to_undirected()
     t = time.time()
     print '\nCalculating load centrality...'
     self.loadCentrality = nx.load_centrality(G)
     print 'Load centrality calculation time: ' + str(int(time.time() - t)) + ' seconds'
     pickle.dump(self.loadCentrality, open(self.dirName + '/load_centrality.pickle', 'w'))
 def test_krackhardt_load(self):
     G=self.K
     c=nx.load_centrality(G)
     d={0: 0.023,
        1: 0.023,
        2: 0.000,
        3: 0.102,
        4: 0.000,
        5: 0.231,
        6: 0.231,
        7: 0.389,
        8: 0.222,
        9: 0.000}
     for n in sorted(G):
         assert_almost_equal(c[n],d[n],places=3)
def calculate_centrality(G):
	# dc_dumps = json.dumps(nx.degree_centrality(G).items(),sort_keys=True,indent=4)
	# dc_loads = json.loads(dc_dumps)
	dc_sorted = sorted(nx.degree_centrality(G).items(), key=itemgetter(0), reverse=True)
	bc_sorted = sorted(nx.betweenness_centrality(G).items(), key=itemgetter(0), reverse=True)
	clc_sorted = sorted(nx.closeness_centrality(G).items(), key=itemgetter(0), reverse=True)
	coc_sorted = sorted(nx.communicability_centrality(G).items(), key=itemgetter(0), reverse=True)
	lc_sorted = sorted(nx.load_centrality(G).items(), key=itemgetter(0), reverse=True)
	cfbc_sorted = sorted(nx.current_flow_betweenness_centrality(G).items(), key=itemgetter(0), reverse=True)
	cfcc_sorted = sorted(nx.current_flow_closeness_centrality(G).items(), key=itemgetter(0), reverse=True)
	# print ec_sorted[0]
	
	developer_centrality = []

	developer_file = open("public/wordpress/developer.json")
	developers = json.load(developer_file)
	for developer in developers:
		degree = 0
		betweenness = 0
		closeness = 0
		communicability = 0
		load = 0
		current_flow_betweenness = 0
		current_flow_closeness = 0
		for i in range(0, len(dc_sorted)):
			# if ( not dc_sorted[i][0] == bc_sorted[i][0] == clc_sorted[i][0] == coc_sorted[i][0] == lc_sorted[i][0] == cfbc_sorted[i][0]):
			# 	print 'false'
			if( developer['developer'] == dc_sorted[i][0]):
				degree = dc_sorted[i][1]
				betweenness = bc_sorted[i][1]
				closeness = clc_sorted[i][1]
				communicability = coc_sorted[i][1]
				load = lc_sorted[i][1]
				current_flow_betweenness = cfbc_sorted[i][1]
				current_flow_closeness = cfcc_sorted[i][1]

		developer_centrality.append({
			'name': developer['developer'],
		 	'degree': degree,
			'betweenness': betweenness,
			'closeness': closeness,
			'communicability': communicability,
			'load': load,
			'current_flow_betweenness': current_flow_betweenness,
			'current_flow_closeness':current_flow_closeness,
		 })

	return developer_centrality
    def test_unnormalized_krackhardt_load(self):
        G=self.K
        c=nx.load_centrality(G,normalized=False)
        d={0: 1.667,
           1: 1.667,
           2: 0.000,
           3: 7.333,
           4: 0.000,
           5: 16.667,
           6: 16.667,
           7: 28.000,
           8: 16.000,
           9: 0.000}

        for n in sorted(G):
            assert_almost_equal(c[n],d[n],places=3)
def getHugeStats(g):
    
    if g.is_directed():
        P1 = pd.DataFrame({'load_centrality': nx.load_centrality(g, weight='weight'),
                           'betweenness_centrality': nx.betweenness_centrality(g, weight='weight'),
                           
                           'pagerank': pd.Series(nx.pagerank(g, alpha=0.85, personalization=None, max_iter=100, tol=1e-08, nstart=None, weight='weight')),
                           'eigenvector_centrality': nx.eigenvector_centrality_numpy(g),
                           'degree_centrality': pd.Series(nx.degree_centrality(g)),
                           'in_degree_centrality': pd.Series(nx.in_degree_centrality(g)),
                           'out_degree_centrality': pd.Series(nx.out_degree_centrality(g))})
                           
    else:
        # networkx 1.x returned dicts here; on 2.x wrap these calls in dict()
        P1 = pd.Panel({'spl': pd.DataFrame(nx.shortest_path_length(g)),
                       'apdp': pd.DataFrame(nx.all_pairs_dijkstra_path(g)),
                       'apdl': pd.DataFrame(nx.all_pairs_dijkstra_path_length(g)),
                       'c_exp': pd.DataFrame(nx.communicability_exp(g))})
    return P1
Example #21
def compute_node_measures(ntwk, calculate_cliques=False):
    """
    These return node-based measures
    """
    iflogger.info('Computing node measures:')
    measures = {}
    iflogger.info('...Computing degree...')
    measures['degree'] = np.array(list(ntwk.degree().values()))
    iflogger.info('...Computing load centrality...')
    measures['load_centrality'] = np.array(
        list(nx.load_centrality(ntwk).values()))
    iflogger.info('...Computing betweenness centrality...')
    measures['betweenness_centrality'] = np.array(
        list(nx.betweenness_centrality(ntwk).values()))
    iflogger.info('...Computing degree centrality...')
    measures['degree_centrality'] = np.array(
        list(nx.degree_centrality(ntwk).values()))
    iflogger.info('...Computing closeness centrality...')
    measures['closeness_centrality'] = np.array(
        list(nx.closeness_centrality(ntwk).values()))
    #    iflogger.info('...Computing eigenvector centrality...')
    #    measures['eigenvector_centrality'] = np.array(nx.eigenvector_centrality(ntwk, max_iter=100000).values())
    iflogger.info('...Computing triangles...')
    measures['triangles'] = np.array(list(nx.triangles(ntwk).values()))
    iflogger.info('...Computing clustering...')
    measures['clustering'] = np.array(list(nx.clustering(ntwk).values()))
    iflogger.info('...Computing k-core number')
    measures['core_number'] = np.array(list(nx.core_number(ntwk).values()))
    iflogger.info('...Identifying network isolates...')
    isolate_list = nx.isolates(ntwk)
    binarized = np.zeros((ntwk.number_of_nodes(), 1))
    for value in isolate_list:
        value = value - 1  # Zero indexing
        binarized[value] = 1
    measures['isolates'] = binarized
    if calculate_cliques:
        iflogger.info('...Calculating node clique number')
        measures['node_clique_number'] = np.array(
            list(nx.node_clique_number(ntwk).values()))
        iflogger.info('...Computing number of cliques for each node...')
        measures['number_of_cliques'] = np.array(
            list(nx.number_of_cliques(ntwk).values()))
    return measures
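For example (iflogger is nipype's logger; the dict-returning degree()/clustering() calls assume a networkx 1.x-era API):

ntwk = nx.karate_club_graph()
measures = compute_node_measures(ntwk)
print(measures['load_centrality'].shape)    # one entry per node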
 def test_florentine_families_load(self):
     G=self.F
     c=nx.load_centrality(G)
     d={'Acciaiuoli':    0.000,
        'Albizzi':       0.211,
        'Barbadori':     0.093,
        'Bischeri':      0.104,
        'Castellani':    0.055,
        'Ginori':        0.000,
        'Guadagni':      0.251,
        'Lamberteschi':  0.000,
        'Medici':        0.522,
        'Pazzi':         0.000,
        'Peruzzi':       0.022,
        'Ridolfi':       0.117,
        'Salviati':      0.143,
        'Strozzi':       0.106,
        'Tornabuoni':    0.090}
     for n in sorted(G):
         assert_almost_equal(c[n],d[n],places=3)
 def features(G,normalize_centrality):
     '''
     Returns the features we are interested in within a dict
     '''
     load_centrality=nx.load_centrality(G,normalized=normalize_centrality)
     betweenness_centrality=nx.betweenness_centrality(G,normalized=normalize_centrality)
     eigenvector_centrality=nx.eigenvector_centrality_numpy(G)  # this function takes no 'normalized' argument
     closeness_centrality=nx.closeness_centrality(G,normalized=normalize_centrality)
     in_degree=G.in_degree()
     out_degree=G.out_degree()
     core_number=nx.core_number(G)
     clustering=nx.clustering(G)
     d={}
     d['in_degree']=in_degree
     d['out_degree']=out_degree
     d['load_centrality']=load_centrality
     d['betweenness_centrality']=betweenness_centrality
     d['eigenvector_centrality']=eigenvector_centrality
     d['closeness_centrality']=closeness_centrality
     d['core_number']=core_number
     return d
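A usage sketch; in_degree/out_degree imply a directed graph, and directed clustering needs networkx >= 2.0:

G = nx.gnp_random_graph(30, 0.2, directed=True, seed=1)
feats = features(G, normalize_centrality=True)
print(sorted(feats))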
    def test_unnormalized_florentine_families_load(self):
        G=self.F
        c=nx.load_centrality(G,normalized=False)

        d={'Acciaiuoli':  0.000,
           'Albizzi':    38.333, 
           'Barbadori':  17.000,
           'Bischeri':   19.000,
           'Castellani': 10.000,
           'Ginori':     0.000,
           'Guadagni':   45.667,
           'Lamberteschi': 0.000,
           'Medici':     95.000,
           'Pazzi':      0.000,
           'Peruzzi':    4.000,
           'Ridolfi':    21.333,
           'Salviati':   26.000,
           'Strozzi':    19.333,
           'Tornabuoni': 16.333}
        for n in sorted(G):
            assert_almost_equal(c[n],d[n],places=3)
    def test_load_betweenness_difference(self):
        # Difference Between Load and Betweenness
        # --------------------------------------- The smallest graph
        # that shows the difference between load and betweenness is
        # G=ladder_graph(3) (Graph B below)

        # Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong
        # Wang: Comment on ``Scientific collaboration
        # networks. II. Shortest paths, weighted networks, and
        # centrality". http://arxiv.org/pdf/physics/0511084

        # Notice that unlike here, their calculation adds 1 to the
        # betweenness of every node i for every path from i to every
        # other node.  This is exactly what it should be, based on
        # Eqn. (1) in their paper: the eqn is B(v) = \sum_{s\neq t,
        # s\neq v}{\frac{\sigma_{st}(v)}{\sigma_{st}}}; therefore,
        # they allow v to be the target node.

        # We follow Brandes 2001, who follows Freeman 1977, and make
        # the sum for betweenness of v exclude paths where v is either
        # the source or target node.  To agree with their numbers, we
        # must additionally remove edge (4,8) from the graph; see the
        # A,C example following (there is a mistake in the figure in
        # their paper - personal communication).

        # A = nx.Graph()
        # A.add_edges_from([(0,1), (1,2), (1,3), (2,4), 
        #                  (3,5), (4,6), (4,7), (4,8), 
        #                  (5,8), (6,9), (7,9), (8,9)])
        B = nx.Graph() # ladder_graph(3)
        B.add_edges_from([(0,1), (0,2), (1,3), (2,3), (2,4), (4,5), (3,5)])
        c = nx.load_centrality(B,normalized=False)
        d={0: 1.750,
           1: 1.750,
           2: 6.500,
           3: 6.500,
           4: 1.750,
           5: 1.750}
        for n in sorted(B):
            assert_almost_equal(c[n],d[n],places=3)
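The disagreement is easy to reproduce; the center-to-corner ratio differs (about 3.71 for load vs 4.0 for betweenness), so the two measures disagree under any common rescaling:

B = nx.ladder_graph(3)               # isomorphic to the graph built above
load = nx.load_centrality(B, normalized=False)
btw = nx.betweenness_centrality(B, normalized=False)
for n in sorted(B):
    print(n, load[n], btw[n])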
Example #26
def load_centrality(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
  if graph_json_filename is None and graph_json_str is None and graph is None:
    return []

  G = None
  if graph is not None:
    G = graph
  elif graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  clse_cent = nx.get_node_attributes(G, "centrality")
  if len(clse_cent) == 0:
    clse_cent = nx.load_centrality(G)
    nx.set_node_attributes(G, "centrality", clse_cent)
    print "hi load"

  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(seed_num)

  return map(lambda (x, y): x, clse_cent)
Example #27
def cal_top10(bar_time):
    list_inv=[]
    now=datetime.now()
    reader=csv.reader(open("invs.csv", 'rU'))
    for item in reader:
        time=datetime.strptime(item[0],"%Y.%m.%d")
        # bar_time=datetime(2010,5,10)
        if (time-bar_time).days < 0 :
            diff=now-time
            if diff.days<1000:
                wei_days=1.0
            else:
                wei_days=1-round(float(diff.days)/6000,5)*0.5
            list_inv.append([wei_days,item[1],item[2]])


    import networkx as nx
    # print invest_relations
    G=nx.Graph()
    nodes=[]
    for node in list_inv:
        G.add_edge(node[1],node[2],weight=node[0])
        nodes.append(node[1])
        nodes.append(node[2])

    nodes = list(set(nodes))

    Bcdict=nx.betweenness_centrality(G,weight="weight")
    CCdict=nx.closeness_centrality(G)
    EVCdict=nx.eigenvector_centrality(G,weight="weight")
    DegreeDict=nx.degree_centrality(G)
    loaddict=nx.load_centrality(G,weight="weight")
    # note: only the closeness ranking (CCdict) is used below

    sCCdict=sorted(CCdict, key=CCdict.__getitem__,reverse=True)
    sCCdict=sCCdict[0:50]
    return sCCdict
Example #28
print('pagerank')
a = list(nx.pagerank(nxg).values())
my_list.append(a)

print('closeness')
a = list(nx.closeness_centrality(nxg).values())
my_list.append(a)

print('eigenvector_centrality')
a = list(nx.eigenvector_centrality(nxg).values())
my_list.append(a)

print('harmonic_centrality')
a = list(nx.harmonic_centrality(nxg).values())
my_list.append(a)

print('load_centrality')
a = list(nx.load_centrality(nxg).values())
my_list.append(a)

for i in my_list:
    print (len(i))

with open('centralidades-'+sys.argv[1].split('.')[0]+'-picke.txt', 'wb') as f:
    pickle.dump(my_list, f)
with open('sementes'+sys.argv[1].split('.')[0]+'.txt', 'w') as f:
    for s in my_list:
        f.write(str(s))
        f.write('\n')
Example #29
def function(input):

    if input == 1:
        clustering_coefficient = nx.clustering(G)
        clustering_coefficient = normalise(clustering_coefficient)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            clustering_coefficient.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            clustering_coefficient, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_clustering_coefficient.txt")
        write_to_file(dev_data, baseDir + "dev_clustering_coefficient.txt")
        write_to_file(test_data, baseDir + "test_clustering_coefficient.txt")

    if input == 2:
        betweenness_centrality = nx.betweenness_centrality(G, normalized=True)
        betweenness_centrality = normalise(betweenness_centrality)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            betweenness_centrality.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            betweenness_centrality, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_betweenness_centrality.txt")
        write_to_file(dev_data, baseDir + "dev_betweenness_centrality.txt")
        write_to_file(test_data, baseDir + "test_betweenness_centrality.txt")

    if input == 3:
        closeness_centrality = nx.closeness_centrality(G, normalized=True)
        closeness_centrality = normalise(closeness_centrality)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            closeness_centrality.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            closeness_centrality, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_closeness_centrality.txt")
        write_to_file(dev_data, baseDir + "dev_closeness_centrality.txt")
        write_to_file(test_data, baseDir + "test_closeness_centrality.txt")

    if input == 4:
        average_neighbor_degree = nx.average_neighbor_degree(G)
        average_neighbor_degree = normalise(average_neighbor_degree)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            average_neighbor_degree.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            average_neighbor_degree, train_keys, dev_keys, test_keys)
        write_to_file(train_data,
                      baseDir + "train_average_neighbor_degree.txt")
        write_to_file(dev_data, baseDir + "dev_average_neighbor_degree.txt")
        write_to_file(test_data, baseDir + "test_average_neighbor_degree.txt")

    if input == 5:
        degree_centrality = nx.degree_centrality(G)
        degree_centrality = normalise(degree_centrality)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            degree_centrality.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            degree_centrality, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_degree_centrality.txt")
        write_to_file(dev_data, baseDir + "dev_degree_centrality.txt")
        write_to_file(test_data, baseDir + "test_degree_centrality.txt")

    if input == 6:
        load_centrality = nx.load_centrality(G, normalized=True)
        load_centrality = normalise(load_centrality)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            load_centrality.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            load_centrality, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_load_centrality.txt")
        write_to_file(dev_data, baseDir + "dev_load_centrality.txt")
        write_to_file(test_data, baseDir + "test_load_centrality.txt")

    if input == 7:
        shortest_path_length_dict = nx.shortest_path_length(G)
        shortest_path_length = {}
        for key_1 in shortest_path_length_dict:
            for key_2 in shortest_path_length_dict[key_1]:
                shortest_path_length[
                    str(key_1) + "\t" +
                    str(key_2)] = shortest_path_length_dict[key_1][key_2]
        shortest_path_length = normalise(shortest_path_length)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            shortest_path_length.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            shortest_path_length, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_shortest_path_length.txt")
        write_to_file(dev_data, baseDir + "dev_shortest_path_length.txt")
        write_to_file(test_data, baseDir + "test_shortest_path_length.txt")

    if input == 8:
        jaccard_coefficient = nx.jaccard_coefficient(G)
        jaccard_coefficient_dict = {}
        for u, v, p in jaccard_coefficient:
            jaccard_coefficient_dict[str(u) + "\t" + str(v)] = p
        jaccard_coefficient_dict = normalise(jaccard_coefficient_dict)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            jaccard_coefficient_dict.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            jaccard_coefficient_dict, train_keys, dev_keys, test_keys)
        write_to_file(train_data,
                      baseDir + "train_jaccard_coefficient_dict.txt")
        write_to_file(dev_data, baseDir + "dev_jaccard_coefficient_dict.txt")
        write_to_file(test_data, baseDir + "test_jaccard_coefficient_dict.txt")

    if input == 9:
        katz_centrality = nx.katz_centrality(G)
        katz_centrality = normalise(katz_centrality)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            katz_centrality.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            katz_centrality, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_katz_centrality.txt")
        write_to_file(dev_data, baseDir + "dev_katz_centrality.txt")
        write_to_file(test_data, baseDir + "test_katz_centrality.txt")

    if input == 10:
        pagerank = nx.pagerank(G)
        pagerank = normalise(pagerank)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            pagerank.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            pagerank, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_pagerank.txt")
        write_to_file(dev_data, baseDir + "dev_pagerank.txt")
        write_to_file(test_data, baseDir + "test_pagerank.txt")

    if input == 11:
        communicability = nx.communicability(G)
        # nx.communicability returns a dict of dicts; flatten it (as in input == 7)
        # before normalising if scalar values are needed
        communicability = normalise(communicability)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            communicability.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            communicability, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_communicability.txt")
        write_to_file(dev_data, baseDir + "dev_communicability.txt")
        write_to_file(test_data, baseDir + "test_communicability.txt")

    if input == 12:
        degree = dict(G.degree())
        degree = normalise(degree)
        train_keys, dev_keys, test_keys = create_train_test_dev_split(
            degree.keys())
        train_data, dev_data, test_data = write_train_test_dev(
            degree, train_keys, dev_keys, test_keys)
        write_to_file(train_data, baseDir + "train_degree.txt")
        write_to_file(dev_data, baseDir + "dev_degree.txt")
        write_to_file(test_data, baseDir + "test_degree.txt")
Example #30
for node, degree in degreeR:
    if degree > max_degreeR:
        max_degreeR = degree
        nodeR = node

print("The node ", nodeR, "has the highest degree in random graph with a value of ", max_degreeR)


# <font color='blue'>8. What is the node with the highest betweenness?</font> 

# In[7]:


import operator
betweenCE = nx.load_centrality(G_CE)    # load centrality used as the betweenness-style measure
betweenR = nx.load_centrality(random)
max_betweenCE = max(betweenCE.items(), key=operator.itemgetter(1))
max_betweenR = max(betweenR.items(), key=operator.itemgetter(1))

print("The node ",max_betweenCE[0], "has the highest betweeness in G_CE graph with a value of ",max_betweenCE[1])
print("The node ",max_betweenR[0], "has the highest betweeness in random graph with a value of ",max_betweenR[1])


# <font color='blue'>9. What is the node with the highest closeness?</font> 

# In[8]:


closeCE = nx.closeness_centrality(G_CE)
closeR = nx.closeness_centrality(random)
def make_net(centrality_name, in_path, out_path):
	#sample code
		#import _2_time_based_data_network_feature
		#make_net_in_path = "../3.time_based_data/1.cite_relation_devide/"
		#make_net_out_path = "../3.time_based_data/2.centrality_data/"
		#_2_time_based_data.make_net( "in_degree", make_net_in_path, make_net_out_path)

	# Build the network, compute the centrality, and save the result.
	import networkx as nx
	global Dump
	Dump = {}
	make_net_initialize(in_path)
	start_time = time.time()
	temp_start_time = time.time()

	print "=============		make_net start:" + centrality_name + "		=============="
	print "=============		from 1951 to 2015		=============="

	for year in range(1951, 2016):
		print year
		f_in = open(in_path + str(year) + "_cite.csv","r")
		lines = f_in.readlines()
		f_in.close()
		edge_list = []

		for line in lines:
			data = line.split(",")
			data_tuple = (data[0].strip(), data[1].strip())
			edge_list.append(data_tuple)

		Net = nx.DiGraph(edge_list)
		Cen_in = {}
		if (centrality_name == "in_degree"):
			Cen_in = nx.in_degree_centrality(Net)
		elif (centrality_name == "degree"):
			Cen_in = nx.degree_centrality(Net)
		elif (centrality_name == "eigenvector"):
			Cen_in = nx.eigenvector_centrality_numpy(Net)
		elif (centrality_name == "katz"):
			Cen_in = nx.katz_centrality(Net)
		elif (centrality_name == "pagerank"):
			Cen_in = nx.pagerank(Net)
		elif (centrality_name == "communicability"):
			Net = nx.Graph(edge_list)
			Cen_in = nx.communicability_centrality(Net)
		elif (centrality_name == "load"):
			Cen_in = nx.load_centrality(Net)
		
		for j in Cen_in:
			key = j
			val = Cen_in[j]
			Dump[key][year] = val

	# save the results
	f_out = open(out_path + centrality_name +"_centrality.csv", "w")
	for key in Dump:
		line = str(key)
		for year in range(1951, 2016):
			data = Dump[key].get(year, 0)
			line = line + ","+ str(data)
		line = line + "\n"
		f_out.write(line)
	f_out.close()

	print "=============		make_net end			=============="
	print(centrality_name + "takes %s seconds" % (time.time() - temp_start_time))
	temp_start_time = time.time()
def get_load_flow(G):
    load = nx.load_centrality(G)
    load_df = pd.Series(load).to_frame()
    load_df.columns = ['Load']
    load_df['load_rank'] = load_df['Load'].rank(method='min', ascending=False)
    return load_df
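For example:

import networkx as nx

G = nx.karate_club_graph()
print(get_load_flow(G).sort_values('load_rank').head())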
Example #33
        H.add_edge(lista[r][0], lista[r][1], capacity=R)
    for w in range(orden[i]):
        initial = random.randint(0, round(len(H.nodes) / 2))
        final = random.randint(initial, len(H.nodes) - 2)
        while initial == final:
            initial = random.randint(0, round(len(H.nodes) / 2))
            final = random.randint(initial, len(H.nodes) - 2)
        tiempo_inicial = time()
        T = nx.maximum_flow(H, initial, final)
        tiempo_final = time()
        datos[fila, 0] = T[0]
        datos[fila, 1] = tiempo_final - tiempo_inicial
        #--------------------------- Source node info --------------------------------------
        datos[fila, 2] = nx.clustering(H, nodes=initial)
        datos[fila, 3] = nx.closeness_centrality(H, u=initial)
        datos[fila, 4] = nx.load_centrality(H, v=initial)
        datos[fila, 5] = nx.eccentricity(H, v=initial)
        datos[fila, 6] = nx.pagerank(H, alpha=0.9)[initial]
        #--------------------------- Sink node info --------------------------------------
        datos[fila, 7] = nx.clustering(H, nodes=final)
        datos[fila, 8] = nx.closeness_centrality(H, u=final)
        datos[fila, 9] = nx.load_centrality(H, v=final)
        datos[fila, 10] = nx.eccentricity(H, v=final)
        datos[fila, 11] = nx.pagerank(H, alpha=0.9)[final]
        fila += 1

data = datos.copy()
data = pd.DataFrame(data)
data.columns = [
    'F.O.', 'Tiempo', 'Clstr_fuente', 'Clsns_fuente', 'Load_fuente',
    'Ex_fuente', 'Prank_fuente', 'Clstr_sumidero', 'Clsns_sumidero',
Example #36
def calc_centralities(G,org_name,string_version):
    print("Calculating centralities")
    centrality_measures = {}
    string_location=f.string_version_data(string_version)[0]
    print(string_location)
    # if 1==0:
    if os.path.isfile('centrality_data/%s/%s.cent'%(string_location,org_name)):
        print("Using cached centrality data")
        file=open('centrality_data/%s/%s.cent'%(string_location,org_name))
        lines=file.readlines()
        centrality_list=lines.pop(0).strip().split(' ')
        centrality_list.pop(0)
        
        for i,centrality in enumerate(centrality_list):
            centrality_measures[centrality]={}

        for line in lines:
            value_list=line.split(' ')
            for i,centrality in enumerate(centrality_list):
                # print("%d. %s" % (i+1,centrality))
                centrality_measures[centrality][value_list[0]]=float(value_list[i+1])
    else:
        
        print("1. Degree centrality")
        centrality_measures['Degree_Centrality']=nx.degree_centrality(G)
        
        print("2. Closeness centrality")
        centrality_measures['Closeness_Centrality']=Counter(nx.algorithms.centrality.closeness_centrality(G))
        
        print("3. Betweenness centrality")
        centrality_measures['Betweenness_Centrality']=Counter(nx.algorithms.centrality.betweenness_centrality(G))
        
        print("4. Clustering coefficient")
        centrality_measures['Clustering_Co-efficient']=Counter(nx.clustering(G))
        
        print("5. Eigenvector centrality")
        centrality_measures['Eigenvector_Centrality']= nx.eigenvector_centrality(G)
        
        print("6. Subgraph centrality")
        centrality_measures["Subgraph_Centrality"]=nx.subgraph_centrality(G)
        
        print("7. Information centrality")
        centrality_measures["Information_Centrality"]=nx.current_flow_closeness_centrality(f.trim_graph(G))
        
        print("8. Clique Number")
        cliq={}
        for i in G.nodes():
           cliq[i]=nx.node_clique_number(G,i)
        centrality_measures["Clique_Number"]=cliq
        
        print("9. Edge clustering coefficient")
        edge_clus_coeff={}
        for n in G.nodes:
            edge_clus_coeff[n]=0
            for e in G.edges(n):
                num=len(list(nx.common_neighbors(G,e[0],e[1])))
                den=(min(G.degree(e[0]),G.degree(e[1]))-1)
                if den==0:
                    den=1
                edge_clus_coeff[n]+=num/den
    
        centrality_measures['Edge_Clustering_Coefficient']=edge_clus_coeff
        
        print("10. Page Rank")
        centrality_measures['Page_Rank']=nx.pagerank(G)
        
        print("11. Random Walk Betweenness Centrality")
        centrality_measures["Random_Walk_Betweenness_Centrality"]=nx.current_flow_betweenness_centrality(f.trim_graph(G))
        
        print("12. Load Centrality")
        centrality_measures["Load_Centrality"]=nx.load_centrality(G)
        
        print("13. Communicability Betweenness")
        centrality_measures["Communicability_Betweenness"]=nx.communicability_betweenness_centrality(f.trim_graph(G))
        
        print("14. Harmonic Centrality")
        centrality_measures["Harmonic_Centrality"]=nx.harmonic_centrality(G)
            
        print("15. Reaching Centrality")
        reach_cent={}
        for node in G.nodes:
            reach_cent[node] = nx.local_reaching_centrality(G,node)
        centrality_measures["Reaching_Centrality"]=reach_cent
        
        print("16. Katz Centrality(not calculated)")
    #   centrality_measures["Katz_Centrality"]=nx.katz_centrality(G)
    
        datafile=open("refex_props/%s.refex" % (org_name))
        sample_line=datafile.readline()
        s= sample_line.strip().split(' ')
        for x in range(1,len(s)):
            centrality_measures["refex#%d" % (x)]={}                
        for line in datafile:
            props=line.strip().split(" ")
            props=[i.strip('\t') for i in props]
            for x in range(1,len(s)):
                centrality_measures["refex#%d" % (x)][props[0]]=float(props[x])

        datafile=open("refex_rider_props/%s.riderproperties" % (org_name))
        sample_line=datafile.readline()
        s= sample_line.strip().split(' ')
        s.pop(1)
        print(len(s))
        for x in range(1,len(s)):
            centrality_measures["refex_rider#%d" % (x)]={}                
        
        for line in datafile:
            props=line.strip().split(" ")
            props.pop(1)
            for x in range(1,len(props)):

                centrality_measures["refex_rider#%d" % (x)][props[0]]=float(props[x])
    
     
        with open('centrality_data/%s/%s.cent'%(string_location,org_name),'w') as file:
            file.write(str(org_name)+' ')
            centrality_list=list(centrality_measures)
            for x in centrality_list:
                file.write(str(x)+' ')

            for node in G.nodes:
                file.write('\n'+node+' ')
                for x in centrality_list:
                    if node not in centrality_measures[x]:
                        file.write('-1 ')
                    else:
                        file.write(str(centrality_measures[x][node])+' ')
    return centrality_measures
def get_graph(Mat_D, Threshold, percentageConnections=False, complet=False):
    import scipy.io as sio
    import numpy as np
    import networkx as nx
    import pandas as pd
    import os
    Data = sio.loadmat(Mat_D)
    matX = Data['Correlation']  #[:tamn,:tamn]
    labels = Data['labels']
    print(np.shape(matX))
    print(np.shape(labels))
    print(np.min(matX), np.max(matX))

    if percentageConnections:
        if percentageConnections > 0 and percentageConnections < 1:
            for i in range(-100, 100):
                per = np.sum(matX > i / 100.) / np.size(matX)
                if per <= Threshold:
                    Threshold = i / 100.
                    break
            print(Threshold)
        else:
            print('The coefficient is outside the valid range')

    # Edge list of the graph
    row, col = np.shape(matX)
    e = []
    for i in range(1, row):
        for j in range(i):
            if complet:
                e.append((labels[i], labels[j], matX[i, j]))
            else:
                if matX[i, j] > Threshold:
                    e.append((labels[i], labels[j], matX[i, j]))

    print(np.shape(e)[0], int(((row - 1) * row) / 2))

    # Build the graph
    G = nx.Graph()
    G.add_weighted_edges_from(e)
    labelNew = list(G.nodes)

    # Graph-level metrics (weighted)
    Dpc = nx.degree_pearson_correlation_coefficient(G, weight='weight')
    cluster = nx.average_clustering(G, weight='weight')

    # Unweighted
    estra = nx.estrada_index(G)
    tnsity = nx.transitivity(G)
    conNo = nx.average_node_connectivity(G)
    ac = nx.degree_assortativity_coefficient(G)

    # Per-node metrics
    tam = 15
    BoolCenV = False
    BoolLoad = False
    alpha = 0.1
    beta = 1.0

    katxCN = nx.katz_centrality_numpy(G,
                                      alpha=alpha,
                                      beta=beta,
                                      weight='weight')
    bcen = nx.betweenness_centrality(G, weight='weight')
    av_nd = nx.average_neighbor_degree(G, weight='weight')
    ctr = nx.clustering(G, weight='weight')
    ranPaN = nx.pagerank_numpy(G, weight='weight')
    Gol_N = nx.hits_numpy(G)
    Dgc = nx.degree_centrality(G)
    cl_ce = nx.closeness_centrality(G)
    cluster_Sq = nx.square_clustering(G)
    centr = nx.core_number(G)
    cami = nx.node_clique_number(G)
    camiN = nx.number_of_cliques(G)
    trian = nx.triangles(G)
    colorG = nx.greedy_color(G)
    try:
        cenVNum = nx.eigenvector_centrality_numpy(G, weight='weight')
        tam = tam + 1
        BoolCenV = True
    except TypeError:
        print(
            "The network is too small to compute this parameter")
    except Exception:
        print('NetworkXPointlessConcept: graph null')
    if Threshold > 0:
        carga_cen = nx.load_centrality(G, weight='weight')  # positive weights
        BoolLoad = True
        tam = tam + 1
    #katxC=nx.katz_centrality(G, alpha=alpha, beta=beta, weight='weight')
    #cenV=nx.eigenvector_centrality(G,weight='weight')
    #cenV=nx.eigenvector_centrality(G,weight='weight')
    #Golp=nx.hits(G)
    #Gol_si=nx.hits_scipy(G)
    #ranPa=nx.pagerank(G, weight='weight')
    #ranPaS=nx.pagerank_scipy(G, weight='weight')

    # 15 base rows, plus one for eigenvector and one for load when computed above
    matrix_datos = np.zeros((tam, np.shape(labelNew)[0]))
    tam = 15
    print(np.shape(matrix_datos))
    lim = np.shape(labelNew)[0]
    for i in range(lim):
        roi = labelNew[i]
        #print(roi)
        matrix_datos[0, i] = katxCN[roi]
        matrix_datos[1, i] = bcen[roi]
        matrix_datos[2, i] = av_nd[roi]
        matrix_datos[3, i] = ctr[roi]
        matrix_datos[4, i] = ranPaN[roi]
        matrix_datos[5, i] = Gol_N[0][roi]
        matrix_datos[6, i] = Gol_N[1][roi]
        matrix_datos[7, i] = Dgc[roi]
        matrix_datos[8, i] = cl_ce[roi]
        matrix_datos[9, i] = cluster_Sq[roi]
        matrix_datos[10, i] = centr[roi]
        matrix_datos[11, i] = cami[roi]
        matrix_datos[12, i] = camiN[roi]
        matrix_datos[13, i] = trian[roi]
        matrix_datos[14, i] = colorG[roi]
        if BoolCenV:
            matrix_datos[15, i] = cenVNum[roi]
            tam = tam + 1
        if BoolLoad:
            matrix_datos[16, i] = carga_cen[roi]
            tam = tam + 1
        #matrix_datos[0,i]=katxC[roi]
        #matrix_datos[2,i]=cenV[roi]
        #matrix_datos[7,i]=Golp[0][roi]
        #matrix_datos[9,i]=Gol_si[0][roi]
        #matrix_datos[10,i]=Golp[1][roi]
        #matrix_datos[12,i]=Gol_si[1][roi]
        #matrix_datos[22,i]=ranPa[roi]
        #matrix_datos[24,i]=ranPaS[roi]
    FuncName = [
        'degree_pearson_correlation_coefficient', 'average_clustering',
        'estrada_index', 'transitivity', 'average_node_connectivity',
        'degree_assortativity_coefficient', 'katz_centrality_numpy',
        'betweenness_centrality', 'average_neighbor_degree', 'clustering',
        'pagerank_numpy', 'hits_numpy0', 'hits_numpy1', 'degree_centrality',
        'closeness_centrality', 'square_clustering', 'core_number',
        'node_clique_number', 'number_of_cliques', 'triangles', 'greedy_color',
        'eigenvector_centrality_numpy', 'load_centrality'
    ]
    frame = pd.DataFrame(matrix_datos)
    frame.columns = labelNew
    frame.index = FuncName[6:tam]

    Resul = os.getcwd()
    out_data = Resul + '/graph_metrics.csv'
    out_mat = Resul + '/graph_metrics_global.mat'

    frame.to_csv(out_data)
    sio.savemat(
        out_mat, {
            FuncName[0]: Dpc,
            FuncName[1]: cluster,
            FuncName[2]: estra,
            FuncName[3]: tnsity,
            FuncName[4]: conNo,
            FuncName[5]: ac
        })
    return out_data, out_mat
Example #38
def return_load_centralities(net):

    ## load centrality from a sparse adjacency matrix
    G = nx.from_scipy_sparse_matrix(net)
    centralities = nx.load_centrality(G)
    return centralities
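For example, starting from a scipy sparse adjacency matrix (nx.to_scipy_sparse_matrix / from_scipy_sparse_matrix exist through networkx 2.x; 3.x renamed them to *_array):

import networkx as nx

A = nx.to_scipy_sparse_matrix(nx.karate_club_graph())
cent = return_load_centralities(A)
print(max(cent.values()))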
def analyze(network):
    import networkx as nx

    G = nx.Graph()

    def weight(a, b):
        """
        Weight of edge. In dot,
        the heavier the weight, the shorter, straighter and more vertical the edge is.
        For other layouts, a larger weight encourages the layout to make the edge length
        closer to that specified by the len attribute.
        """
        # same node is weight == 1
        return (1 - a.distance(b) / devp2p.kademlia.k_max_node_id) * 10

    for node in network.values():
        for r in node.connections:
            G.add_edge(node, r, weight=weight(node, r))

    num_peers = [len(n.connections) for n in network.values()]
    metrics = OrderedDict(num_nodes=len(network))
    metrics['max_peers'] = max(num_peers)
    metrics['min_peers'] = min(num_peers)
    metrics['avg_peers'] = statistics.mean(num_peers)
    metrics['rsd_peers'] = statistics.stdev(num_peers) / statistics.mean(
        num_peers)

    # calc shortests paths
    # lower is better
    if nx.is_connected(G):
        print 'calculating avg_shortest_path'
        avg_shortest_paths = []
        for node in G:
            path_length = nx.single_source_shortest_path_length(G, node)
            avg_shortest_paths.append(statistics.mean(path_length.values()))
        metrics['avg_shortest_path'] = statistics.mean(avg_shortest_paths)
        metrics['rsd_shortest_path'] = statistics.stdev(
            avg_shortest_paths) / metrics['avg_shortest_path']

    try:
        # Closeness centrality at a node is 1/average distance to all other nodes.
        # higher is better
        print 'calculating closeness centrality'
        vs = nx.closeness_centrality(G).values()
        metrics['min_closeness_centrality'] = min(vs)
        metrics['avg_closeness_centrality'] = statistics.mean(vs)
        metrics['rsd_closeness_centrality'] = statistics.stdev(
            vs) / metrics['avg_closeness_centrality']

        # The load centrality of a node is the fraction of all shortest paths that
        # pass through that node
        # Daniel:
        # I recommend calculating (or estimating) the centrality of each node and making sure that
        # there are no nodes with much higher centrality than the average.
        # lower is better
        print 'calculating load centrality'
        vs = nx.load_centrality(G).values()
        metrics['max_load_centrality'] = max(vs)
        metrics['avg_load_centrality'] = statistics.mean(vs)
        metrics['rsd_load_centrality'] = statistics.stdev(
            vs) / metrics['avg_load_centrality']

        print 'calculating node_connectivity'
        # higher is better
        metrics['node_connectivity'] = nx.node_connectivity(G)

        print 'calculating diameter'
        # lower is better
        metrics['diameter'] = nx.diameter(G)

    except nx.exception.NetworkXError as e:
        metrics['ERROR'] = -1
    return metrics
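The comment above describes closeness centrality as the reciprocal of the average distance to all other nodes; a small self-contained check of that relationship (illustrative, not part of the example):

import networkx as nx
import statistics

G = nx.path_graph(5)
dist = nx.single_source_shortest_path_length(G, 0)
# closeness of node 0 == 1 / mean shortest-path distance to the other nodes
manual = 1.0 / statistics.mean(d for n, d in dist.items() if n != 0)
assert abs(nx.closeness_centrality(G)[0] - manual) < 1e-12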
Example #41
 def test_k5_load(self):
     G = self.K5
     c = nx.load_centrality(G)
     d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000}
     for n in sorted(G):
         assert_almost_equal(c[n], d[n], places=3)
Example #42
 def test_p2_load(self):
     G = nx.path_graph(2)
     c = nx.load_centrality(G)
     d = {0: 0.000, 1: 0.000}
     for n in sorted(G):
         assert_almost_equal(c[n], d[n], places=3)
Example #43
 def metric(self, metric, node):
     value = None
     if metric == 'lc':
         value = nx.load_centrality(self.FG, node)
     return value
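For context: `nx.load_centrality` accepts the node as its optional second argument `v`, in which case it returns that single node's score as a float rather than a dict, which is why the method above can return the call result directly.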
Example #44
def mergegraph(graphs, pos_old, labels_old, edge_prob=0.3, edge_num=0.4):
    nodes = []
    edges = []
    pos = {}
    node_cnt = 0
    val = 0.9
    shift_value = [[-val, val], [val, val], [-val, -val], [val, -val]]

    comm_labels = []

    for i, g in enumerate(graphs):
        tmp_nodes = list(g.nodes())
        tmp_edges = list(g.edges())

        comm_labels += [i] * len(tmp_nodes)

        # enumerate yields (index, node): map each old node id to a new global id
        node_map = {n: node_cnt + idx for idx, n in enumerate(tmp_nodes)}
        node_cnt += len(tmp_nodes)

        new_nodes = [node_map[n] for n in tmp_nodes]
        new_edges = [(node_map[u], node_map[v]) for u, v in tmp_edges]

        for k, v in pos_old[i].items():
            pos_old[i][k][0] += shift_value[i][0]
            pos_old[i][k][1] += shift_value[i][1]

        new_pos = {node_map[n]: v for n, v in pos_old[i].items()}

        nodes += new_nodes
        edges += new_edges
        pos.update(new_pos)

    G = nx.DiGraph()
    G.add_edges_from(edges)

    random.shuffle(nodes)
    l = int(edge_num * len(nodes))
    u = nodes[0:l]
    random.shuffle(nodes)
    v = nodes[0:l]

    for s, t in zip(u, v):
        if random.random() < edge_prob:
            G.add_edge(s, t)
            G.add_edge(t, s)
    nodes_deg = [G.degree[i] for i in G.nodes()]

    centrality = nx.closeness_centrality(G)
    labels_central = get_labels(centrality)
    print('centrality done!')

    inf_cent = nx.information_centrality(G.to_undirected())
    labels_inf_central = get_labels(inf_cent)
    print('info centrality done!')

    betweenness = nx.betweenness_centrality(G.to_undirected())
    labels_betweenness = get_labels(betweenness)
    print('betweenness done!')

    loads = nx.load_centrality(G.to_undirected())
    labels_load = get_labels(loads)
    print('load centrality done!')

    cmm_bet = nx.communicability_betweenness_centrality(G.to_undirected())
    labels_cmm_bet = get_labels(cmm_bet)
    print('commu betweenness done!')

    sce = nx.subgraph_centrality_exp(G.to_undirected())
    labels_sce = get_labels(sce)
    print('subgraph centrality done!')

    harm = nx.harmonic_centrality(G.to_undirected())
    labels_harm = get_labels(harm)
    print('harmonic done!')

    lrc = {
        v: nx.local_reaching_centrality(G.to_undirected(), v)
        for v in G.nodes()
    }
    labels_lrc = get_labels(lrc)
    print('lrc done!')

    unq_lbl = np.unique(nodes_deg)
    lbl_map = {unq_lbl[i]: i for i in range(len(unq_lbl))}
    labels = [lbl_map[k] for k in nodes_deg]
    return G, pos, labels, comm_labels, labels_central, labels_inf_central, labels_betweenness, labels_load, labels_cmm_bet, labels_sce, labels_harm, labels_lrc
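`get_labels` is not defined in this example; a hypothetical sketch, assuming it bins each node's score into quantile labels (both the behavior and the binning scheme are guesses, not part of the original):

import numpy as np

def get_labels(score_dict, n_bins=4):
    # hypothetical helper: map each node's centrality score to a quantile bin label
    nodes = list(score_dict)
    scores = np.array([score_dict[n] for n in nodes])
    edges = np.quantile(scores, np.linspace(0, 1, n_bins + 1)[1:-1])
    return [int(np.searchsorted(edges, score_dict[n])) for n in nodes]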
Example #45
def defense():
    '''for key,value in voltages.iteritems():
		weight[key[2]]=value'''
    initial_l = {}
    initial_l_edges = {}
    initial_l = nx.load_centrality(H, normalized=True, weight='cable')
    #bc=sorted(nx.edge_betweenness_centrality(H,normalized=True,weight='cable',reverse=True)
    #print initial_l_edges
    #nodes_to_remove=nx.load_centrality(H)
    nodes_to_remove = nx.out_degree_centrality(H)
    #nodes_to_remove=nx.in_degree_centrality(H)
    #nodes_to_remove=nx.closeness_centrality(H,distance='length',normalized=True)
    #nodes_to_remove=nx.betweenness_centrality(H,weight='cable',normalized=True)
    #hub,authorities=nx.hits(H)
    lamda = 2
    for m, n in initial_l.iteritems():
        initial_l[m] = lamda * n
    m = 0
    remove = []
    remove_nodes = []
    remove_edges = []
    pp = 0
    #High_centrality={}
    '''for key,value in sorted(nodes_to_remove.iteritems(), key=lambda (k,v): (v,k),reverse=True):
		m+=1
		if m>=80 and m<=100:
			remove_nodes.append(key)
	rn=random.choice(remove_nodes)
	remove.append(rn)
	print rn'''
    '''appending the top 1000 nodes (by the chosen centrality) that are to be removed from the network'''
    for key, value in sorted(nodes_to_remove.iteritems(),
                             key=lambda (k, v): (v, k),
                             reverse=True):
        m += 1
        if m <= 1000:
            remove.append(key)
    in_wcc = len(max(nx.weakly_connected_components(H), key=len))
    '''n_wcc=nx.number_weakly_connected_components(H)
	in_scc=len(max(nx.strongly_connected_components(H), key=len))
	n_scc=nx.number_strongly_connected_components(H)'''
    g = 0
    successor = []
    trans_l = {}
    '''removing the nodes which are selected'''
    while g == 0:
        rr = list(remove)
        ee = list(remove_edges)
        for r in rr:
            if r in set(generators):
                generators.remove(r)
            if V.node[r]['color'] == 'red' or r in set(defended):
                remove.remove(r)
        for inn, e in enumerate(ee):
            if V.edge[ee[inn][0]][ee[inn][1]]['color'] == 'red':
                remove_edges.remove(ee[inn])
        del rr[:]
        del ee[:]
        H.remove_edges_from(remove_edges)
        for ind, eee in enumerate(remove_edges):
            if remove_edges[ind][1] not in set(remove):
                remove.append(remove_edges[ind][1])
        for nn in remove:
            for x in list(H.successors(nn)):
                if x not in set(successor) and x not in set(remove):
                    successor.append(x)
        H.remove_nodes_from(remove)
        for n in list(V.in_edges(remove)):
            V[n[0]][n[1]]['color'] = 'red'
            edges_data[(n[0], n[1])][6] = 'r-'
        for oe in list(V.out_edges(remove)):
            V[oe[0]][oe[1]]['color'] = 'red'
            edges_data[(oe[0], oe[1])][6] = 'r-'
        for l in remove:
            V.node[l]['color'] = 'red'
            nodes_data[l][5] = 'ro'
        for ed in remove_edges:
            V.edge[ed[0]][ed[1]]['color'] = 'red'
            edges_data[(ed[0], ed[1])][6] = 'r-'
        del remove[:]
        del remove_edges[:]
        '''removing components not containing a generator'''
        '''the inner loop is for cascade caused by removed nodes'''
        a = 0
        while a == 0:
            # rebuild instead of pop()ing by index, which shifts positions mid-loop
            successor = [
                rz for rz in successor
                if rz not in generators and V.node[rz]['color'] != 'red'
                and rz not in set(defended)
            ]
            for su in successor:
                itera = 0
                for gen in generators:  # renamed from g to avoid clobbering the outer loop flag
                    if su == gen:
                        itera += 1
                        break
                    elif nx.has_path(H, gen, su):
                        itera += 1
                        break
                if itera == 0:
                    remove.append(su)
            del successor[:]
            if len(remove) != 0:
                # generators don't need filtering here because they are never added in the first place
                # rebuild instead of pop()ing by index, which shifts positions mid-loop
                remove = [r for r in remove if V.node[r]['color'] != 'red']
                for nn in remove:
                    for x in list(H.successors(nn)):
                        if x not in set(successor) and x not in set(remove):
                            successor.append(x)
                H.remove_nodes_from(remove)
                for n in list(V.in_edges(remove)):
                    V[n[0]][n[1]]['color'] = 'red'
                    edges_data[(n[0], n[1])][6] = 'r-'
                for oe in list(V.out_edges(remove)):
                    V[oe[0]][oe[1]]['color'] = 'red'
                    edges_data[(oe[0], oe[1])][6] = 'r-'
                for l in remove:
                    V.node[l]['color'] = 'red'
                    nodes_data[l][5] = 'ro'
            else:
                a += 1
            del remove[:]
        '''checking for nodes which are overloaded and appending them into remove list'''
        trans_l = nx.load_centrality(H, normalized=True, weight='cable')
        #trans_l_edges=nx.edge_betweenness_centrality(H,normalized=True,weight='cable')
        b = 0
        for key, value in trans_l.iteritems():
            if trans_l[key] > initial_l[key] and key not in generators:
                remove.append(key)
                b += 1
        '''for ky,vl in trans_l_edges.iteritems():
			if trans_l_edges[ky]>initial_l_edges[ky]:
				print (ky," ",vl," ")
				remove_edges.append(ky)
				mm+=1'''
        trans_l.clear()
        #trans_l_edges.clear()
        if b == 0:
            break
    h = 0
    for cc, vv in nodes_data.iteritems():
        if vv[5] == 'ro':
            h += 1
    #print "node removed",rn
    f_wcc = len(max(nx.weakly_connected_components(H), key=len))
    #f_scc=len(max(nx.strongly_connected_components(H), key=len))
    #nx.write_graphml(V,"outdegreelambda.graphml")
    print "cascade size %f" % ((float(in_wcc) - float(f_wcc)) / float(in_wcc))
    print "%d wcc %d" % (in_wcc, f_wcc)
    print "no. of nodes effected after removing 100 nodes", h
    '''print "%d numberwcc %d"%(n_wcc,nx.number_weakly_connected_components(H))
	print "%d scc %d"%(in_scc,f_scc)
	print "%d numberscc %d"%(n_scc,nx.number_strongly_connected_components(H))'''
    '''creating a csv file containing the nodes and edges with cascaded nodes colored as red'''
    o = []
    with open('../../NS_project/Code/outcentrality_vertices.csv',
              'wb') as csvfile:
        nodewriter = csv.writer(csvfile, delimiter=',')
        header = ['v_id', 'lon', 'lat', 'color']
        nodewriter.writerow(header)
        for da, it in nodes_data.iteritems():
            for l in range(0, 3):
                o.append(str(it[l]))
            o.append(str(it[5]))
            nodewriter.writerow(o)
            del o[:]
    with open('../../NS_project/Code/outcentrality_edges.csv',
              'wb') as csvfile1:
        edgewriter = csv.writer(csvfile1, delimiter=',')
        header1 = ['l_id', 'v_id_1', 'v_id_2', 'color']
        edgewriter.writerow(header1)
        for db, itt in edges_data.iteritems():
            for l in range(0, 3):
                o.append(str(itt[l]))
            o.append(str(itt[6]))
            edgewriter.writerow(o)
            del o[:]
Example #46
 def test_weighted_load(self):
     b=nx.load_centrality(self.G,weight='weight',normalized=False)
     for n in sorted(self.G):
         assert_equal(b[n],self.exact_weighted[n])
Example #47
import networkx as nx
import matplotlib.pylab as plt

n = 80
p = 10. / n
G = nx.fast_gnp_random_graph(n, p, seed=42)


def to_list(dict_):
    return [dict_[k] for k in G.nodes()]


graph_colors = [
    ("degree", to_list(nx.degree_centrality(G))),
    ("betweenness", to_list(nx.betweenness_centrality(G))),
    ("load", to_list(nx.load_centrality(G))),
    ("eigenvector", to_list(nx.eigenvector_centrality_numpy(G))),
    ("closeness_centrality", to_list(nx.closeness_centrality(G))),
    ("current_flow_closeness",
     to_list(nx.current_flow_closeness_centrality(G))),
    ("current_flow_betweenness",
     to_list(nx.current_flow_betweenness_centrality(G))),
    ("katz", to_list(nx.katz_centrality_numpy(G))),
    ("communicability", to_list(nx.communicability_centrality(G))),
]

fig = plot_multigraph.plot_color_multigraph(G,
                                            graph_colors,
                                            3,
                                            3,
                                            node_size=50)
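One caveat for this snippet (and others on this page): `communicability_centrality` was renamed in NetworkX 2.0, so on newer versions a fallback along these lines is needed (reusing `G` from above):

try:
    comm = nx.communicability_centrality(G)  # NetworkX 1.x name
except AttributeError:
    comm = nx.subgraph_centrality(G)  # equivalent function since NetworkX 2.0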
Example #48
 def test_weighted_load(self):
     b=nx.load_centrality(self.G,weighted_edges=True,
                                normalized=False)
     for n in sorted(self.G):
         assert_equal(b[n],self.exact_weighted[n])
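Note: `weighted_edges=True` is the older NetworkX spelling; later releases replaced it with the `weight` keyword used in the otherwise identical test shown above.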
Example #49
def load_centrality(graphs, Gids):
    metrics = [nx.load_centrality(G) for G in graphs]
    return pd.Series(metrics, index=Gids, name='load_centrality')
Example #50
def load_centrality(G, scale):
    return remove_node(G, nx.load_centrality(G), scale)
Example #51
def add_load_node(graf):
    print "Adding load to nodes"
    l_dict = nx.load_centrality(graf)
    nx.set_node_attributes(graf, 'loa', l_dict)
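This uses the NetworkX 1.x argument order (attribute name before values); since NetworkX 2.0 the signature is `set_node_attributes(G, values, name=None)`, so the equivalent call on newer versions would be:

nx.set_node_attributes(graf, l_dict, 'loa')  # NetworkX >= 2.0 argument order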
Example #52
 def test_les_miserables_load(self):
     G = self.LM
     c = nx.load_centrality(G)
     d = {'Napoleon': 0.000,
          'Myriel': 0.177,
          'MlleBaptistine': 0.000,
          'MmeMagloire': 0.000,
          'CountessDeLo': 0.000,
          'Geborand': 0.000,
          'Champtercier': 0.000,
          'Cravatte': 0.000,
          'Count': 0.000,
          'OldMan': 0.000,
          'Valjean': 0.567,
          'Labarre': 0.000,
          'Marguerite': 0.000,
          'MmeDeR': 0.000,
          'Isabeau': 0.000,
          'Gervais': 0.000,
          'Listolier': 0.000,
          'Tholomyes': 0.043,
          'Fameuil': 0.000,
          'Blacheville': 0.000,
          'Favourite': 0.000,
          'Dahlia': 0.000,
          'Zephine': 0.000,
          'Fantine': 0.128,
          'MmeThenardier': 0.029,
          'Thenardier': 0.075,
          'Cosette': 0.024,
          'Javert': 0.054,
          'Fauchelevent': 0.026,
          'Bamatabois': 0.008,
          'Perpetue': 0.000,
          'Simplice': 0.009,
          'Scaufflaire': 0.000,
          'Woman1': 0.000,
          'Judge': 0.000,
          'Champmathieu': 0.000,
          'Brevet': 0.000,
          'Chenildieu': 0.000,
          'Cochepaille': 0.000,
          'Pontmercy': 0.007,
          'Boulatruelle': 0.000,
          'Eponine': 0.012,
          'Anzelma': 0.000,
          'Woman2': 0.000,
          'MotherInnocent': 0.000,
          'Gribier': 0.000,
          'MmeBurgon': 0.026,
          'Jondrette': 0.000,
          'Gavroche': 0.164,
          'Gillenormand': 0.021,
          'Magnon': 0.000,
          'MlleGillenormand': 0.047,
          'MmePontmercy': 0.000,
          'MlleVaubois': 0.000,
          'LtGillenormand': 0.000,
          'Marius': 0.133,
          'BaronessT': 0.000,
          'Mabeuf': 0.028,
          'Enjolras': 0.041,
          'Combeferre': 0.001,
          'Prouvaire': 0.000,
          'Feuilly': 0.001,
          'Courfeyrac': 0.006,
          'Bahorel': 0.002,
          'Bossuet': 0.032,
          'Joly': 0.002,
          'Grantaire': 0.000,
          'MotherPlutarch': 0.000,
          'Gueulemer': 0.005,
          'Babet': 0.005,
          'Claquesous': 0.005,
          'Montparnasse': 0.004,
          'Toussaint': 0.000,
          'Child1': 0.000,
          'Child2': 0.000,
          'Brujon': 0.000,
          'MmeHucheloup': 0.000}
     for n in sorted(G):
         assert almost_equal(c[n], d[n], places=3)
Example #53
def compute_network_features(graph, network_name):
    # this function is used to calculate network features and returns result
    # as a dictionary: Dict<feature_name,feature_value>
    # --------------------------------------------------------------------------------

    network_features = dict()
    node_features_list = list()

    netclass = network_name.split('___')

    if len(netclass) > 1:
        network_features['group'] = netclass[0]
        network_features['Network Name'] = netclass[1]
    else:
        network_features['group'] = '_unknown_'
        network_features['Network Name'] = network_name

    if graph.is_directed():
        network_features['Is Directed?'] = True
    else:
        network_features['Is Directed?'] = False

    if graph.is_multigraph():
        network_features['Is MultiGraph?'] = True
    else:
        network_features['Is MultiGraph?'] = False

    # Global Attributes
    # --------------------------------------------------------------------------------
    # number of nodes
    # --------------------------------------------------------------------------------
    if _nn:
        try:
            nodes_count = nx.number_of_nodes(graph)
            network_features['Number of Nodes'] = nodes_count
        except:
            network_features['Number of Nodes'] = 'NA'

    # number of edges
    # --------------------------------------------------------------------------------
    if _ne:
        try:
            edges_count = nx.number_of_edges(graph)
            network_features['Number of Edges'] = edges_count
        except:
            network_features['Number of Edges'] = 'NA'

    # network density
    # --------------------------------------------------------------------------------
    if _dens:
        try:
            density = nx.density(graph)
            network_features['Density'] = density
        except:
            network_features['Density'] = 'NA'

    # graph degree assortativity
    # --------------------------------------------------------------------------------
    if _dac:
        try:
            graph_degree_assortativity = nx.degree_assortativity_coefficient(
                graph)
            network_features[
                'Graph Degree Assortativity'] = graph_degree_assortativity
        except:
            network_features['Graph Degree Assortativity'] = 'NA'

    # avg. closeness centrality
    # --------------------------------------------------------------------------------
    if _acc:
        try:
            ccn = nx.closeness_centrality(graph)
            mccn = np.mean(ccn.values())
            network_features['Avg. Closeness Centrality'] = mccn
        except:
            network_features['Avg. Closeness Centrality'] = 'NA'

    # avg. betweenness centrality
    # --------------------------------------------------------------------------------
    if _abc:
        try:
            bcn = nx.betweenness_centrality(graph)
            mbcn = np.mean(bcn.values())
            network_features['Avg. Betweenness Centrality'] = mbcn
        except:
            network_features['Avg. Betweenness Centrality'] = 'NA'

    # avg. degree centrality
    # --------------------------------------------------------------------------------
    if _adc:
        try:
            dcn = nx.degree_centrality(graph)
            mdcn = np.mean(dcn.values())
            network_features['Avg. Degree Centrality'] = mdcn
        except:
            network_features['Avg. Degree Centrality'] = 'NA'

    # avg. degree connectivity
    # --------------------------------------------------------------------------------
    if _adcon:
        try:
            dc = nx.average_degree_connectivity(graph)
            adc = np.mean(dc.values())
            network_features['Avg. Degree Connectivity'] = adc
        except:
            network_features['Avg. Degree Connectivity'] = 'NA'

    # avg. load centrality
    # --------------------------------------------------------------------------------
    if _alc:
        try:
            lc = nx.load_centrality(graph)
            mlc = np.mean(lc.values())
            network_features['Avg. Load Centrality'] = mlc
        except:
            network_features['Avg. Load Centrality'] = 'NA'

    # avg. edge betweenness centrality
    # --------------------------------------------------------------------------------

    # try:
    #     ebc = nx.edge_betweenness_centrality(graph)
    #     mebc = np.mean(ebc.values())
    #     network_features['Avg. Edge Betweenness centrality'] = mebc
    # except:
    #     network_features['Avg. Edge Betweenness centrality'] = 'NA'

    # edge connectivity
    # --------------------------------------------------------------------------------
    # try:
    #     ec = nx.edge_connectivity(graph)
    #     network_features['Edge Connectivity'] = ec
    # except:
    #     network_features['Edge Connectivity'] = 'NA'

    # diameter
    # --------------------------------------------------------------------------------
    if _nd:
        try:
            diameter = nx.diameter(graph)
            network_features['Diameter'] = diameter
        except:
            network_features['Diameter'] = 'NA'

    # eccentricity
    # --------------------------------------------------------------------------------
    if _ae:
        try:
            eccentricity = nx.eccentricity(graph)
            network_features['Avg. Eccentricity'] = np.mean(
                eccentricity.values())
        except:
            network_features['Avg. Eccentricity'] = 'NA'

    # radius
    # --------------------------------------------------------------------------------
    if _rad:
        try:
            radius = nx.radius(graph)
            network_features['Radius'] = radius
        except:
            network_features['Radius'] = 'NA'

    # Non MultiGraph Features
    # --------------------------------------------------------------------------------
    if not graph.is_multigraph():

        # transitivity
        # ----------------------------------------------------------------------------
        if _trans:
            try:
                transitivity = nx.transitivity(graph)
                network_features['Transitivity'] = transitivity
            except:
                network_features['Transitivity'] = 'NA'

        # Katz centrality
        # ----------------------------------------------------------------------------
        if _akc:
            try:
                katz = nx.katz_centrality(graph)
                mean_katz = np.mean(katz.values())
                network_features['Avg. Katz Centrality'] = mean_katz
            except:
                network_features['Avg. Katz Centrality'] = 'NA'

        # PageRank
        # ----------------------------------------------------------------------------
        if _ap:
            try:
                pagerank = nx.pagerank(graph)
                mean_pagerank = np.mean(pagerank.values())
                network_features['Avg. PageRank'] = mean_pagerank
            except:
                network_features['Avg. PageRank'] = 'NA'

    # Undirected Graphs
    # --------------------------------------------------------------------------------

    if not nx.is_directed(graph):

        # Degree
        # ----------------------------------------------------------------------------
        #
        # try:
        #     all_degrees = nx.degree(graph)
        #     mean_degrees = np.mean(all_degrees.values())
        #     network_features['Avg. Degree'] = mean_degrees
        # except:
        #     network_features['Avg. Degree'] = 'NA'

        # connected components
        # ----------------------------------------------------------------------------
        if _nocc:
            try:
                cc_number = nx.number_connected_components(graph)
                network_features['Number of Connected Components'] = cc_number
            except:
                network_features['Number of Connected Components'] = 'NA'

        # lcc size fraction && avg. cc size
        # ----------------------------------------------------------------------------
        if _accs or _lcc_size:
            try:
                cc_list = list(nx.connected_components(graph))
                cc_sizes = []
                for cc in cc_list:
                    cc_sizes.append(len(cc))

                lcc_size = np.max(cc_sizes)
                if _accs:
                    network_features['lcc_size_fraction'] = lcc_size / float(
                        nodes_count)
                if _lcc_size:
                    mean_cc_sizes = np.mean(cc_sizes)
                    network_features[
                        'Avg. Connected Component Size'] = mean_cc_sizes
            except:
                if _accs:
                    network_features['lcc_size_fraction'] = 'NA'
                if _lcc_size:
                    network_features['Avg. Connected Component Size'] = 'NA'

        # communicability centrality for Undirected networks
        # ----------------------------------------------------------------------------
        if not graph.is_multigraph():
            if _acoc:
                try:
                    cc = nx.communicability_centrality(graph)
                    mcc = np.mean(cc.values())
                    network_features['Avg. Communicability Centrality'] = mcc
                except:
                    network_features['Avg. Communicability Centrality'] = 'NA'

            # clustering coefficient
            # -------------------------------------------------------------------------
            if _ncc:
                try:
                    clustering_coefficient = nx.average_clustering(graph)
                    network_features[
                        'Network Clustering Coefficient'] = clustering_coefficient
                except:
                    network_features['Network Clustering Coefficient'] = 'NA'

        # clique analysis for Undirected networks
        # ----------------------------------------------------------------------------
        if _max_cs:
            try:
                cliques_obj = nx.find_cliques(graph)
                cliques = [clq for clq in cliques_obj]

                clique_sizes = []
                for c in cliques:
                    clique_sizes.append(len(c))

                # user_clique_size = 5
                if len(clique_sizes) > 0:
                    # network_features['No of Cliques with size ' + str(user_clique_size)] \
                    # = clique_sizes.count(user_clique_size)
                    network_features['Avg. Clique Size'] = np.mean(
                        clique_sizes)
                    network_features['Max Clique Size'] = np.max(clique_sizes)
                else:
                    # network_features['No of Cliques with size ' + str(user_clique_size)] = 0
                    network_features['Avg. Clique Size'] = 0
                    network_features['Max Clique Size'] = 0
            except:
                # network_features['No of Cliques with size ' + str(user_clique_size)] = 'NA'
                network_features['Avg. Clique Size'] = 'NA'
                network_features['Max Clique Size'] = 'NA'

                # else:
                # try:
                #     all_in_degrees = nx.DiGraph.in_degree(graph)
                #     all_out_degrees = nx.DiGraph.out_degree(graph)
                #
                #     mean_in_degrees = np.mean(all_in_degrees.values())
                #     mean_out_degrees = np.mean(all_out_degrees.values())
                #
                #     network_features['Avg. In Degree'] = mean_in_degrees
                #     network_features['Ave. Out Degree'] = mean_out_degrees
                # except:
                #     network_features['Avg. In Degree'] = 'NA'
                #     network_features['Ave. Out Degree'] = 'NA'

    # Nodes Features Calculation

    for node in graph.nodes():

        node_features = dict()

        try:
            node_features['group'] = network_name
        except:
            node_features['group'] = 'NA'

        if _abc:
            try:
                node_features['Betweenness Centrality'] = bcn[node]
            except:
                node_features['Betweenness Centrality'] = 'NA'

        if _acc:
            try:
                node_features['Closeness Centrality'] = ccn[node]
            except:
                node_features['Closeness Centrality'] = 'NA'

        if _adc:
            try:
                node_features['Degree Centrality'] = dcn[node]
            except:
                node_features['Degree Centrality'] = 'NA'

        if _alc:
            try:
                node_features['Load Centrality'] = lc[node]
            except:
                node_features['Load Centrality'] = 'NA'

        if _ae:
            try:
                node_features['Eccentricity'] = eccentricity[node]
            except:
                node_features['Eccentricity'] = 'NA'

        if not graph.is_multigraph():
            if _akc:
                try:
                    node_features['Katz Centrality'] = katz[node]
                except:
                    node_features['Katz Centrality'] = 'NA'

            if _ap:
                try:
                    node_features['PageRank'] = pagerank[node]
                except:
                    node_features['PageRank'] = 'NA'

        if not nx.is_directed(graph):
            # try:
            #     node_features['Degree'] = all_degrees[node]
            # except:
            #     node_features['Degree'] = 'NA'

            if not graph.is_multigraph():
                if _acoc:
                    try:
                        node_features['Communicability Centrality'] = cc[node]
                    except:
                        node_features['Communicability Centrality'] = 'NA'
                        # else:
                        # try:
                        #     node_features['In Degree'] = all_in_degrees[node]
                        # except:
                        #     node_features['In Degree'] = 'NA'
                        #
                        # try:
                        #     node_features['Out Degree'] = all_out_degrees[node]
                        # except:
                        #     node_features['Out Degree'] = 'NA'

        node_features_list.append(node_features)

    return network_features, node_features_list
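A minimal usage sketch (the graph and group name below are placeholders, and it assumes the module-level feature flags such as _nn, _ne, and _alc are defined and True):

import networkx as nx

g = nx.karate_club_graph()
net_feats, node_feats = compute_network_features(g, 'social___karate')
print('%s nodes, %s node records' % (net_feats['Number of Nodes'], len(node_feats)))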
Example #54
def new_centrality(graph):
    """ Compute centrality scores for a network graph.

    Compute a number of different centrality and misc. scores for all nodes in a network graph.

    Parameters
    ----------
    graph : networkX graph object

    Returns
    -------
    core_df : Pandas DataFrame object

    """

    core_df = pd.DataFrame()
    # fill the artist column with every node (artist) in the graph
    core_df['artist'] = graph.nodes()
    scores_list = []

    try:
        deg_cent = pd.DataFrame.from_dict(nx.degree_centrality(graph),
                                          orient='index',
                                          columns=['deg_cent'])
        scores_list.append(deg_cent)
    except:
        pass

    try:
        load_cent = pd.DataFrame.from_dict(nx.load_centrality(graph),
                                           orient='index',
                                           columns=['load_cent'])
        scores_list.append(load_cent)
    #between_cent = nx.betweenness_centrality(graph)
    except:
        pass

    try:
        page_rank = pd.DataFrame.from_dict(nx.pagerank_numpy(graph),
                                           orient='index',
                                           columns=['page_rank'])
        scores_list.append(page_rank)
    except:
        pass

    try:
        ev_cent = pd.DataFrame.from_dict(
            nx.eigenvector_centrality_numpy(graph),
            orient='index',
            columns=['ev_cent'])
        scores_list.append(ev_cent)
    except:
        pass

    try:
        cl_cent = pd.DataFrame.from_dict(nx.closeness_centrality(graph),
                                         orient='index',
                                         columns=['close_cent'])
        scores_list.append(cl_cent)
    except:
        pass

    try:
        cfcc = pd.DataFrame.from_dict(
            nx.current_flow_closeness_centrality(graph),
            orient='index',
            columns=['cf_close_cent'])
        scores_list.append(cfcc)
    except:
        pass
    """
    try:
        ic = pd.DataFrame.from_dict(nx.information_centrality(graph), orient = 'index',  columns = ['info_cent'])
        scores_list.append(ic)
    except:
        pass

        #ebc = pd.DataFrame.from_dict(nx.edge_betweenness_centrality(graph), orient = 'index',  columns = ['edge_bet_cent'])

    try:
        cfbc = pd.DataFrame.from_dict(nx.current_flow_betweenness_centrality(graph), orient = 'index',  columns = ['edge_cflow_cent'])
        scores_list.append(cfbc)
    except:
        pass
    #ecfbc = pd.DataFrame.from_dict(nx.edge_current_flow_betweenness_centrality(graph), orient = 'index',  columns = ['cf_between_cent'])

    try:
        acfbc = pd.DataFrame.from_dict(nx.approximate_current_flow_betweenness_centrality(graph), orient = 'index',  columns = ['appx.cfbt_cent'])
        scores_list.append(acfbc)
    except:
        pass
    #elc = pd.DataFrame.from_dict(nx.edge_load_centrality(graph), orient = 'index',  columns = ['edge_load_cent'])
    """
    try:
        hc = pd.DataFrame.from_dict(nx.harmonic_centrality(graph),
                                    orient='index',
                                    columns=['harm_cent'])
        scores_list.append(hc)
    except:
        pass
    #d = pd.DataFrame.from_dict(nx.dispersion(graph), orient = 'index',  columns = ['dispersion'])
    """
    try:
        soc = pd.DataFrame.from_dict(nx.second_order_centrality(graph), orient = 'index',  columns = ['sec_ord_cent'])
        scores_list.append(soc)
    except:
        pass
    """
    df = pd.concat(scores_list, axis=1)

    core_df = core_df.merge(df, left_on='artist', right_index=True)

    core_df['mean_cent'] = core_df.apply(
        lambda row: np.mean(row[1:]),  #  Calculate the mean of the row
        axis=1)
    return core_df
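A quick illustrative call (the graph here is a stand-in; any undirected NetworkX graph works):

import networkx as nx

g = nx.les_miserables_graph()
scores_df = new_centrality(g)
print(scores_df.sort_values('mean_cent', ascending=False).head())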
Example #55
def drawGraph(G, pos, a, labels):

    print(globalLabSize, "glob")
    #Changing G for threshold here

    #Check filter metric
    if (globalOptionsMet4 == "Default"):
        pass  # no filtering; keep the full graph

    if (globalOptionsMet4 == "Degree"):
        displayedNodes = []
        nx.set_node_attributes(G,
                               values=nx.degree_centrality(G),
                               name='degree')
        for node, data in G.nodes(data=True):
            if (data['degree'] > threshold):
                displayedNodes.append(node)
        G = G.subgraph(displayedNodes)

    if (globalOptionsMet4 == "Between Centrality"):
        displayedNodes = []
        # Metrics computing
        betweenCentralities = nx.betweenness_centrality(G)
        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=betweenCentralities,
                               name='betweenCentrality')

        # iterate through nodes
        for node, data in G.nodes(data=True):
            if (data['betweenCentrality'] > threshold):
                displayedNodes.append(node)

        G = G.subgraph(displayedNodes)

    if (globalOptionsMet4 == "Load Centrality"):
        displayedNodes = []
        # Metrics computing
        loadCentralities = nx.load_centrality(G)
        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=loadCentralities,
                               name='loadCentrality')

        # iterate through nodes
        for node, data in G.nodes(data=True):
            if (data['loadCentrality'] > threshold):
                displayedNodes.append(node)

        G = G.subgraph(displayedNodes)

    if (globalOptionsMet4 == "Subgraph Centrality"):
        displayedNodes = []
        # Metrics computing
        subgraphCentralities = nx.subgraph_centrality(G)
        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=subgraphCentralities,
                               name='subgraphCentrality')

        # iterate through nodes
        for node, data in G.nodes(data=True):
            if (data['subgraphCentrality'] > threshold):
                displayedNodes.append(node)

        G = G.subgraph(displayedNodes)

    #Choosing size,  the size list will be defined here for the rest of the function
    sizeM = maxSize
    sizes = []
    if (globalOptionsMet3 == "Default"):
        for node, data in G.nodes(data=True):
            sizes.append(sizeM)
    if (globalOptionsMet3 == "Degree"):
        degrees = nx.degree_centrality(G)
        # Add metrics as params of the nodes
        nx.set_node_attributes(G, values=degrees, name='degree')
        # minmax
        minDeg = min(degrees.values())
        maxDeg = max(degrees.values())
        #loop through nodes
        for node, data in G.nodes(data=True):
            sizes.append(
                (normalizeSize(data['degree'], maxDeg, minDeg, sizeM) + 5))

    if (globalOptionsMet3 == "Between Centrality"):
        # Metrics computing
        betweenCentralities = nx.betweenness_centrality(G)
        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=betweenCentralities,
                               name='betweenCentrality')
        # minmax
        minBetween = min(betweenCentralities.values())
        maxBetween = max(betweenCentralities.values())
        #loop through nodes
        for node, data in G.nodes(data=True):
            sizes.append((normalizeSize(data['betweenCentrality'], maxBetween,
                                        minBetween, sizeM) + 5))

    if (globalOptionsMet3 == "Load Centrality"):
        # Metrics computing
        loadCentralities = nx.load_centrality(G)
        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=loadCentralities,
                               name='loadCentrality')
        # minmax
        minLoad = min(loadCentralities.values())
        maxLoad = max(loadCentralities.values())
        #loop through nodes
        for node, data in G.nodes(data=True):
            sizes.append((normalizeSize(data['loadCentrality'], maxLoad,
                                        minLoad, sizeM) + 5))

    if (globalOptionsMet3 == "Subgraph Centrality"):
        # Metrics computing
        subgraphCentralities = nx.subgraph_centrality(G)
        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=subgraphCentralities,
                               name='subgraphCentrality')
        # minmax
        minSub = min(subgraphCentralities.values())
        maxSub = max(subgraphCentralities.values())
        #loop through nodes
        for node, data in G.nodes(data=True):
            sizes.append((normalizeSize(data['subgraphCentrality'], maxSub,
                                        minSub, sizeM) + 5))

    #Choosing cmap for colors
    cmapChosen = plt.cm.viridis
    if (cmap1 == "Viridis"):
        cmapChosen = plt.cm.viridis
    if (cmap1 == "Magma"):
        cmapChosen = plt.cm.magma
    if (cmap1 == "Plasma"):
        cmapChosen = plt.cm.plasma
    if (cmap1 == "Blues"):
        cmapChosen = plt.cm.Blues
    if (cmap1 == "Purples"):
        cmapChosen = plt.cm.Purples
    if (cmap1 == "Reds"):
        cmapChosen = plt.cm.Reds
    if (cmap1 == "Greens"):
        cmapChosen = plt.cm.Greens
    if (cmap1 == "YlOrRd"):
        cmapChosen = plt.cm.YlOrRd

    #drawing
    if (globalOptionsMet2 == "Default"):
        labelDic = nx.get_node_attributes(G, 'label')
        nx.draw_networkx_nodes(G,
                               pos=pos,
                               ax=a,
                               node_color=range(len(G)),
                               cmap=cmapChosen,
                               node_size=sizes)
        nx.draw_networkx_edges(G,
                               pos,
                               edge_color=globalEdgeCol,
                               style=globalEdgeType,
                               alpha=globalEdgeOpacity,
                               ax=a)
        if (labels):
            nx.draw_networkx_labels(G,
                                    pos,
                                    font_size=globalLabSize,
                                    ax=a,
                                    font_color=globalLabelCol)
    if (globalOptionsMet2 == "Communities"):
        cmapUsed = ""
        if (cmap1 == "Pale"):
            cmapUsed = ListedColormap(
                palettable.colorbrewer.qualitative.Set3_12.mpl_colors,
                N=len(G))
        if (cmap1 == "Bright"):
            cmapUsed = ListedColormap(
                palettable.colorbrewer.qualitative.Set1_9.mpl_colors, N=len(G))

        partition = community.best_partition(G)
        labelSet = nx.get_node_attributes(G, 'label')
        size = float(len(set(partition.values())))
        count = 0.
        for com in set(partition.values()):
            count = count + 1.
            list_nodes = [
                nodes for nodes in partition.keys() if partition[nodes] == com
            ]
            nx.draw_networkx_nodes(G,
                                   pos,
                                   list_nodes,
                                   label=labelSet,
                                   node_color=cmapUsed(com),
                                   ax=a,
                                   node_size=sizes)
        nx.draw_networkx_edges(G,
                               pos,
                               ax=a,
                               edge_color=globalEdgeCol,
                               style=globalEdgeType,
                               alpha=globalEdgeOpacity)
        if (labels):
            nx.draw_networkx_labels(G,
                                    pos,
                                    ax=a,
                                    font_size=globalLabSize,
                                    font_color=globalLabelCol)
    if (globalOptionsMet2 == "Degree"):
        # Metrics computing
        degrees = nx.degree_centrality(G)
        # Add metrics as params of the nodes
        nx.set_node_attributes(G, values=degrees, name='degree')

        # minmax
        minDeg = min(degrees.values())
        maxDeg = max(degrees.values())

        colDeg = []

        # Set the colors that could be used next
        for node, data in G.nodes(data=True):
            colDeg.append(normalize(data['degree'], maxDeg, minDeg))

        nx.draw_networkx_nodes(G,
                               pos=pos,
                               vmax=1,
                               vmin=0,
                               cmap=cmapChosen,
                               with_labels=labels,
                               node_size=sizes,
                               node_color=colDeg,
                               ax=a)
        nx.draw_networkx_edges(G,
                               pos,
                               edge_color=globalEdgeCol,
                               style=globalEdgeType,
                               alpha=globalEdgeOpacity,
                               ax=a)
        if (labels):
            nx.draw_networkx_labels(G,
                                    pos,
                                    font_size=globalLabSize,
                                    ax=a,
                                    font_color=globalLabelCol)

    if (globalOptionsMet2 == "Between Centrality"):
        # Metrics computing
        betweenCentralities = nx.betweenness_centrality(G)

        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=betweenCentralities,
                               name='betweenCentrality')

        # minmax
        minBetween = min(betweenCentralities.values())
        maxBetween = max(betweenCentralities.values())

        colBetween = []

        # Set the colors that could be used next
        for node, data in G.nodes(data=True):
            colBetween.append(
                normalize(data['betweenCentrality'], maxBetween, minBetween))

        nx.draw_networkx_nodes(G,
                               pos=pos,
                               vmax=1,
                               vmin=0,
                               cmap=cmapChosen,
                               with_labels=labels,
                               node_size=sizes,
                               node_color=colBetween,
                               ax=a)
        nx.draw_networkx_edges(G,
                               pos,
                               edge_color=globalEdgeCol,
                               style=globalEdgeType,
                               alpha=globalEdgeOpacity,
                               ax=a)
        if (labels):
            nx.draw_networkx_labels(G,
                                    pos,
                                    font_size=globalLabSize,
                                    ax=a,
                                    font_color=globalLabelCol)

    if (globalOptionsMet2 == "Subgraph Centrality"):
        # Metrics computing
        subgraphCentralities = nx.subgraph_centrality(G)

        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=subgraphCentralities,
                               name='subgraphCentrality')

        # minmax
        minSub = min(subgraphCentralities.values())
        maxSub = max(subgraphCentralities.values())

        colSub = []

        # Set the colors that could be used next
        for node, data in G.nodes(data=True):
            colSub.append(normalize(data['subgraphCentrality'], maxSub,
                                    minSub))

        nx.draw_networkx_nodes(G,
                               pos=pos,
                               vmax=1,
                               vmin=0,
                               cmap=cmapChosen,
                               with_labels=labels,
                               node_size=sizes,
                               node_color=colSub,
                               ax=a)
        nx.draw_networkx_edges(G,
                               pos,
                               edge_color=globalEdgeCol,
                               style=globalEdgeType,
                               alpha=globalEdgeOpacity,
                               ax=a)
        if (labels):
            nx.draw_networkx_labels(G,
                                    pos,
                                    font_size=globalLabSize,
                                    ax=a,
                                    font_color=globalLabelCol)

    if (globalOptionsMet2 == "Load Centrality"):
        # Metrics computing
        loadCentralities = nx.load_centrality(G)

        # Add metrics as params of the nodes
        nx.set_node_attributes(G,
                               values=loadCentralities,
                               name='loadCentrality')

        # minmax
        minLoad = min(loadCentralities.values())
        maxLoad = max(loadCentralities.values())

        colLoad = []

        # Set the colors that could be used next
        for node, data in G.nodes(data=True):
            colLoad.append(normalize(data['loadCentrality'], maxLoad, minLoad))

        nx.draw_networkx_nodes(G,
                               pos=pos,
                               vmax=1,
                               vmin=0,
                               cmap=cmapChosen,
                               with_labels=labels,
                               node_size=sizes,
                               node_color=colLoad,
                               ax=a)
        nx.draw_networkx_edges(G,
                               pos,
                               edge_color=globalEdgeCol,
                               style=globalEdgeType,
                               alpha=globalEdgeOpacity,
                               ax=a)
        if (labels):
            nx.draw_networkx_labels(G,
                                    pos,
                                    font_size=globalLabSize,
                                    ax=a,
                                    font_color=globalLabelCol)
Example #56
import pickle

import networkx as nx

# G is the graph constructed earlier in the original script
closeness_centrality = nx.closeness_centrality(G)
print(closeness_centrality)
with open('../data/closeness_centrality.pkl', 'wb') as file:
    pickle.dump(closeness_centrality, file)

eigenvector_centrality = nx.eigenvector_centrality(G)
print(eigenvector_centrality)
with open('../data/eigenvector_centrality.pkl', 'wb') as file:
    pickle.dump(eigenvector_centrality, file)

# betweenness_centrality is very time-consuming
betweenness_centrality = nx.betweenness_centrality(G)
print(betweenness_centrality)
with open('../data/betweenness_centrality.pkl', 'wb') as file:
    pickle.dump(betweenness_centrality, file)

harmonic_centrality = nx.harmonic_centrality(G)
print(harmonic_centrality)
with open('../data/harmonic_centrality.pkl', 'wb') as file:
    pickle.dump(harmonic_centrality, file)

load_centrality = nx.load_centrality(G)
print(load_centrality)
with open('../data/load_centrality.pkl', 'wb') as file:
    pickle.dump(load_centrality, file)

subgraph_centrality = nx.subgraph_centrality(G)
print(subgraph_centrality)
with open('../data/subgraph_centrality.pkl', 'wb') as file:
    pickle.dump(subgraph_centrality, file)
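Reading one of the dumps back is symmetric (same paths as above):

import pickle

with open('../data/load_centrality.pkl', 'rb') as f:
    load_centrality = pickle.load(f)
print(max(load_centrality, key=load_centrality.get))  # node with the highest load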
Example #57
    def find_keywords(self, document, input_type="file", validate=False):

        if validate == True:
            distance_method = "editdistance"
        else:
            distance_method = self.distance_method

        limit_num_keywords = self.hyperparameters['num_keywords']
        if "lemmatizer" in self.hyperparameters:
            lemmatizer = self.hyperparameters['lemmatizer']
        else:
            lemmatizer = None
        double_weight_threshold = self.hyperparameters[
            'bigram_count_threshold']
        stopwords = self.hyperparameters['stopwords']
        num_tokens = self.hyperparameters['num_tokens']
        distance_threshold = self.hyperparameters['distance_threshold']
        pair_diff_length = self.hyperparameters['pair_diff_length']

        all_terms = set()
        klens = {}

        weighted_graph, reps = self.corpus_graph(document,
                                                 lemmatizer=lemmatizer,
                                                 stopwords=stopwords,
                                                 input_type=input_type)
        nn = len(list(weighted_graph.nodes()))

        if distance_threshold > 0:
            self.centrality = nx.load_centrality(weighted_graph)
            self.hypervertex_prunning(weighted_graph,
                                      distance_threshold,
                                      pair_diff_max=pair_diff_length,
                                      distance_method=distance_method)

        nn2 = len(list(weighted_graph.nodes()))

        self.initial_tokens = nn
        self.pruned_tokens = nn2

        if self.verbose:
            logging.info("Number of nodes reduced from {} to {}".format(
                nn, nn2))

        pgx = nx.load_centrality(weighted_graph)

        ## assign to global vars
        self.keyword_graph = weighted_graph
        self.centrality = pgx

        keywords_with_scores = sorted(pgx.items(),
                                      key=operator.itemgetter(1),
                                      reverse=True)
        kw_map = dict(keywords_with_scores)

        if reps and (2 in num_tokens or 3 in num_tokens):

            higher_order_1 = []
            higher_order_2 = []
            frequent_pairs = []
            ## Check potential edges
            for edge in weighted_graph.edges(data=True):
                if edge[0] != edge[1]:
                    if "weight" in edge[2]:
                        if edge[2]['weight'] > double_weight_threshold:
                            frequent_pairs.append(edge[0:2])

            ## Traverse the frequent pairs
            for pair in frequent_pairs:
                w1 = pair[0]
                w2 = pair[1]
                if w1 in kw_map and w2 in kw_map:
                    score = np.mean([kw_map[w1], kw_map[w2]])
                    if not w1 + " " + w2 in all_terms:
                        higher_order_1.append((w1 + " " + w2, score))
                        all_terms.add(w1 + " " + w2)

            ## Three word keywords are directed paths.
            three_gram_candidates = []
            for pair in frequent_pairs:
                for edge in weighted_graph.in_edges(pair[0]):
                    if edge[0] in kw_map:
                        trip_score = [
                            kw_map[edge[0]], kw_map[pair[0]], kw_map[pair[1]]
                        ]
                        term = edge[0] + " " + pair[0] + " " + pair[1]
                        score = np.mean(trip_score)
                        if not term in all_terms:
                            higher_order_2.append((term, score))
                            all_terms.add(term)

                for edge in weighted_graph.out_edges(pair[1]):
                    if edge[1] in kw_map:
                        trip_score = [
                            kw_map[edge[1]], kw_map[pair[0]], kw_map[pair[1]]
                        ]
                        term = pair[0] + " " + pair[1] + " " + edge[1]
                        score = np.mean(trip_score)
                        if not term in all_terms:
                            higher_order_2.append((term, score))
                            all_terms.add(term)
        else:
            higher_order_1 = []
            higher_order_2 = []

        total_keywords = []
        if 1 in num_tokens:
            total_keywords += keywords_with_scores
        if 2 in num_tokens:
            total_keywords += higher_order_1
        if 3 in num_tokens:
            total_keywords += higher_order_2

        total_kws = sorted(set(total_keywords),
                           key=operator.itemgetter(1),
                           reverse=True)[0:limit_num_keywords]

        return total_kws
Example #58
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt


def caracteristicas(instancias):
    """Compute six node metrics for five graph instances and box-plot them."""
    datos = []
    for instancia in instancias[:5]:
        # one dict of per-node values per metric, in a fixed order
        metricas = [
            nx.degree_centrality(instancia.G),
            nx.clustering(instancia.G),
            nx.closeness_centrality(instancia.G),
            nx.load_centrality(instancia.G),
            nx.eccentricity(instancia.G),
            nx.pagerank(instancia.G)
        ]
        filas = []
        for i, alg in enumerate(metricas, start=1):
            valores = list(alg.values())
            if i == 5:  # eccentricity: rescale so the values sum to 1
                suma = np.sum(valores)
                valores = [j / suma for j in valores]
            filas.extend([i, j] for j in valores)
        datos.append(pd.DataFrame(filas, columns=['A', 'V']))

    # one box-plot figure per instance, saved as Grafo_1.eps ... Grafo_5.eps
    for k, df in enumerate(datos, start=1):
        bplot = sns.boxplot(y='V',
                            x='A',
                            data=df,
                            width=0.4,
                            palette="colorblind")
        # bplot.axes.set_title("Comparison by algorithm for instance %d" % k, fontsize=13)
        bplot.set_xlabel("Algoritmo", fontsize=11)  # "Algorithm"
        bplot.set_ylabel("Valor", fontsize=11)      # "Value"
        bplot.tick_params(labelsize=9)
        plt.savefig("Grafo_%d.eps" % k)
        plt.show()
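A hedged usage sketch for caracteristicas: the instancias argument is assumed to be a sequence of objects exposing a NetworkX graph as a .G attribute; the Instancia wrapper below is hypothetical and only stands in for whatever class the original code used.

import networkx as nx

class Instancia:  # hypothetical wrapper; only the .G attribute is assumed
    def __init__(self, G):
        self.G = G

# connected graphs, since nx.eccentricity raises on disconnected input
instancias = [Instancia(nx.connected_watts_strogatz_graph(50, 4, 0.3))
              for _ in range(5)]
caracteristicas(instancias)  # writes Grafo_1.eps ... Grafo_5.eps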
Example #59
0
def load_centrality(self):
    # cache the load-centrality score of every node of self.G on the instance
    self.load_centrality_dict = nx.load_centrality(self.G)
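The two-line method above plainly comes from a graph-wrapper class; a minimal sketch of plausible surrounding context (the class name and constructor are assumptions, only self.G and the cached dict are implied by the snippet):

import networkx as nx

class GraphAnalyzer:  # hypothetical name; only .G is implied by the method
    def __init__(self, G):
        self.G = G
        self.load_centrality_dict = {}

    def load_centrality(self):
        self.load_centrality_dict = nx.load_centrality(self.G)

analyzer = GraphAnalyzer(nx.karate_club_graph())
analyzer.load_centrality()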
Example #60
0
        width[r] = R
        G.add_edge(lista[r][0], lista[r][1], capacity=R)
    for w in range(ordenes[i]):
        # draw a random source/sink pair until the two nodes differ
        initial = final = 0
        while initial == final:
            initial = random.randint(0, round(len(G.nodes) / 2))
            final = random.randint(initial, len(G.nodes) - 2)

        # time a single maximum-flow computation between the pair
        tiempo_inicial = time()
        T = nx.maximum_flow(G, initial, final)  # (flow_value, flow_dict)
        tiempo_final = time()
        tiempo_ejecucion = tiempo_final - tiempo_inicial

        # node-level features of the source node
        data[contador, 2] = nx.clustering(G, nodes=initial)
        data[contador, 3] = nx.load_centrality(G, v=initial)
        data[contador, 4] = nx.closeness_centrality(G, u=initial)
        data[contador, 5] = nx.eccentricity(G, v=initial)
        data[contador, 6] = nx.pagerank(G, alpha=0.9)[initial]
        # node-level features of the sink node
        data[contador, 7] = nx.clustering(G, nodes=final)
        data[contador, 8] = nx.load_centrality(G, v=final)
        data[contador, 9] = nx.closeness_centrality(G, u=final)
        data[contador, 10] = nx.eccentricity(G, v=final)
        data[contador, 11] = nx.pagerank(G, alpha=0.9)[final]
        # target value (max-flow), runtime, and instance order
        data[contador, 0] = T[0]
        data[contador, 1] = tiempo_ejecucion
        data[contador, 12] = ordenes[i]
        contador += 1
        lista_flujo = []
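For reference, nx.maximum_flow returns a (flow_value, flow_dict) pair, which is why the fragment above stores T[0] as the target value; a minimal standalone illustration:

import networkx as nx

G = nx.DiGraph()
G.add_edge("s", "a", capacity=3.0)
G.add_edge("a", "t", capacity=2.0)
flow_value, flow_dict = nx.maximum_flow(G, "s", "t")
print(flow_value)           # 2.0, limited by the a -> t edge
print(flow_dict["s"]["a"])  # 2.0 units routed through node a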