Example #1
def test_undirected_weighted_star(self):
    G = nx.Graph()
    G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
    centrality = nx.local_reaching_centrality(
        G, 1, normalized=False, weight="weight"
    )
    assert centrality == 1.5
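
Why 1.5: from node 1 the reachable nodes are 2 and 3, over single edges of weight 1 and 2, and the unnormalized local reaching centrality is the sum of the average path weights divided by N - 1, i.e. (1 + 2) / 2 = 1.5. A standalone reproduction of the test:

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
# Unnormalized weighted variant: average of the mean edge weights along the
# shortest paths from node 1 to every other reachable node.
print(nx.local_reaching_centrality(G, 1, normalized=False, weight="weight"))  # 1.5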
Example #2
import networkx as nx
import pandas as pd


def calculate_networks_indicators(graph):
    """Compute basic network indicators of ``graph`` and return them as a DataFrame."""
    degree_centrality = nx.degree_centrality(graph)
    nodes = list(degree_centrality.keys())
    betweenness_centrality = nx.betweenness_centrality(graph, weight='weight')
    network_indicators = pd.DataFrame({
        'nodes': nodes,
        'degree_centrality': [degree_centrality[node] for node in nodes],
        'betweenness_centrality': [betweenness_centrality[node] for node in nodes]
    })

    network_indicators['local_reaching_centrality'] = [
        nx.local_reaching_centrality(graph, node, weight='weight')
        for node in nodes
    ]
    constraint = nx.constraint(graph, weight='weight')
    network_indicators['constraint'] = [constraint[node] for node in nodes]
    effective_size = nx.effective_size(graph, weight='weight')
    network_indicators['effective_size'] = [
        effective_size[node] for node in nodes
    ]
    triangles = nx.triangles(graph)
    network_indicators['triangles'] = [triangles[node] for node in nodes]
    clustering = nx.clustering(graph, weight='weight')
    network_indicators['clustering'] = [clustering[node] for node in nodes]

    weight_dict = dict(nx.degree(graph, weight='weight'))
    degree_dict = dict(nx.degree(graph))
    # Average incident edge weight per node (0 for isolated nodes).
    average_weight_dict = {
        node: (weight_dict[node] / degree_dict[node] if degree_dict[node] != 0 else 0)
        for node in weight_dict
    }
    network_indicators['tie_strength'] = [
        average_weight_dict[node] for node in nodes
    ]
    network_indicators['number_of_node'] = nx.number_of_nodes(graph)
    network_indicators['density'] = nx.density(graph)
    # nx.graph_clique_number was deprecated and removed in NetworkX 3.0;
    # compute the clique number from the maximal cliques directly.
    clique_number = max(len(c) for c in nx.find_cliques(graph))
    network_indicators['cliques'] = clique_number if clique_number >= 3 else 0
    network_indicators['efficiency'] = nx.global_efficiency(graph)
    network_indicators['isolates'] = nx.number_of_isolates(graph)

    network_indicators = network_indicators[[
        'nodes', 'degree_centrality', 'betweenness_centrality',
        'local_reaching_centrality', 'constraint', 'effective_size',
        'triangles', 'clustering', 'tie_strength', 'number_of_node', 'density',
        'cliques', 'efficiency', 'isolates'
    ]]
    return network_indicators
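
A minimal driver for calculate_networks_indicators, assuming only that the input is a small undirected graph with positive 'weight' attributes; the toy graph below is an illustration, not part of the original code:

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 1.0), (1, 3, 2.0), (2, 3, 1.5), (3, 4, 0.5)])
print(calculate_networks_indicators(G))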
Example #3
def lrc(G, Gnx):
    # G is a custom graph object exposing iternodes(); Gnx is the equivalent
    # NetworkX graph. Collect the nodes whose local reaching centrality is 1.
    cent = []
    for node in G.iternodes():
        c = nx.local_reaching_centrality(Gnx, node)
        if c == 1.0:
            cent.append(node)
    return cent
Example #4
def get_local_reaching(G):
    # Compute local reaching centrality for each node in G; assumes nodes
    # are labelled 0 .. len(G) - 1.
    deg = {}
    for x in range(len(G)):
        deg[x] = nx.local_reaching_centrality(G, x)
    deg_df = pd.Series(deg).to_frame()
    deg_df.columns = ['Reaching']
    deg_df['reach_rank'] = deg_df['Reaching'].rank(method='min', ascending=False)
    return deg_df
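
Because the loop runs over range(len(G)), get_local_reaching only fits graphs whose nodes are labelled 0 .. len(G) - 1; a quick check under that assumption (the graph choice is illustrative):

import networkx as nx

G = nx.gnp_random_graph(8, 0.4, seed=42)  # nodes labelled 0..7
df = get_local_reaching(G)
print(df.sort_values('reach_rank').head())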
Example #5
def basic_information(graph, nodes, year, indicator_type):
    betweenness_centrality = nx.betweenness_centrality(graph, weight='weight')
    local_reaching_centrality = [
        nx.local_reaching_centrality(graph, node, weight='weight')
        for node in nodes
    ]
    triangles = nx.triangles(graph)
    clustering = nx.clustering(graph, weight='weight')
    weight_dict = dict(nx.degree(graph, weight='weight'))
    degree_dict = dict(nx.degree(graph))
    # Average incident edge weight per node (0 for isolated nodes).
    average_weight_dict = {
        node: (weight_dict[node] / degree_dict[node] if degree_dict[node] != 0 else 0)
        for node in weight_dict
    }
    data = pd.DataFrame({
        'nodes': nodes,
        'betweenness_centrality': [betweenness_centrality[node] for node in nodes],
        'local_reaching_centrality': local_reaching_centrality,
        'triangles': [triangles[node] for node in nodes],
        'clustering': [clustering[node] for node in nodes],
        'tie_strength': [average_weight_dict[node] for node in nodes]
    })
    # '三年期' means a three-year window, '五年期' a five-year window; the data
    # paths below are project-specific and kept verbatim.
    if indicator_type == '三年期':
        excel_path = ('../data/生成数据/04关系矩阵_中间指标/三年期/' + str(year) +
                      '-' + str(year + 2) + '年竞争关系矩阵')
    else:
        excel_path = ('../data/生成数据/04关系矩阵_中间指标/五年期/' + str(year) +
                      '-' + str(year + 4) + '年竞争关系矩阵')
    if not os.path.exists(excel_path):
        os.makedirs(excel_path)
    data.to_excel(excel_writer=excel_path + '/基础指标.xlsx', index=False)
    # "<year>: basic_information computed!"
    print(str(year) + '年' + 'basic_information' + '计算完毕!')
Example #6
    def compute_subgraph_center(self, subgraph):

        if self.method == 'betweenness_centrality':
            d = nx.betweenness_centrality(subgraph, weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'betweenness_centrality_subset':
            # nx.betweenness_centrality_subset also needs `sources` and
            # `targets`; using all nodes for both is one reasonable default.
            d = nx.betweenness_centrality_subset(
                subgraph, sources=list(subgraph), targets=list(subgraph),
                weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'information_centrality':
            d = nx.information_centrality(subgraph, weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'local_reaching_centrality':

            d = {}
            for n in self.G.nodes():
                d[n] = nx.local_reaching_centrality(self.G, n, weight='weight')

            center = max(d, key=d.get)

        elif self.method == 'voterank':
            # nx.voterank returns a list of nodes ordered by decreasing
            # influence, not a dict, so the first element is the center.
            influential = nx.voterank(subgraph)
            center = influential[0]

        elif self.method == 'percolation_centrality':
            d = nx.percolation_centrality(subgraph, weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'subgraph_centrality':
            d = nx.subgraph_centrality(subgraph)
            center = max(d, key=d.get)

        elif self.method == 'subgraph_centrality_exp':
            d = nx.subgraph_centrality_exp(subgraph)
            center = max(d, key=d.get)

        elif self.method == 'estrada_index':
            # nx.estrada_index returns one float for the whole graph, so it
            # cannot rank nodes; per-node subgraph centrality (whose values
            # the Estrada index sums) is used here instead.
            d = nx.subgraph_centrality(subgraph)
            center = max(d, key=d.get)

        elif self.method == 'second_order_centrality':
            # Lower second-order centrality means more central, so take min.
            d = nx.second_order_centrality(subgraph)
            center = min(d, key=d.get)

        elif self.method == 'eigenvector_centrality':

            d = nx.eigenvector_centrality(subgraph, weight='weight')
            center = max(d, key=d.get)
        elif self.method == 'load_centrality':

            d = nx.load_centrality(subgraph, weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'closeness_centrality':
            d = nx.closeness_centrality(subgraph)
            center = max(d, key=d.get)

        elif self.method == 'current_flow_closeness_centrality':
            d = nx.current_flow_closeness_centrality(subgraph, weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'current_flow_betweenness_centrality':
            d = nx.current_flow_betweenness_centrality(subgraph,
                                                       weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'current_flow_betweenness_centrality_subset':
            d = nx.current_flow_betweenness_centrality_subset(subgraph,
                                                              weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'approximate_current_flow_betweenness_centrality':
            d = nx.approximate_current_flow_betweenness_centrality(
                subgraph, weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'harmonic_centrality':
            d = nx.harmonic_centrality(subgraph)
            center = max(d, key=d.get)

        elif self.method == 'page_rank':

            d = nx.pagerank(subgraph, weight='weight')
            center = max(d, key=d.get)

        elif self.method == 'hits':
            # nx.hits returns a (hubs, authorities) pair of dicts; rank nodes
            # by authority score here.
            _, authorities = nx.hits(subgraph)
            center = max(authorities, key=authorities.get)

        elif self.method == 'katz_centrality':
            d = nx.katz_centrality(subgraph, weight='weight')
            center = max(d, key=d.get)

        else:
            new_centers = nx.center(subgraph)
            # nx.center returns a list of center nodes; picking one at random
            # is not stable across runs.
            # TODO: find a deterministic way to choose among the centers.
            index = random.randint(0, len(new_centers) - 1)
            center = new_centers[index]
        return center
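
The method reads only self.method (plus self.G in the local-reaching branch), so the excerpt can be exercised as a plain function with a lightweight stand-in for the original class; the SimpleNamespace holder below is an assumption for illustration, not the original class:

from types import SimpleNamespace

import networkx as nx

sub = nx.karate_club_graph()
holder = SimpleNamespace(method='closeness_centrality', G=sub)
print(compute_subgraph_center(holder, sub))  # most-central node of the subgraph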
Example #7
    # Fragment of a name -> function dispatch table; the lines before this
    # entry were lost in extraction, so the first key is restored from the
    # "<measure>_undirected" naming pattern of the surrounding entries.
    "closeness_centrality_undirected":
    lambda G: nx.closeness_centrality(G.to_undirected()),
    "betweenness_centrality": nx.betweenness_centrality,
    "betweenness_centrality_undirected":
    lambda G: nx.betweenness_centrality(G.to_undirected()),
    "load_centrality": nx.load_centrality,
    "load_centrality_undirected":
    lambda G: nx.load_centrality(G.to_undirected()),
    "harmonic_centrality": nx.harmonic_centrality,
    "harmonic_centrality_undirected":
    lambda G: nx.harmonic_centrality(G.to_undirected()),
    "local_reaching_centrality":
    lambda G: {node: nx.local_reaching_centrality(G, node) for node in G.nodes},
    "local_reaching_centrality_undirected":
    lambda G: {node: nx.local_reaching_centrality(G.to_undirected(), node)
               for node in G.nodes},
    #     "participation_coefficient": get_participation_coefficient,
    "participation_coefficient_undirected":
    get_participation_coefficient_undirected,
    "within_community_strength_undirected":
    within_community_strength_undirected
}

fun_degree_measures = {
    "degree": lambda G: dict(G.degree()),  #dict(G.degree(G.nodes))
Example #8
# Read `LPedges1mode.csv` to a DataFrame:
typeAedges1mode_df = pd.read_csv('{}.csv'.format(args.inputFileName1[0]))

# Create a master list that contains all edges,
# where each edge is a tuple of the format (LP,VC):
allEdges = [(row.From, row.To) for row in typeAedges1mode_df.itertuples()]

# Create the network graph object:
graph1mode = net.Graph()
graph1mode.add_edges_from(allEdges)

# Compute the various centralities; each becomes a dict of {node: centrality}:
degCentrality = dict(net.degree(graph1mode))  # DegreeView -> plain dict
eigCentrality = net.eigenvector_centrality(graph1mode)
reachCentrality = {n: net.local_reaching_centrality(graph1mode, n)
                   for n in graph1mode.nodes()}

# Read `LPnodes.csv` to a DataFrame:
typeAnodesCentrality1mode_df = pd.read_csv('{}.csv'.format(
    args.inputFileName2[0]))

# Add 3 new columns for the 3 centralities, mapping from their respective dicts:
typeAnodesCentrality1mode_df['degree'] = \
    typeAnodesCentrality1mode_df['label'].map(degCentrality, na_action='ignore')
typeAnodesCentrality1mode_df['eigenvector'] = \
    typeAnodesCentrality1mode_df['label'].map(eigCentrality, na_action='ignore')
typeAnodesCentrality1mode_df['reach'] = \
    typeAnodesCentrality1mode_df['label'].map(reachCentrality, na_action='ignore')

# Iterate through every row in `typeAnodesCentrality1mode_df`,
Example #9
import random

import networkx as nx
import numpy as np


def mergegraph(graphs, pos_old, labels_old, edge_prob=0.3, edge_num=0.4):
    nodes = []
    edges = []
    pos = {}
    node_cnt = 0
    val = 0.9
    shift_value = [[-val, val], [val, val], [-val, -val], [val, -val]]

    comm_lables = []

    for i, g in enumerate(graphs):
        tmp_nodes = list(g.nodes())
        tmp_edges = list(g.edges())

        comm_lables += [i] * len(tmp_nodes)

        # Map original node ids to new, globally unique ids.
        node_map = {n: node_cnt + idx for idx, n in enumerate(tmp_nodes)}
        node_cnt += len(tmp_nodes)

        new_nodes = [node_map[n] for n in tmp_nodes]
        new_edges = [(node_map[u], node_map[v]) for u, v in tmp_edges]

        # Shift this component's layout into its own quadrant.
        for k in pos_old[i]:
            pos_old[i][k][0] += shift_value[i][0]
            pos_old[i][k][1] += shift_value[i][1]

        new_pos = {node_map[n]: v for n, v in pos_old[i].items()}

        nodes += new_nodes
        edges += new_edges
        pos.update(new_pos)

    G = nx.DiGraph()
    G.add_edges_from(edges)

    # Draw two random endpoint samples for extra cross-community edges.
    random.shuffle(nodes)
    num_extra = int(edge_num * len(nodes))
    u = nodes[:num_extra]
    random.shuffle(nodes)
    v = nodes[:num_extra]

    for s, t in zip(u, v):
        if random.random() < edge_prob:
            G.add_edge(s, t)
            G.add_edge(t, s)
    nodes_deg = [G.degree[i] for i in G.nodes()]

    centrality = nx.closeness_centrality(G)
    labels_central = get_labels(centrality)
    print('centrality done!')

    inf_cent = nx.information_centrality(G.to_undirected())
    labels_inf_central = get_labels(inf_cent)
    print('info centrality done!')

    betweenness = nx.betweenness_centrality(G.to_undirected())
    labels_betweenness = get_labels(betweenness)
    print('betweenness done!')

    loads = nx.load_centrality(G.to_undirected())
    labels_load = get_labels(loads)
    print('load centrality done!')

    cmm_bet = nx.communicability_betweenness_centrality(G.to_undirected())
    labels_cmm_bet = get_labels(cmm_bet)
    print('commu betweenness done!')

    sce = nx.subgraph_centrality_exp(G.to_undirected())
    labels_sce = get_labels(sce)
    print('subgraph centrality done!')

    harm = nx.harmonic_centrality(G.to_undirected())
    labels_harm = get_labels(harm)
    print('harmonic done!')

    lrc = {
        v: nx.local_reaching_centrality(G.to_undirected(), v)
        for v in G.nodes()
    }
    labels_lrc = get_labels(lrc)
    print('lrc done!')

    unq_lbl = np.unique(nodes_deg)
    lbl_map = {unq_lbl[i]: i for i in range(len(unq_lbl))}
    labels = [lbl_map[k] for k in nodes_deg]
    return G, pos, labels, comm_lables, labels_central, labels_inf_central, labels_betweenness, labels_load, labels_cmm_bet, labels_sce, labels_harm, labels_lrc
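
A minimal driver for mergegraph, assuming four dense communities so the merged graph stays connected (nx.information_centrality and nx.communicability_betweenness_centrality require a connected graph). get_labels is not shown in the snippet, so a quantile-binning stand-in is sketched here:

import networkx as nx
import numpy as np

def get_labels(score_dict, n_bins=4):
    # Stand-in for the project's helper: bin node scores into integer labels.
    scores = np.array([score_dict[n] for n in sorted(score_dict)])
    edges = np.quantile(scores, np.linspace(0, 1, n_bins + 1)[1:-1])
    return list(np.digitize(scores, edges))

graphs = [nx.erdos_renyi_graph(10, 0.7, seed=s) for s in range(4)]
pos_old = [nx.spring_layout(g, seed=s) for s, g in enumerate(graphs)]
G, pos, labels, comm_labels, *label_sets = mergegraph(graphs, pos_old, None)
print(G.number_of_nodes(), len(label_sets))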
Example #10
import os
from collections import Counter

import networkx as nx

import f  # project-local helpers: f.string_version_data(), f.trim_graph()


def calc_centralities(G, org_name, string_version):
    print("Calculating centralities")
    centrality_measures = {}
    string_location = f.string_version_data(string_version)[0]
    print(string_location)
    if os.path.isfile('centrality_data/%s/%s.cent' % (string_location, org_name)):
        print("Using cached centrality data")
        with open('centrality_data/%s/%s.cent' % (string_location, org_name)) as file:
            lines = file.readlines()
        centrality_list = lines.pop(0).strip().split(' ')
        centrality_list.pop(0)  # drop the leading org name

        for centrality in centrality_list:
            centrality_measures[centrality] = {}

        for line in lines:
            value_list = line.split(' ')
            for i, centrality in enumerate(centrality_list):
                centrality_measures[centrality][value_list[0]] = float(value_list[i + 1])
    else:
        print("1. Degree centrality")
        centrality_measures['Degree_Centrality'] = nx.degree_centrality(G)

        print("2. Closeness centrality")
        centrality_measures['Closeness_Centrality'] = Counter(
            nx.algorithms.centrality.closeness_centrality(G))

        print("3. Betweenness centrality")
        centrality_measures['Betweenness_Centrality'] = Counter(
            nx.algorithms.centrality.betweenness_centrality(G))

        print("4. Clustering coefficient")
        centrality_measures['Clustering_Co-efficient'] = Counter(nx.clustering(G))

        print("5. Eigenvector centrality")
        centrality_measures['Eigenvector_Centrality'] = nx.eigenvector_centrality(G)

        print("6. Subgraph centrality")
        centrality_measures["Subgraph_Centrality"] = nx.subgraph_centrality(G)

        print("7. Information centrality")
        centrality_measures["Information_Centrality"] = \
            nx.current_flow_closeness_centrality(f.trim_graph(G))

        print("8. Clique Number")
        cliq = {}
        for i in G.nodes():
            cliq[i] = nx.node_clique_number(G, i)
        centrality_measures["Clique_Number"] = cliq
        
        print("9. Edge clustering coefficient")
        edge_clus_coeff={}
        for n in G.nodes:
            edge_clus_coeff[n]=0
            for e in G.edges(n):
                num=len(list(nx.common_neighbors(G,e[0],e[1])))
                den=(min(G.degree(e[0]),G.degree(e[1]))-1)
                if den==0:
                    den=1
                edge_clus_coeff[n]+=num/den
    
        centrality_measures['Edge_Clustering_Coefficient']=edge_clus_coeff
        
        print("10. Page Rank")
        centrality_measures['Page_Rank']=nx.pagerank(G)
        
        print("11. Random Walk Betweenness Centrality")
        centrality_measures["Random_Walk_Betweenness_Centrality"]=nx.current_flow_betweenness_centrality(f.trim_graph(G))
        
        print("12. Load Centrality")
        centrality_measures["Load_Centrality"]=nx.load_centrality(G)
        
        print("13. Communicability Betweenness")
        centrality_measures["Communicability_Betweenness"]=nx.communicability_betweenness_centrality(f.trim_graph(G))
        
        print("14. Harmonic Centrality")
        centrality_measures["Harmonic_Centrality"]=nx.harmonic_centrality(G)
            
        print("15. Reaching Centrality")
        reach_cent={}
        for node in G.nodes:
            reach_cent[node] = nx.local_reaching_centrality(G,node)
        centrality_measures["Reaching_Centrality"]=reach_cent
        
        print("16. Katz Centrality(not calculated)")
    #   centrality_measures["Katz_Centrality"]=nx.katz_centrality(G)
    
        datafile=open("refex_props/%s.refex" % (org_name))
        sample_line=datafile.readline()
        s= sample_line.strip().split(' ')
        for x in range(1,len(s)):
            centrality_measures["refex#%d" % (x)]={}                
        for line in datafile:
            props=line.strip().split(" ")
            props=[i.strip('\t') for i in props]
            for x in range(1,len(s)):
                centrality_measures["refex#%d" % (x)][props[0]]=float(props[x])

        datafile=open("refex_rider_props/%s.riderproperties" % (org_name))
        sample_line=datafile.readline()
        s= sample_line.strip().split(' ')
        s.pop(1)
        print(len(s))
        for x in range(1,len(s)):
            centrality_measures["refex_rider#%d" % (x)]={}                
        
        for line in datafile:
            props=line.strip().split(" ")
            props.pop(1)
            for x in range(1,len(props)):

                centrality_measures["refex_rider#%d" % (x)][props[0]]=float(props[x])
    
     
        with open('centrality_data/%s/%s.cent' % (string_location, org_name), 'w') as file:
            file.write(str(org_name) + ' ')
            centrality_list = list(centrality_measures)
            for x in centrality_list:
                file.write(str(x) + ' ')

            for node in G.nodes:
                file.write('\n' + node + ' ')
                for x in centrality_list:
                    if node not in centrality_measures[x]:
                        file.write('-1 ')
                    else:
                        file.write(str(centrality_measures[x][node]) + ' ')
    return centrality_measures
Example #11
def local_reaching_centrality(graph):
    return {node: nx.local_reaching_centrality(graph, node) for node in graph}
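
A quick check on a graph where the expected ordering is obvious; the star graph here is illustrative:

G = nx.star_graph(3)  # hub 0 connected to leaves 1, 2, 3
print(local_reaching_centrality(G))  # the hub reaches everything in one hop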
Example #12
def test_negatively_weighted(self):
    with pytest.raises(nx.NetworkXError):
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)])
        nx.local_reaching_centrality(G, 0, weight="weight")
Example #13
def test_non_positive_weights(self):
    with pytest.raises(nx.NetworkXError):
        G = nx.DiGraph()
        G.add_weighted_edges_from([(0, 1, 0)])
        nx.local_reaching_centrality(G, 0, weight="weight")