def test_constraint_weighted_directed(self):
     D = self.D.copy()
     nx.set_edge_attributes(D, self.D_weights, "weight")
     constraint = nx.constraint(D, weight="weight")
     assert almost_equal(constraint[0], 0.840, places=3)
     assert almost_equal(constraint[1], 1.143, places=3)
     assert almost_equal(constraint[2], 1.378, places=3)
Example #2
    def __calcCentrality(self, G, cnt):
        '''
        Compute the selected centrality/structure measure for G and
        return its per-node values as a list.
        '''
        cntV = list()
        if cnt == 'deg':
            cntV = list(dict(G.degree).values())
        elif cnt == 'ei':
            cntV = list(nx.eigenvector_centrality_numpy(G).values())
        elif cnt == 'sh':
            cntV = list(nx.constraint(G).values())
        elif cnt == 'pr':
            cntV = list(nx.pagerank_numpy(G).values())
        elif cnt == 'bw':
            cntV = list(nx.betweenness_centrality(G).values())
        elif cnt == 'cl':
            cntV = list(nx.clustering(G).values())
        elif cnt == 'cc':
            cntV = list(nx.closeness_centrality(G).values())
        elif cnt == 'ec':
            cntV = list(nx.eccentricity(G).values())
        else:
            raise ValueError(
                '__calcCentrality: wrong cnt value or not implemented yet')

        return cntV
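The elif chain above is a dispatch table in disguise; here is a minimal standalone sketch of the same idea (MEASURES and centrality_values are illustrative names, not part of the original class, and nx.pagerank stands in for the deprecated nx.pagerank_numpy):

import networkx as nx

MEASURES = {
    'deg': lambda G: dict(G.degree()),
    'ei': nx.eigenvector_centrality_numpy,
    'sh': nx.constraint,               # Burt's structural-hole constraint
    'pr': nx.pagerank,                 # stand-in for nx.pagerank_numpy
    'bw': nx.betweenness_centrality,
    'cl': nx.clustering,
    'cc': nx.closeness_centrality,
    'ec': nx.eccentricity,
}

def centrality_values(G, cnt):
    if cnt not in MEASURES:
        raise ValueError(f'unknown cnt value: {cnt!r}')
    return list(MEASURES[cnt](G).values())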
Example #3
 def test_constraint_weighted_directed(self):
     D = self.D.copy()
     nx.set_edge_attributes(D, self.D_weights, "weight")
     constraint = nx.constraint(D, weight="weight")
     assert constraint[0] == pytest.approx(0.840, abs=1e-3)
     assert constraint[1] == pytest.approx(1.143, abs=1e-3)
     assert constraint[2] == pytest.approx(1.378, abs=1e-3)
Example #4
 def test_constraint_weighted_undirected(self):
     G = self.G.copy()
     nx.set_edge_attributes(G, self.G_weights, 'weight')
     constraint = nx.constraint(G, weight='weight')
     assert almost_equal(constraint['G'], 0.299, places=3)
     assert almost_equal(constraint['A'], 0.795, places=3)
     assert almost_equal(constraint['C'], 1, places=3)
Example #5
 def test_constraint_weighted_undirected(self):
     G = self.G.copy()
     nx.set_edge_attributes(G, self.G_weights, "weight")
     constraint = nx.constraint(G, weight="weight")
     assert almost_equal(constraint["G"], 0.299, places=3)
     assert almost_equal(constraint["A"], 0.795, places=3)
     assert almost_equal(constraint["C"], 1, places=3)
Example #6
 def test_constraint_weighted_directed(self):
     D = self.D.copy()
     nx.set_edge_attributes(D, self.D_weights, 'weight')
     constraint = nx.constraint(D, weight='weight')
     assert_almost_equal(round(constraint[0], 3), 0.840)
     assert_almost_equal(round(constraint[1], 3), 1.143)
     assert_almost_equal(round(constraint[2], 3), 1.378)
Example #7
 def test_constraint_weighted_undirected(self):
     G = self.G.copy()
     nx.set_edge_attributes(G, self.G_weights, 'weight')
     constraint = nx.constraint(G, weight='weight')
     assert_almost_equal(round(constraint['G'], 3), 0.299)
     assert_almost_equal(round(constraint['A'], 3), 0.795)
     assert_almost_equal(round(constraint['C'], 3), 1)
Example #8
 def test_constraint_weighted_undirected(self):
     G = self.G.copy()
     nx.set_edge_attributes(G, self.G_weights, 'weight')
     constraint = nx.constraint(G, weight='weight')
     assert_almost_equal(round(constraint['G'], 3), 0.299)
     assert_almost_equal(round(constraint['A'], 3), 0.795)
     assert_almost_equal(round(constraint['C'], 3), 1)
Example #9
 def test_constraint_weighted_undirected(self):
     G = self.G.copy()
     nx.set_edge_attributes(G, self.G_weights, "weight")
     constraint = nx.constraint(G, weight="weight")
     assert constraint["G"] == pytest.approx(0.299, abs=1e-3)
     assert constraint["A"] == pytest.approx(0.795, abs=1e-3)
     assert constraint["C"] == pytest.approx(1, abs=1e-3)
Example #10
 def test_constraint_weighted_directed(self):
     D = self.D.copy()
     nx.set_edge_attributes(D, self.D_weights, 'weight')
     constraint = nx.constraint(D, weight='weight')
     assert_almost_equal(round(constraint[0], 3), 0.840)
     assert_almost_equal(round(constraint[1], 3), 1.143)
     assert_almost_equal(round(constraint[2], 3), 1.378)
Example #11
def calculate_networks_indicators(graph):
    """Compute basic per-node and whole-network indicators for a weighted graph."""
    degree_centrality = nx.degree_centrality(graph)
    nodes = list(degree_centrality.keys())
    betweenness_centrality = nx.betweenness_centrality(graph, weight='weight')
    network_indicators = pd.DataFrame({
        'nodes': nodes,
        'degree_centrality': [degree_centrality[node] for node in nodes],
        'betweenness_centrality':
            [betweenness_centrality[node] for node in nodes]
    })

    network_indicators['local_reaching_centrality'] = [
        nx.local_reaching_centrality(graph, node, weight='weight')
        for node in nodes
    ]
    constraint = nx.constraint(graph, weight='weight')
    network_indicators['constraint'] = [constraint[node] for node in nodes]
    effective_size = nx.effective_size(graph, weight='weight')
    network_indicators['effective_size'] = [
        effective_size[node] for node in nodes
    ]
    triangles = nx.triangles(graph)
    network_indicators['triangles'] = [triangles[node] for node in nodes]
    clustering = nx.clustering(graph, weight='weight')
    network_indicators['clustering'] = [clustering[node] for node in nodes]

    weight_dict = {node: w for node, w in nx.degree(graph, weight='weight')}
    degree_dict = {node: d for node, d in nx.degree(graph)}
    # mean tie strength: weighted degree divided by unweighted degree
    average_weight_dict = {
        node: (weight_dict[node] / degree_dict[node]
               if degree_dict[node] != 0 else 0)
        for node in weight_dict
    }
    network_indicators['tie_strength'] = [
        average_weight_dict[node] for node in nodes
    ]
    network_indicators['number_of_node'] = nx.number_of_nodes(graph)
    network_indicators['density'] = nx.density(graph)
    cliques = nx.graph_clique_number(graph)
    if cliques >= 3:
        network_indicators['cliques'] = cliques
    else:
        network_indicators['cliques'] = 0
    network_indicators['efficiency'] = nx.global_efficiency(graph)
    network_indicators['isolates'] = nx.number_of_isolates(graph)

    network_indicators = network_indicators[[
        'nodes', 'degree_centrality', 'betweenness_centrality',
        'local_reaching_centrality', 'constraint', 'effective_size',
        'triangles', 'clustering', 'tie_strength', 'number_of_node', 'density',
        'cliques', 'efficiency', 'isolates'
    ]]
    return network_indicators
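A minimal usage sketch for calculate_networks_indicators (illustrative; nx.graph_clique_number was removed in NetworkX 3.0, so this assumes an older release):

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([('a', 'b', 2.0), ('b', 'c', 1.0), ('a', 'c', 3.0)])
df = calculate_networks_indicators(G)
print(df[['nodes', 'constraint', 'effective_size', 'tie_strength']])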
Example #12
    def __scores(self, g):
        # Per-node measures; 'sh' is Burt's structural-hole constraint.
        bw = nx.betweenness_centrality(g)
        cc = nx.closeness_centrality(g)
        ei = nx.eigenvector_centrality_numpy(g)
        deg = nx.degree_centrality(g)
        pr = nx.pagerank(g)
        cl = nx.clustering(g)
        ec = nx.eccentricity(g)
        sh = nx.constraint(g)

        return bw, cc, ei, deg, pr, cl, ec, sh
Example #13
def statistic_analysis(G, output):
    write_book = xw.Workbook(output)
    # summary sheet
    write_book_sheet1 = write_book.add_worksheet('总表')
    col = [['名称', '值'],
           ['节点数量', G.number_of_nodes()],
           ['边数量', G.number_of_edges()],
           ['网络密度', nx.density(G)],
           ['网络传递性', nx.transitivity(G)],
           ['平均聚类系数', average_value(nx.clustering(G))],
           ['平均点度中心度(相对)', average_value(nx.degree_centrality(G))],
           ['平均接近中心度', average_value(nx.closeness_centrality(G))],
           ['平均中介中心度', average_value(nx.betweenness_centrality(G))],
           # ['平均特征向量中心度', average_value(nx.eigenvector_centrality(G))],
           # ['average_efficiency', average_value(nx.effective_size(G))],
           ['average_constraint', average_value(nx.constraint(G))],
           ['average_embeddedness', average_value(mul(nx.betweenness_centrality(G), nx.clustering(G)))],
           ['点度中心势(相对)', C_CP_RD(nx.degree_centrality(G))],
           ['中介中心势', C_CP_RB(nx.betweenness_centrality(G))],
           ['接近中心势', C_CP_RP(nx.closeness_centrality(G))]
           ]  # formulas as given in the paper's formula table
    for i in range(0, len(col)):
        write_book_sheet1.write(i, 0, col[i][0])
        write_book_sheet1.write(i, 1, col[i][1])
    print('summary sheet done!')

    # number of ranked entries to output (currently all nodes)
    K = G.number_of_nodes()
    # pair each sheet name with its measure dict
    sheet_box = [['聚类系数', nx.clustering(G)],
                 ['点度中心度(相对)', nx.degree_centrality(G)],
                 ['接近中心度', nx.closeness_centrality(G)],
                 ['中介中心度', nx.betweenness_centrality(G)],
                 # ['特征向量中心度', nx.eigenvector_centrality(G)],
                 # ['efficiency', nx.effective_size(G)],
                 # ['constraint', nx.constraint(G)],
                 # ['embeddedness', mul(nx.betweenness_centrality(G), nx.clustering(G))]
                 ]  # formulas as given in the paper's formula table
    for i in range(0, len(sheet_box)):
        write_book_sheet2 = write_book.add_worksheet(sheet_box[i][0])
        write_book_sheet2.write(0, 0, '排名')
        write_book_sheet2.write(0, 1, '名称')
        write_book_sheet2.write(0, 2, '值')
        output_dict = get_k(sheet_box[i][1], K)
        for j in range(0, K):
            write_book_sheet2.write(j + 1, 0, j + 1)
            write_book_sheet2.write(j + 1, 1, list(output_dict.keys())[j])
            write_book_sheet2.write(j + 1, 2, list(output_dict.values())[j])
        print('sheet ' + str(i + 1) + ' done!')

    print(output + ' done!')
    write_book.close()
Example #14
def get_network_metrics(G):
    # print(len(G.nodes()), len(G.edges()))
    constraint = list(nx.constraint(G, weight='weight').values())
    # constraint_stats = more_constraint_stats(constraint)
    # esize = nx.effective_size(G)
    # efficiency = {n: (v / G.degree(n) if G.degree(n) != 0 else 0) for n, v in esize.items()}
    constraint = [x for x in constraint if not math.isnan(x)]
    return [
        nx.density(G),
        nx.average_clustering(G, weight='weight'),
        centralization(G),
        np.mean(constraint),
        len(G.nodes()),
        len(G.edges()),
        1.0 * len(list(nx.isolates(G))) / len(G.nodes()),
        # fraction of nodes in the largest connected component
        # (nx.connected_component_subgraphs was removed in NetworkX 2.4)
        1.0 * len(max(nx.connected_components(G), key=len)) / len(G.nodes())
    ]
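The NaN filter above is needed because constraint is undefined for isolated nodes (compare test_constraint_isolated elsewhere in this listing); a quick illustration:

import math

import networkx as nx

G = nx.path_graph(3)
G.add_node(99)  # node 99 is isolated
assert math.isnan(nx.constraint(G)[99])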
Example #15
def hierarchy(G, nodes=None, weight=None):
    '''Returns the hierarchy (Burt) of all nodes in the graph G'''
    if nodes is None:
        nodes = G
    hierarchy = {}
    for v in nodes:
        N = len(G[v])
        if N == 0:
            # hierarchy is undefined for isolated nodes
            hierarchy[v] = float('nan')
            continue
        constraint_v = nx.constraint(G, [v], weight=weight)
        temp = 0
        for j in G[v]:
            # neighbor j's constraint relative to the mean constraint C_v / N
            ratio = nx.local_constraint(G, v, j, weight=weight) / (constraint_v[v] / N)
            temp += ratio * np.log(ratio)
        # note: N == 1 gives 0/0 below and yields nan
        hierarchy[v] = temp / (N * np.log(N))
    return hierarchy
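A small sanity check (illustrative): in a perfectly symmetric graph every neighbour contributes the same constraint, so every ratio is 1 and the hierarchy is 0.

import numpy as np
import networkx as nx

G = nx.complete_graph(4)
print(hierarchy(G))  # {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0}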
Example #16
def constraint(graph, nodes, year, indicator_type):
    constraint = nx.constraint(graph, weight='weight')
    data = pd.DataFrame({
        'nodes': nodes,
        'constraint': [constraint[node] for node in nodes]
    })
    if indicator_type == '三年期':  # three-year window
        excel_path = '../data/生成数据/04关系矩阵_中间指标/三年期/' + str(year) + '-' + str(
            year + 2) + '年竞争关系矩阵'
    else:  # five-year window
        excel_path = '../data/生成数据/04关系矩阵_中间指标/五年期/' + str(year) + '-' + str(
            year + 4) + '年竞争关系矩阵'
    folder = os.path.exists(excel_path)
    if not folder:
        os.makedirs(excel_path)
    data.to_excel(excel_writer=excel_path + '/constraint指标.xlsx', index=False)
    print(str(year) + ': constraint computed!')
Example #17
def add_structural_holes_constraint(network: nx.Graph):
    sh = nx.constraint(network)
    nx.set_node_attributes(network, sh, 'Structural_Holes_Constraint')
    return network
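Usage sketch for the helper above (the karate-club graph is just a convenient built-in example):

import networkx as nx

G = add_structural_holes_constraint(nx.karate_club_graph())
print(G.nodes[0]['Structural_Holes_Constraint'])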
Example #18
 def test_constraint_directed(self):
     constraint = nx.constraint(self.D)
     assert_almost_equal(round(constraint[0], 3), 1.003)
     assert_almost_equal(round(constraint[1], 3), 1.003)
     assert_almost_equal(round(constraint[2], 3), 1.389)
Example #19
 def test_constraint_isolated(self):
     G = self.G.copy()
     G.add_node(1)
     constraint = nx.constraint(G)
     assert_true(math.isnan(constraint[1]))
Example #20
def features_part2(info):
    """
    Third set of features.
    """
    G = info['G']
    n = info['num_nodes']
    num_units = info['num_units']
    edges = info['edges']
    nedges = len(edges)

    H = G.to_undirected()

    res = dict()
    cc = nx.closeness_centrality(G)
    res['closeness_centrality'] = cc[n - 1]
    res['closeness_centrality_mean'] = np.mean(list(cc.values()))

    bc = nx.betweenness_centrality(G)
    res['betweenness_centrality_mean'] = np.mean(list(bc.values()))

    cfcc = nx.current_flow_closeness_centrality(H)
    res['current_flow_closeness_centrality_mean'] = np.mean(list(
        cfcc.values()))

    cfbc = nx.current_flow_betweenness_centrality(H)
    res['current_flow_betweenness_centrality_mean'] = np.mean(
        list(cfbc.values()))

    soc = nx.second_order_centrality(H)
    res['second_order_centrality_mean'] = np.mean(list(soc.values())) / n

    cbc = nx.communicability_betweenness_centrality(H)
    res['communicability_betweenness_centrality_mean'] = np.mean(
        list(cbc.values()))

    comm = nx.communicability(H)
    res['communicability'] = np.log(comm[0][n - 1])
    res['communicability_start_mean'] = np.log(np.mean(list(comm[0].values())))
    res['communicability_end_mean'] = np.log(
        np.mean(list(comm[n - 1].values())))

    res['radius'] = nx.radius(H)
    res['diameter'] = nx.diameter(H)
    res['local_efficiency'] = nx.local_efficiency(H)
    res['global_efficiency'] = nx.global_efficiency(H)
    res['efficiency'] = nx.efficiency(H, 0, n - 1)

    pgr = nx.pagerank_numpy(G)
    res['page_rank'] = pgr[n - 1]
    res['page_rank_mean'] = np.mean(list(pgr.values()))

    cnstr = nx.constraint(G)
    res['constraint_mean'] = np.mean(list(cnstr.values())[:-1])

    effsize = nx.effective_size(G)
    res['effective_size_mean'] = np.mean(list(effsize.values())[:-1])

    cv = np.array(list(nx.closeness_vitality(H).values()))
    cv[cv < 0] = 0
    res['closeness_vitality_mean'] = np.mean(cv) / n

    res['wiener_index'] = nx.wiener_index(H) / (n * (n - 1) / 2)

    A = nx.to_numpy_array(G)
    expA = expm(A)  # matrix exponential (scipy.linalg.expm)
    res['expA'] = np.log(expA[0, n - 1])
    res['expA_mean'] = np.log(np.mean(expA[np.triu_indices(n)]))

    return res
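An illustrative call of features_part2 (the layout of 'info' is inferred from the lookups above; this also assumes the defining module's own imports, numpy and scipy.linalg.expm, and a pre-3.0 NetworkX that still ships pagerank_numpy):

import networkx as nx

G = nx.DiGraph(nx.path_graph(5))  # tiny connected digraph, nodes 0..4
info = {'G': G, 'num_nodes': 5, 'num_units': 1, 'edges': list(G.edges())}
res = features_part2(info)
print(res['constraint_mean'], res['effective_size_mean'])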
Example #21
 def test_constraint_undirected(self):
     constraint = nx.constraint(self.G)
     assert_almost_equal(round(constraint['G'], 3), 0.400)
     assert_almost_equal(round(constraint['A'], 3), 0.595)
     assert_almost_equal(round(constraint['C'], 3), 1)
Example #22
def get_centralities(compare):

    params = [5000, 2000, 1000, 500, 100, 50, 40, 30, 20, 10, 5, 4, 3, 2, 1, 0]
    #[300000, 150000, 100000, 50000, 35000, 20000, 14000, 10000, 5000, 2000, 1000, 500, 100, 50, 30, 20, 10, 5, 1]
    folderout = 'networks/backboning_centralities/'
    if not os.path.exists(folderout):
        os.makedirs(folderout)

    time_nx = []
    time_ig = []
    ftimes = open(folderout + 'compare_comp_time.dat', 'w')

    ftimes.write('nc\tt_nx\tt_ig\n')

    for nc in params:
        ''' NETWORKX '''

        edges_nx = []
        t1 = time.time()

        print('Parse edges')
        for ind, line in enumerate(
                open('networks/backboning/nc_backboned_' + str(nc))):
            if 'nij' not in line:
                e1, e2, w, sign = line.strip().split('\t')
                edges_nx.append((e1, e2, {'weight': float(w)}))

        G_nx = nx.Graph()
        G_nx.add_edges_from(edges_nx)
        GC_nx = [
            c for c in sorted(
                nx.connected_components(G_nx), key=len, reverse=True)
        ][0]

        print(nc, '\tGet NC degrees')
        degrees_nx = add_df_meas(nx.degree_centrality(G_nx), 'degree_nx')

        print(nc, '\tGet NC clustering')
        clusterings_nx = add_df_meas(nx.clustering(G_nx), 'clustering_nx')

        print(nc, '\tGet NC pageranks')
        pageranks_nx = add_df_meas(nx.pagerank(G_nx), 'pagerank_nx')

        print(nc, '\tGet NC betweenness')
        betweennesses_nx = add_df_meas(nx.betweenness_centrality(G_nx),
                                       'betweenness_nx')

        print(nc, '\tGet NC closeness')
        closenesses_nx = add_df_meas(nx.closeness_centrality(G_nx),
                                     'closeness_nx')

        #print('Get eigenvector')
        #eigenvectors_nx   = add_df_meas(nx.eigenvector_centrality(G_nx), 'eigenvector_nx')

        print(nc, '\tGet NC constraint')
        constraints_nx = add_df_meas(nx.constraint(G_nx), 'constraint_nx')

        df_nx = degrees_nx.merge(clusterings_nx,
                                 left_index=True,
                                 right_index=True)
        df_nx = df_nx.merge(pageranks_nx, left_index=True, right_index=True)
        df_nx = df_nx.merge(betweennesses_nx,
                            left_index=True,
                            right_index=True)
        df_nx = df_nx.merge(closenesses_nx, left_index=True, right_index=True)
        df_nx = df_nx.merge(constraints_nx, left_index=True, right_index=True)

        t2 = time.time()
        t_nx = t2 - t1
        time_nx.append(t_nx)

        print('Time for NX:  ', round(t_nx, 2), ' s')
        ''' IGRAPH '''

        # get the igraph network
        t1 = time.time()
        ftempname = 'tempfile_nc_backboned' + str(nc)
        ftemp = open(ftempname, 'w')
        for line in open('networks/backboning/nc_backboned_' + str(nc)):
            if 'src' not in line:
                ftemp.write('\t'.join(line.strip().split('\t')[0:3]) + '\n')
        ftemp.close()
        G_ig = Graph.Read_Ncol(ftempname, weights=True, directed=False)
        os.remove(ftempname)

        # get a degree measure that matches:
        # nx computes degree centrality k/(N-1), while igraph computes the raw degree k
        # https://networkx.github.io/documentation/networkx-1.9/reference/generated/networkx.algorithms.centrality.degree_centrality.html
        print('\n', nc, '\tGet IG degrees')
        degrees_ig = {}
        G_ig.vs['degree_ig'] = G_ig.degree()
        N = len(G_ig.vs['degree_ig'])
        for v in G_ig.vs():
            degrees_ig[v['name']] = v['degree_ig'] / float(N - 1)

        # get the matching clustering
        # where nx gives 0 for clustering, igraph gives nan
        print(nc, '\tGet IG clustering')
        clusterings_ig = {}
        G_ig.vs['clustering_ig'] = G_ig.transitivity_local_undirected(
            weights=None)
        for v in G_ig.vs():
            if np.isnan(v['clustering_ig']):
                v['clustering_ig'] = 0
            clusterings_ig[v['name']] = v['clustering_ig']

        # match betweenness
        # nx gives the normalized betweenness, while igraph gives the raw value;
        # the normalization factor is Bnorm = (n*n - 3*n + 2) / 2.0     http://igraph.org/r/doc/betweenness.html
        print(nc, '\tGet IG betweenness')
        G_ig.vs['betweenness_ig'] = G_ig.betweenness(weights=None)
        betweennesses_ig = {}
        n = len(G_ig.vs())
        for v in G_ig.vs():
            Bnormalizer = (n * n - 3 * n + 2) / 2.0
            betweennesses_ig[v['name']] = v['betweenness_ig'] / Bnormalizer

        # comparing closeness:
        # NX: If the graph is not completely connected, this algorithm computes the closeness centrality for each connected part separately.
        #    https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.centrality.closeness_centrality.html
        # IG: If the graph is not connected, and there is no path between two vertices, the number of vertices is used instead of the length of the geodesic. This is always longer than the longest possible geodesic.
        # http://igraph.org/python/doc/igraph.GraphBase-class.html#closeness
        print(nc, '\tGet IG closeness')
        closenesses_ig = {}
        G_ig.vs['closeness_ig'] = G_ig.closeness(weights=None,
                                                 normalized=False)
        for v in G_ig.vs():
            closenesses_ig[v['name']] = v['closeness_ig']

        # get matching pagerank values
        # they match, besides some numerical things
        print(nc, '\tGet IG pageranks')
        pageranks_ig = {}
        G_ig.vs['pagerank_ig'] = G_ig.pagerank(weights=None)
        for v in G_ig.vs():
            pageranks_ig[v['name']] = v['pagerank_ig']

        # constraints match well
        print(nc, '\tGet IG constraint')
        constraints_ig = {}
        G_ig.vs['constraint_ig'] = G_ig.constraint(weights=None)
        for v in G_ig.vs():
            constraints_ig[v['name']] = v['constraint_ig']

        # G_ig.vs['eigenvector_ig']  = G_ig.eigenvector_centrality( weights = None )

        degrees_ig = add_df_meas(degrees_ig, 'degree_ig')
        clusterings_ig = add_df_meas(clusterings_ig, 'clustering_ig')
        betweennesses_ig = add_df_meas(betweennesses_ig, 'betweenness_ig')
        pageranks_ig = add_df_meas(pageranks_ig, 'pagerank_ig')
        constraints_ig = add_df_meas(constraints_ig, 'constraint_ig')
        closenesses_ig = add_df_meas(closenesses_ig, 'closeness_ig')

        df_ig = degrees_ig.merge(clusterings_ig,
                                 left_index=True,
                                 right_index=True)
        df_ig = df_ig.merge(pageranks_ig, left_index=True, right_index=True)
        df_ig = df_ig.merge(betweennesses_ig,
                            left_index=True,
                            right_index=True)
        df_ig = df_ig.merge(closenesses_ig, left_index=True, right_index=True)
        df_ig = df_ig.merge(constraints_ig, left_index=True, right_index=True)

        t2 = time.time()
        t_ig = t2 - t1
        time_ig.append(t_ig)

        print('Time for IG:  ', round(t_ig, 2), ' s\n\n')

        df_nx.to_csv(folderout + 'nc_backboned_centralities_NX_' + str(nc),
                     na_rep='nan')
        df_ig.to_csv(folderout + 'nc_backboned_centralities_IG_' + str(nc),
                     na_rep='nan')

        if compare:
            compare('degree    ', dict(degrees_nx.degree_nx), degrees_ig,
                    GC_nx)
            compare('clustering', dict(clusterings_nx.clustering_nx),
                    clusterings_ig, GC_nx)
            compare('pagerank   ', dict(pageranks_nx.pagerank_nx),
                    pageranks_ig, GC_nx)
            compare('betweenness', dict(betweennesses_nx.betweenness_nx),
                    betweennesses_ig, GC_nx)
            compare('closeness', dict(closenesses_nx.closeness_nx),
                    closenesses_ig, GC_nx)
            compare('constraint', dict(constraints_nx.constraint_nx),
                    constraints_ig, GC_nx)

        ftimes.write(str(nc) + '\t' + str(t_nx) + '\t' + str(t_ig) + '\n')
    ftimes.close()
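A minimal check of the betweenness normalization described in the comments above (illustrative; assumes networkx and python-igraph are installed):

import networkx as nx
from igraph import Graph

G_nx = nx.path_graph(4)  # 0-1-2-3
G_ig = Graph(edges=[(0, 1), (1, 2), (2, 3)])
n = G_ig.vcount()
Bnorm = (n * n - 3 * n + 2) / 2.0  # = 3.0 for n = 4
print([b / Bnorm for b in G_ig.betweenness()])  # [0.0, 0.667, 0.667, 0.0]
print(nx.betweenness_centrality(G_nx))          # same values from networkx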
Example #23
 def test_constraint_directed(self):
     constraint = nx.constraint(self.D)
     assert_almost_equal(round(constraint[0], 3), 1.003)
     assert_almost_equal(round(constraint[1], 3), 1.003)
     assert_almost_equal(round(constraint[2], 3), 1.389)
Example #24
 def test_constraint_isolated(self):
     G = self.G.copy()
     G.add_node(1)
     constraint = nx.constraint(G)
     assert_true(math.isnan(constraint[1]))
Example #25
 def test_constraint_directed(self):
     constraint = nx.constraint(self.D)
     assert constraint[0] == pytest.approx(1.003, abs=1e-3)
     assert constraint[1] == pytest.approx(1.003, abs=1e-3)
     assert constraint[2] == pytest.approx(1.389, abs=1e-3)
Example #26
 def test_constraint_undirected(self):
     constraint = nx.constraint(self.G)
     assert_almost_equal(round(constraint['G'], 3), 0.400)
     assert_almost_equal(round(constraint['A'], 3), 0.595)
     assert_almost_equal(round(constraint['C'], 3), 1)
Example #27
 def test_constraint_directed(self):
     constraint = nx.constraint(self.D)
     assert almost_equal(constraint[0], 1.003, places=3)
     assert almost_equal(constraint[1], 1.003, places=3)
     assert almost_equal(constraint[2], 1.389, places=3)
Example #28
 def test_constraint_undirected(self):
     constraint = nx.constraint(self.G)
     assert constraint["G"] == pytest.approx(0.400, abs=1e-3)
     assert constraint["A"] == pytest.approx(0.595, abs=1e-3)
     assert constraint["C"] == pytest.approx(1, abs=1e-3)
Example #29
 def test_constraint_undirected(self):
     constraint = nx.constraint(self.G)
     assert almost_equal(constraint["G"], 0.400, places=3)
     assert almost_equal(constraint["A"], 0.595, places=3)
     assert almost_equal(constraint["C"], 1, places=3)
Example #30
def static_indicators(path, file_name, num_firm, writer):
    density_array = np.zeros((len(file_name), num_firm))
    constraint_array = np.zeros((len(file_name), num_firm))
    degree_centrality_array = np.zeros((len(file_name), num_firm))
    betweenness_centrality_array = np.zeros((len(file_name), num_firm))
    closeness_centrality_array = np.zeros((len(file_name), num_firm))
    degree_centralization_array = np.zeros((len(file_name), num_firm))
    betweenness_centralization_array = np.zeros((len(file_name), num_firm))
    closeness_centralization_array = np.zeros((len(file_name), num_firm))
    bridging_ties_array = np.zeros((len(file_name), num_firm))
    modularity_array = np.zeros((len(file_name), num_firm))
    years = []
    for name in range(len(file_name)):
        degree_part_dict = dict()
        density_dict = dict()
        constraint_dict = dict()
        degree_centrality_dict = dict()
        betweenness_centrality_dict = dict()
        closeness_centrality_dict = dict()
        degree_centralization_dict = dict()
        betweenness_centralization_dict = dict()
        closeness_centralization_dict = dict()
        modularity_dict = dict()
        years.append(file_name[name][0:-5])
        # file path
        file_path = path + '/' + file_name[name]

        # read the data
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.loads(f.read())

        num_edges = len(data['links'])

        # read cluster membership, intra-cluster links, and link weights
        edges = [(data['links'][k]['source'], data['links'][k]['target'])
                 for k in range(num_edges)]
        clusters = [
            data['clusters'][k]['cluster']
            for k in range(len(data['clusters']))
        ]
        clusters_edges = [
            data['clusters'][k]['edges'] for k in range(len(data['clusters']))
        ]

        g_full = nx.Graph()
        g_full.add_nodes_from(list(range(num_firm)))
        g_full.add_edges_from(edges)

        degree_dict = dict(nx.degree(g_full))

        for i in range(len(clusters)):
            n_cluster = len(clusters[i])
            g = nx.Graph()
            g.add_nodes_from(clusters[i])
            g.add_edges_from(clusters_edges[i])

            degree_cluster = dict(nx.degree(g))
            sum_degree = sum(degree_cluster.values())

            degree_part_dict = {**degree_part_dict, **degree_cluster}

            density_cluster = nx.density(g)
            for firm in clusters[i]:
                density_dict[firm] = density_cluster

            constraint_cluster = nx.constraint(g)
            constraint_dict = {**constraint_dict, **constraint_cluster}

            degree_centrality_cluster = nx.degree_centrality(g)
            degree_centrality_dict = {
                **degree_centrality_dict,
                **degree_centrality_cluster
            }

            betweenness_centrality_cluster = nx.betweenness_centrality(g)
            betweenness_centrality_dict = {
                **betweenness_centrality_dict,
                **betweenness_centrality_cluster
            }

            closeness_centrality_cluster = nx.closeness_centrality(g)
            closeness_centrality_dict = {
                **closeness_centrality_dict,
                **closeness_centrality_cluster
            }
            if n_cluster == 1 or n_cluster == 2:
                for firm in clusters[i]:
                    degree_centralization_dict[firm] = 0
                    betweenness_centralization_dict[firm] = 0
                    closeness_centralization_dict[firm] = 0
            else:
                dc = fun1(degree_centrality_cluster.values())
                bc = fun1(betweenness_centrality_cluster.values())
                cc = fun1(closeness_centrality_cluster.values())
                degree_centralization_cluster = dc / (n_cluster - 2)
                betweenness_centralization_cluster = bc / (n_cluster - 1)
                closeness_centralization_cluster = cc * (2 * n_cluster - 3) / (
                    (n_cluster - 2) * (n_cluster - 1))
                for firm in clusters[i]:
                    degree_centralization_dict[
                        firm] = degree_centralization_cluster
                    betweenness_centralization_dict[
                        firm] = betweenness_centralization_cluster
                    closeness_centralization_dict[
                        firm] = closeness_centralization_cluster

            # modularity of the subgroup (cluster)
            modularity = 0
            cluster = clusters[i]
            if n_cluster == 1:
                modularity = np.nan  # undefined for a singleton cluster
                modularity_dict[cluster[0]] = np.nan
            else:
                for firm_i in range(len(cluster)):
                    for firm_j in range(firm_i + 1, len(cluster)):
                        i_j = degree_cluster[cluster[firm_i]] * degree_cluster[
                            cluster[firm_j]] / sum_degree
                        if [cluster[firm_i],
                                cluster[firm_j]] in clusters_edges[i]:
                            modularity += (1 - i_j) / sum_degree
                        else:
                            modularity += (0 - i_j) / sum_degree

            for firm in clusters[i]:
                modularity_dict[firm] = modularity

        for i in range(num_firm):
            density_array[name, i] = density_dict[i]
            constraint_array[name, i] = constraint_dict[i]
            degree_centrality_array[name, i] = degree_centrality_dict[i]
            betweenness_centrality_array[name,
                                         i] = betweenness_centrality_dict[i]
            closeness_centrality_array[name, i] = closeness_centrality_dict[i]
            degree_centralization_array[name,
                                        i] = degree_centralization_dict[i]
            betweenness_centralization_array[
                name, i] = betweenness_centralization_dict[i]
            closeness_centralization_array[
                name, i] = closeness_centralization_dict[i]
            bridging_ties_array[name, i] = degree_dict[i] - degree_part_dict[i]
            modularity_array[name, i] = modularity_dict[i]

    write_excel(writer, density_array, years, list(range(num_firm)), '网络密度')
    write_excel(writer, constraint_array, years, list(range(num_firm)),
                'Constraint')
    write_excel(writer, degree_centrality_array, years, list(range(num_firm)),
                '点度中心度')
    write_excel(writer, betweenness_centrality_array, years,
                list(range(num_firm)), '中介中心度')
    write_excel(writer, closeness_centrality_array, years,
                list(range(num_firm)), '接近中心度')
    write_excel(writer, degree_centralization_array, years,
                list(range(num_firm)), '点度中心势')
    write_excel(writer, betweenness_centralization_array, years,
                list(range(num_firm)), '中介中心势')
    write_excel(writer, closeness_centralization_array, years,
                list(range(num_firm)), '接近中心势')
    write_excel(writer, bridging_ties_array, years, list(range(num_firm)),
                'bridging_ties')
    write_excel(writer, modularity_array, years, list(range(num_firm)),
                '子群模块度')
    print('All ten static indicators computed')
Example #31
TE_system.add_edge('V9', 'LI4')
TE_system.add_edge('V9', 'FI9')
TE_system.add_edge('Stripper', 'FI9')
TE_system.add_edge('Stripper', 'TI5')
TE_system.add_edge('TI5', 'PLC10')
TE_system.add_edge('PLC10', 'V10')
TE_system.add_edge('V10', 'Stripper')
TE_system.add_edge('V10', 'FI8')

# Control Links of Compressor
TE_system.add_edge('Compressor', 'FI10')
TE_system.add_edge('FI10', 'PLC11')
TE_system.add_edge('PLC11', 'V11')
TE_system.add_edge('V11', 'Compressor')

undirected_TE = TE_system.to_undirected()

DC = Metric('DC', nx.degree_centrality(TE_system), True)
CC = Metric('CC', nx.closeness_centrality(TE_system), True)
EC = Metric('EC', nx.eigenvector_centrality(TE_system), True)
C = Metric('C', nx.constraint(undirected_TE), False)
FBC = Metric('FBC', nx.current_flow_betweenness_centrality(undirected_TE),
             True)
metrics = (DC, CC, FBC, EC, C)
relative_closeness = TOPSIS(metrics)

file_name_1 = 'TE_metrics_Jiang.xls'
save_all_metrics(file_name_1, metrics)
file_name_2 = 'relative_closeness_Jiang.xls'
closeness_save(file_name_2, relative_closeness)
Example #32
def get_each_range_network(con, start, end, span, city, gen_all=False):
    """
    生成给定时间范围,以指定城市为中心的合作网络
    也可以生成该时间范围的全局合作网络

    :param con: 数据库连接
    :param start: 开始年份
    :param end: 结束年份
    :param span: 时间跨度
    :param city: 城市
    :param gen_all: 是否生成全局网络
    :return:
    """
    city_networks = []
    all_networks = []
    cursor = con.cursor()

    for year in range(start, end - span + 2):
        # print('building the {}-{} cooperation network for {}'.format(year, year + span - 1, city))
        city_query_sql = 'SELECT UPPER(TRIM(`city1`)), UPPER(TRIM(`city2`)), `year` FROM `energy_city_cooccurrence` ' \
                         'WHERE `year` BETWEEN ? AND ? AND (`city1` LIKE ? OR `city2` LIKE ?)'

        cursor.execute(city_query_sql, (year, year + span - 1, city, city))
        results = cursor.fetchall()

        cities = generate_matrix_index(results)
        cur_city_network = get_cooccurrance_network(cities, results, 'CITY')
        city_networks.append(
            (str(year) + '-' + str(year + span - 1), cur_city_network))
        # nx.write_gexf(cur_city_network, '../results/' + str(year) + '-' + str(year + span - 1) + '-' + city + '.gexf')

        if gen_all:
            # print('building the {}-{} cooperation network for all cities'.format(year, year + span - 1))
            all_query_sql = 'SELECT UPPER(TRIM(`city1`)), UPPER(TRIM(`city2`)), `year` FROM `energy_city_cooccurrence` ' \
                            'WHERE `year` BETWEEN ? AND ?'

            cursor.execute(all_query_sql, (year, year + span - 1))
            results = cursor.fetchall()

            all_cities = generate_matrix_index(results)
            cur_all_network = get_cooccurrance_network(all_cities, results,
                                                       'CITY')

            # compute edge-weighted PageRank (per-city values sum to ~1) and attach it as a node attribute
            pagerank = nx.pagerank(cur_all_network, weight='weight')
            nx.set_node_attributes(cur_all_network, pagerank, 'Page Rank')

            # attach geocodes to the city nodes
            city_name_dict = nx.get_node_attributes(cur_all_network, 'CITY')
            city_name = city_name_dict.values()
            latitude_dict, longitude_dict = get_geocode(city_name)

            latitude = {}
            longitude = {}
            for i, c in city_name_dict.items():
                # drop the node if its coordinates are missing (these nodes are not important here)
                if c not in latitude_dict or isclose(float(latitude_dict[c]),
                                                     float(999)):
                    cur_all_network.remove_node(i)
                    continue
                latitude[i] = latitude_dict[c]
                longitude[i] = longitude_dict[c]

            nx.set_node_attributes(cur_all_network, latitude, 'Latitude')
            nx.set_node_attributes(cur_all_network, longitude, 'Longitude')

            # attach the (unnormalized) average distance to each city node
            print('computing average inter-city distances for the global network; this may take a while')
            avg_distance_dict = {}
            for index in latitude.keys():
                city1 = (latitude[index], longitude[index])
                single_city_distance_sum = 0
                for index2 in latitude.keys():
                    if index == index2:
                        continue
                    else:
                        city2 = (latitude[index2], longitude[index2])
                        single_city_distance_sum += great_circle(
                            city1, city2).kilometers
                avg_distance_dict[index] = single_city_distance_sum / (
                    len(latitude) - 1)
                # TODO: to save time while debugging, skip the average-distance computation here
                # avg_distance_dict[index] = 0
            print(avg_distance_dict)
            nx.set_node_attributes(cur_all_network, avg_distance_dict,
                                   'Average Distance')

            # attach degree centrality, clustering coefficient and structural-hole constraint to each city
            print('computing basic network indicators; the structural-hole step may take a while')
            dc = nx.degree_centrality(cur_all_network)
            # triangle = nx.triangles(cur_all_network)
            triangle = nx.clustering(cur_all_network)
            sh = nx.constraint(cur_all_network)

            nx.set_node_attributes(cur_all_network, dc,
                                   'Normalized Degree Centrality')
            nx.set_node_attributes(cur_all_network, triangle, 'Triangles')
            nx.set_node_attributes(cur_all_network, sh,
                                   'Structural Hole Constraint')

            all_networks.append(
                (str(year) + '-' + str(year + span - 1), cur_all_network))
            # nx.write_gexf(cur_all_network,
            #               '../results/' + str(year) + '-' + str(year + span - 1) + '-all.gexf')
    print('networks for all periods generated')
    if gen_all:
        return city_networks, all_networks
    else:
        return city_networks