Example #1
def renomal_eff():
    # Start from a star: node 0 joined to three leaves.
    G = nx.Graph()
    node_index = 1
    for i in range(3):
        G.add_edge(0, node_index)
        node_index += 1
    nodes_degree = list(nx.degree(G))
    print(nx.global_efficiency(G))
    # Attach 2*deg(v) new leaf nodes to every existing node v, then
    # measure how the global efficiency changes.
    for degree in nodes_degree:
        for i in range(2 * degree[1]):
            G.add_edge(degree[0], node_index)
            node_index += 1
    print(nx.global_efficiency(G))
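For the initial star above (node 0 joined to three leaves), the three centre-leaf pairs are at distance 1 and the three leaf-leaf pairs at distance 2, so the first print should show (3*1 + 3*0.5)/6 = 0.75. A minimal check of that value, assuming only networkx:

import networkx as nx

star = nx.star_graph(3)  # node 0 plus three leaves
assert abs(nx.global_efficiency(star) - 0.75) < 1e-12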
Example #2
def globaleff_metric(g):
    """
    Calculate the global efficiency of g.
    """
    ge = nx.global_efficiency(g)
    return ge
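A short usage sketch for this wrapper; the random graph is only an illustration:

import networkx as nx

g = nx.erdos_renyi_graph(50, 0.1, seed=42)
print(globaleff_metric(g))  # a float between 0 and 1 for unweighted graphs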
Example #3
def compute_model_graph_metrics(model_name, dataset, root_dir, epoch,
                                model_location):
    model = torch.load(model_location)
    if dataset == 'mnist':
        input_dim = (1, 1, 28, 28)
    elif dataset == 'cifar10':
        input_dim = (1, 3, 32, 32)

    param_info = get_model_param_info(model_name, dataset)

    architecture = model_name + "_" + dataset
    if (architecture not in model_graph_dict) or (epoch == 0):
        print("Architecture: {} not found, creating".format(architecture))
        NNG = nn_graph.NNGraph()
        NNG.parameter_graph(model, param_info, input_dim, ignore_zeros=True)
        model_graph_dict[architecture] = NNG
    else:
        # Reuse the cached graph so NNG is always defined below.
        NNG = model_graph_dict[architecture]

    s_metric = nx.s_metric(NNG.G, normalized=False)
    degree_assortativity_coefficient = nx.degree_assortativity_coefficient(
        NNG.G)

    diameter = nx.diameter(NNG.G)
    global_efficiency = nx.global_efficiency(NNG.G)

    return [
        s_metric, degree_assortativity_coefficient, diameter, global_efficiency
    ]
Example #4
def get_global_characteristics(G):
    global_dict = {}
    suffix = G.name
    global_dict[f'GlobalEfficiency_{suffix}'] = nx.global_efficiency(G)
    global_dict[f'LocalEfficiency_{suffix}'] = nx.local_efficiency(G)

    return global_dict
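A usage sketch, assuming networkx is imported as nx; the graph's name supplies the dictionary-key suffix:

import networkx as nx

G = nx.karate_club_graph()
G.name = 'karate'
print(get_global_characteristics(G))
# {'GlobalEfficiency_karate': ..., 'LocalEfficiency_karate': ...}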
Example #5
def clustering_coeff_vic(AG, title, ax1, ax2, iterations, color):
    resultclusteringcoef = []
    resultglobaleficienc = []
    numberedges = []
    for i in range(iterations):
        # NetworkX < 3.0; on newer versions use nx.from_numpy_array
        Gaux = nx.from_numpy_matrix(AG)
        resultclusteringcoef.append(nx.average_clustering(Gaux))
        resultglobaleficienc.append(nx.global_efficiency(Gaux))
        numberedges.append(Gaux.number_of_edges())
        nonzero = np.nonzero(AG)
        if nonzero[0].size == 0:
            break  # no weighted edges left to prune
        # Remove one edge carrying the smallest nonzero weight.
        minval = np.min(AG[nonzero])
        a, b = np.where(AG == minval)
        a, b = a[0], b[0]
        AG[a, b] = 0
        AG[b, a] = 0
    ax1.plot(numberedges, resultclusteringcoef, color+'-', label=title)
    ax1.set_xlim([min(numberedges), max(numberedges)])
    ax1.title.set_text('Clustering coefficient')
    ax1.set_xlabel('Number of edges')
    ax1.invert_xaxis()
    ax1.grid()
    ax2.plot(numberedges, resultglobaleficienc, color+'-', label=title)
    ax2.set_xlim([min(numberedges), max(numberedges)])
    ax2.title.set_text('Global Efficiency')
    ax2.set_xlabel('Number of edges')
    ax2.invert_xaxis()
    ax2.grid()
Example #6
 def test_global_efficiency_complete_graph(self):
     """
     Tests that the average global efficiency of the complete graph is one.
     """
     for n in range(2, 10):
         G = nx.complete_graph(n)
         assert_equal(nx.global_efficiency(G), 1)
Example #7
 def test_global_efficiency_complete_graph(self):
     """
     Tests that the average global efficiency of the complete graph is one.
     """
     for n in range(2, 10):
         G = nx.complete_graph(n)
         assert_equal(nx.global_efficiency(G), 1)
Example #8
def calculate_networks_indicators(graph):
    """计算基本网络指标"""
    degree_centrality = nx.degree_centrality(graph)
    nodes = list(degree_centrality.keys())
    betweenness_centrality = nx.betweenness_centrality(graph, weight='weight')
    network_indicators = pd.DataFrame({
        'nodes': nodes,
        'degree_centrality': [degree_centrality[node] for node in nodes],
        'betweenness_centrality': [betweenness_centrality[node]
                                   for node in nodes],
    })

    network_indicators['local_reaching_centrality'] = [
        nx.local_reaching_centrality(graph, node, weight='weight')
        for node in nodes
    ]
    constraint = nx.constraint(graph, weight='weight')
    network_indicators['constraint'] = [constraint[node] for node in nodes]
    effective_size = nx.effective_size(graph, weight='weight')
    network_indicators['effective_size'] = [
        effective_size[node] for node in nodes
    ]
    triangles = nx.triangles(graph)
    network_indicators['triangles'] = [triangles[node] for node in nodes]
    clustering = nx.clustering(graph, weight='weight')
    network_indicators['clustering'] = [clustering[node] for node in nodes]

    weight_dict = {
        item[0]: item[1]
        for item in nx.degree(graph, weight='weight')
    }
    degree_dict = {item[0]: item[1] for item in nx.degree(graph)}
    average_weight_dict = {
        key: (weight_dict[key] / degree_dict[key]
              if degree_dict[key] != 0 else 0)
        for key in weight_dict
    }
    network_indicators['tie_strength'] = [
        average_weight_dict[node] for node in nodes
    ]
    network_indicators['number_of_node'] = nx.number_of_nodes(graph)
    network_indicators['density'] = nx.density(graph)
    cliques = nx.graph_clique_number(graph)
    if cliques >= 3:
        network_indicators['cliques'] = cliques
    else:
        network_indicators['cliques'] = 0
    network_indicators['efficiency'] = nx.global_efficiency(graph)
    network_indicators['isolates'] = nx.number_of_isolates(graph)

    network_indicators = network_indicators[[
        'nodes', 'degree_centrality', 'betweenness_centrality',
        'local_reaching_centrality', 'constraint', 'effective_size',
        'triangles', 'clustering', 'tie_strength', 'number_of_node', 'density',
        'cliques', 'efficiency', 'isolates'
    ]]
    return network_indicators
Example #9
def calculate_global_measures(G,
                              partition=None,
                              existing_global_measures=None):
    '''
    Calculate global measures `average_clustering`,
    `average_shortest_path_length`, `assortativity`, `modularity`, and
    `efficiency` of G.

    Parameters
    ----------
    G : :class:`networkx.Graph`
        A binary graph
    partition : dict, optional
        A nodal partition of G. A dictionary mapping nodes of G to modules.
        Pass a partition in order to calculate the modularity of G.
    existing_global_measures : dict, optional
        An existing dictionary of global measures of G can be passed.
        :func:`calculate_global_measures` will not recalculate any measures
        already indexed in this dictionary

    Returns
    -------
    dict
        a dictionary of global network measures of G

    See Also
    --------
    :func:`scona.BrainNetwork.calculate_global_measures`
    '''
    # ==== MEASURES ====================
    if existing_global_measures is not None:
        global_measures = existing_global_measures.copy()
    else:
        global_measures = {}

    # ---- Clustering coefficient ------
    if 'average_clustering' not in global_measures:
        global_measures['average_clustering'] = (nx.average_clustering(G))

    # ---- Shortest path length --------
    if 'average_shortest_path_length' not in global_measures:
        global_measures['average_shortest_path_length'] = (
            nx.average_shortest_path_length(G))

    # ---- Assortativity ---------------
    if 'assortativity' not in global_measures:
        global_measures['assortativity'] = (np.mean(
            nx.degree_assortativity_coefficient(G)))

    # ---- Modularity ------------------
    if partition is not None:
        if 'modularity' not in global_measures:
            global_measures['modularity'] = (calc_modularity(G, partition))

    #  ---- Efficiency ------------------
    if 'efficiency' not in global_measures:
        global_measures['efficiency'] = (nx.global_efficiency(G))

    return global_measures
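A usage sketch, under the assumptions that G is connected (nx.average_shortest_path_length raises on disconnected graphs) and that the module-level imports (networkx as nx, numpy as np) are in scope; the partition argument, and the calc_modularity helper it relies on, are only needed for modularity:

import networkx as nx

G = nx.connected_watts_strogatz_graph(100, 4, 0.1, seed=1)
measures = calculate_global_measures(G)
print(sorted(measures))
# ['assortativity', 'average_clustering', 'average_shortest_path_length', 'efficiency']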
Example #10
 def __init__(self, matrix, names, coordinates, colors):
     self.num_of_nodes = 0
     self.nodes = []
     self.edges = []
     self.set_connectivity_matrix(matrix, names, coordinates, colors)
     self.graphnx = self.built_graph()
     self.calculate_bc(self.graphnx)
     self.global_efficiency = nx.global_efficiency(self.graphnx)
     self.set_local_efficiency()
Example #11
def gete(matrix_df):
    # Build an undirected graph from a binary adjacency matrix (a pandas
    # DataFrame) and return its global efficiency.
    G = nx.Graph()
    matrix = matrix_df.values
    for i in range(len(matrix)):
        for j in range(len(matrix)):
            if matrix[i][j] == 1:
                G.add_edge(i, j)
    e = nx.global_efficiency(G)
    return e
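A usage sketch with a toy symmetric 0/1 adjacency matrix, assuming pandas and numpy are available:

import numpy as np
import pandas as pd

adj = pd.DataFrame(np.array([[0, 1, 1],
                             [1, 0, 0],
                             [1, 0, 0]]))
print(gete(adj))  # path 1-0-2: (1 + 1 + 1/2) / 3 = 0.8333...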
Example #12
    def global_efficiency(self) -> float:
        if self._global_efficiency is None:
            if self.directed:
                self._global_efficiency = global_efficiency_directional(
                    self.graph)
            else:
                self._global_efficiency = global_efficiency(self.graph)

        return self._global_efficiency
Example #13
def avg_inverse_distance(graph):
    """
    The larger the average inverse shortest-path distance, the more robust
    the graph. Through the lens of network connectivity: a larger average
    inverse distance means a better-connected, and hence more robust, graph.
    Unlike avg_distance(), this measure is well defined for disconnected
    graphs, since unreachable pairs simply contribute zero.

    :param graph: undirected NetworkX graph
    :return: a float
    """
    return round(nx.global_efficiency(graph), 2)
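As the docstring says, the measure stays finite on disconnected graphs, because unreachable pairs contribute an inverse distance of zero. A minimal illustration:

import networkx as nx

G = nx.Graph([(0, 1), (2, 3)])  # two disconnected edges
# The pairs (0,1) and (2,3) contribute 1 each; the four cross-component
# pairs contribute 0, so the efficiency is 2/6 and the rounded result 0.33.
print(avg_inverse_distance(G))  # 0.33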
Example #14
def calc_graph(matrix):
    thresholds = [90, 85, 80, 75]
    glob = np.zeros((1, 4))
    loc = np.zeros((1, 4))
    Q = np.zeros((1, 4))
    Ch = np.zeros((1, 4))
    Ph = np.zeros((1, 4))

    data = np.zeros((1, 5))

    # Run graph measure analysis
    for index, threshold in enumerate(thresholds):
        graph = mat2graph_threshold(matrix, threshold)

        # Calculating global and average local efficiency
        glob[0, index] = nx.global_efficiency(graph)
        loc[0, index] = nx.local_efficiency(graph)

        # Community detection and modularity (Louvain, resolution 1.25)
        part = community.best_partition(graph, weight='weight',
                                        resolution=1.25)
        Q[0, index] = community.modularity(part, graph)

        # Calculating connector and provincial hubs
        Z = module_degree_zscore(matrix, part)
        P = participation_coefficient(matrix, part)
        # connector hubs
        ch = np.zeros(matrix.shape[0])
        for i in range(len(ch)):
            if P[i] > 0.8 and Z[i] < 1.5:
                ch[i] = 1.0
        Ch[0, index] = np.sum(ch)

        # provincial hubs
        ph = np.zeros(matrix.shape[0])
        for i in range(len(ph)):
            if P[i] <= 0.3 and Z[i] >= 1.5:
                ph[i] = 1.0
        Ph[0, index] = np.sum(ph)

    # Averaging over each graph threshold
    meanglob = np.mean(glob)
    meanloc = np.mean(loc)
    meanQ = np.mean(Q)
    meanCh = np.mean(Ch)
    meanPh = np.mean(Ph)
    data[0, 0] = meanglob
    data[0, 1] = meanloc
    data[0, 2] = meanQ
    data[0, 3] = meanCh
    data[0, 4] = meanPh
    return data
Example #15
def analyze_clustering(G):
    average_clustering_coefficient = approximation.average_clustering(G)
    average_clustering = nx.average_clustering(G)
    average_shortest_path_length = nx.average_shortest_path_length(G)
    local_efficiency = nx.local_efficiency(G)
    global_efficiency = nx.global_efficiency(G)
    table = prettytable.PrettyTable(
        ['Average clustering', 'Average clustering coefficient', 'Average shortest path length'])
    table.add_row([average_clustering, average_clustering_coefficient, average_shortest_path_length])
    print(table)
    table = prettytable.PrettyTable(['Local efficiency', 'Global efficiency'])
    table.add_row([local_efficiency, global_efficiency])
    print(table)
Example #16
def avg_inverse_distance(graph, **kwargs):
    """
    The average inverse distance between all pairs of nodes in the graph.
    The larger the average inverse shortest path distance, the more robust the graph.
    This can be viewed through the lens of network connectivity i.e., larger average inverse distance
    --> better connected graph --> more robust graph :cite:`ellens2013graph`.

    Resolves the issue of not working for disconnected graphs in the avg_distance() function.

    :param graph: undirected NetworkX graph
    :return: a float
    """

    return round(nx.global_efficiency(graph), 2)
Example #17
def circuit_analysis(depth, trail, varying_param):
    if gdv_name == "TFL":
        folder = "BNTF"
        depth_string = "{:02}".format(depth)
        name_end = "TFL"
        if nr_qubits == 54:
            name_end = "QSE"

    elif gdv_name == "QSE":
        folder = "BSS"
        depth_string = "{:03}".format(depth)
        name_end = "QSE"

    # if nr_qubits==54:
    #     gdv_name = "QSE"

    qasm_file_name = "_private_benchmark/{}/{}QBT_{}CYC_{}_{}.qasm".format(
        folder, nr_qubits, depth_string, name_end, trail)

    solution_file_name = "_private_benchmark/meta/{}QBT_{}CYC_{}_{}_solution.csv".format(
        nr_qubits, depth_string, name_end, trail)

    # print("qiskit", depth)
    # input qasm file as circuit
    test_circuit = qiskit.QuantumCircuit.from_qasm_file(qasm_file_name)

    """
        NetworkX analysis data

    """
    dag_circ = circuit_to_dag(test_circuit)

    undirect = dag_circ._multi_graph.to_undirected(as_view=True)
    weighted = convert_to_weighted_graph(dag_circ._multi_graph)

    # import matplotlib.pyplot as plt
    # # nx.draw_networkx(undirect, with_labels=False, node_size=10)
    # nx.draw_networkx(dag_circ._multi_graph, with_labels=False, node_size=10)
    # plt.show()

    return (max(nx.pagerank(weighted).values()),
            nx.number_connected_components(undirect),
            undirect.number_of_edges(),
            undirect.number_of_nodes(),
            nx.global_efficiency(weighted),
            nx.s_metric(dag_circ._multi_graph, False))
Example #18
 def get_graphFeatures(self, G=None):
     if G is not None:
         #density
         density = nx.density(G)
         #average degree
         avg_degree = self.get_average_degree(G)
         #average closeness centrality
         avg_closeness = np.array(
             list(nx.closeness_centrality(G, distance='weight').values()))
         avg_closeness = np.mean(avg_closeness[avg_closeness != 0.0])
         #average betweenness centrality
         btw_centrality = np.array(
             list(nx.betweenness_centrality(G, weight='weight').values()))
         btw_centrality = np.mean(btw_centrality[btw_centrality != 0.0])
         #average harmonic centrality
         harmonic = np.array(
             list(nx.harmonic_centrality(G, distance='weight').values()))
         harmonic = np.mean(harmonic[harmonic != 0.0])
         #get eccentricity, radius, efficiency
         eccen = nx.eccentricity(G)
         radius = nx.radius(G, e=eccen)
         eccen = np.array(list(eccen.values()))
         avg_eccen = np.mean(eccen[eccen != 0.0])
         efficiency = nx.global_efficiency(G)
         #get transitivity and average cluster coefficient
         transitivity = nx.transitivity(G)
         avg_cluster_coef = nx.average_clustering(G,
                                                  weight='weight',
                                                  count_zeros=False)
         #avg vote rank
         avg_voteRank = stat.mean(nx.voterank(G))
         #avg information centrality
         ic = np.array(
             list(nx.information_centrality(G, weight='weight').values()))
         avg_ic = np.mean(ic[ic != 0.0])
         #avg load centrality
         lc = np.array(list(
             nx.load_centrality(G, weight='weight').values()))
         avg_lc = np.mean(lc[lc != 0.0])
         return [
             self.num_nuclei, density, avg_degree, avg_closeness,
             btw_centrality, harmonic, avg_eccen, radius, efficiency,
             transitivity, avg_cluster_coef, avg_voteRank, avg_ic, avg_lc
         ]
     else:
         return None
Example #19
    def compute_features(self):

        # local efficiency
        self.add_feature(
            "local_efficiency",
            lambda graph: nx.local_efficiency(graph),
            "The local efficiency",
            InterpretabilityScore(4),
        )

        # global efficiency
        self.add_feature(
            "global_efficiency",
            lambda graph: nx.global_efficiency(graph),
            "The global efficiency",
            InterpretabilityScore(4),
        )
Example #20
 def record(self, additional=True):
     with open(self.logging + '/info.txt', 'w') as f:
         f.write(" Number of nodes :" + str(len(self.nodes)) + '\n')
         f.write(" Number of edges :" + str(len(self.edges)) + '\n')
         f.write(" Number of samples :" + str(self.samplesCnt) + '\n')
         if additional:
             uG = nx.Graph(self.G)
             connectedCnt = nx.number_connected_components(uG)
             f.write(" number_connected_components :" + str(connectedCnt) +
                     '\n')
             if connectedCnt == 1:
                 f.write(" Diameter :" + str(nx.diameter(uG)) + '\n')
                 f.write(" Radius :" + str(nx.radius(uG)) + '\n')
                 f.write(" average_shortest_path_length :" +
                         str(nx.average_shortest_path_length(uG)) + '\n')
             f.write(" Density :" + str(nx.density(uG)) + '\n')
             f.write(" average_clustering :" +
                     str(nx.average_clustering(uG)) + '\n')
             f.write(" node_connectivity :" +
                     str(nx.node_connectivity(self.G)) + '\n')
             f.write(" global_efficiency :" +
                     str(nx.global_efficiency(uG)) + '\n')
Example #21
def roda_funcoes_centralidade(nx_grafo, file_name_prefix):
    # Runs graph centrality algorithms:
    # Closeness Centrality, Betweenness Centrality, and Global Efficiency.
    # The results of each run are written to a .txt file.

    # Closeness Centrality
    print('Running Closeness Centrality algorithm')
    closeness = nx.closeness_centrality(nx_grafo, distance='weight')

    # Betweenness Centrality
    print('Running Betweenness Centrality algorithm')
    betweenness = nx.betweenness_centrality(nx_grafo,
                                            normalized=False,
                                            weight='weight')

    # Degree Centrality
    print('Running Degree Centrality algorithm')
    degree_centrality = nx.degree_centrality(nx_grafo)

    # Global Efficiency
    print('Running Global Efficiency algorithm')
    efficiency = nx.global_efficiency(nx.to_undirected(nx_grafo))

    # Open a text file and write the results
    file = 'D:\\repos\\study\\mestrado\\artigos\\UBS\\resultados\\%s' \
        % str(file_name_prefix) + '_centralidade.txt'
    print(file)
    f = open(file, 'w+')
    f.write('Closeness Centrality' + '\n')
    f.write(str(closeness) + '\n')
    f.write('Betweenness Centrality' + '\n')
    f.write(str(betweenness) + '\n')
    f.write('Degree Centrality' + '\n')
    f.write(str(degree_centrality) + '\n')
    f.write('Global Efficiency' + '\n')
    f.write(str(efficiency))
    f.close()
Example #22
def extract_feature_matrices(metadata, basepath):
    IDs = metadata['URSI']
    clustering_coefficient_vectors = []
    local_efficiency_vectors = []
    participation_coefficient_vectors = []
    for i in range(len(IDs)):
        graph = read_input_graph(IDs[i])

        clustering_coefficient_vectors.append(
            list(nx.clustering(graph).values()))
        local_efficiency_vectors.append(
            [nx.global_efficiency(graph.subgraph(graph[v])) for v in graph])
        participation_coefficient_vectors.append(
            list(nx.hits(graph)[0].values()))

    edge_betweenness_vectors = compute_edge_betweenness(metadata)

    clustering_coefficient_matrix = to_feature_matrix(
        clustering_coefficient_vectors, metadata)
    local_efficiency_matrix = to_feature_matrix(local_efficiency_vectors,
                                                metadata)
    participation_coefficient_matrix = to_feature_matrix(
        participation_coefficient_vectors, metadata)
    edge_betweenness_matrix = to_feature_matrix(edge_betweenness_vectors,
                                                metadata)

    clustering_coefficient_matrix.to_csv(basepath +
                                         'matrix_clustering_coefficient.csv',
                                         index=False)
    local_efficiency_matrix.to_csv(basepath + 'matrix_local_efficiency.csv',
                                   index=False)
    participation_coefficient_matrix.to_csv(
        basepath + 'matrix_participation_coefficient.csv', index=False)
    edge_betweenness_matrix.to_csv(basepath + 'matrix_edge_betweenness.csv',
                                   index=False)

    return clustering_coefficient_matrix, local_efficiency_matrix, participation_coefficient_matrix, edge_betweenness_matrix
Example #23
    def compute_features(self):

        g = rbc(self.graph)

        # Basic stats
        self.add_feature(
            "number_of_edges",
            lambda graph: graph.number_of_edges(),
            "Number of edges in Jaccard similarity graph",
            InterpretabilityScore(5),
            function_args=g,
        )

        self.add_feature(
            "number_of_edges_no_selfloops",
            lambda graph: remove_selfloops(graph).number_of_edges(),
            "Number of edges, not including selfloops, in Jaccard similarity graph",
            InterpretabilityScore(5),
            function_args=g,
        )

        self.add_feature(
            "connectance",
            lambda graph: nx.density(graph),
            "Connectance of Jaccard similarity graph",
            InterpretabilityScore(5),
            function_args=g,
        )

        self.add_feature(
            "diameter",
            lambda graph: nx.diameter(ensure_connected(graph)),
            "Diameter of Jaccard similarity graph",
            InterpretabilityScore(5),
            function_args=g,
        )

        self.add_feature(
            "radius",
            lambda graph: nx.radius(ensure_connected(graph)),
            "Radius of Jaccard similarity graph",
            InterpretabilityScore(5),
            function_args=g,
        )

        # Assortativity
        self.add_feature(
            "degree_assortativity_coeff",
            lambda graph: nx.degree_assortativity_coefficient(graph),
            "Similarity of connections in Jaccard similarity graph with respect to the node degree",
            InterpretabilityScore(4),
            function_args=g,
        )

        # Cliques
        self.add_feature(
            "graph_clique_number",
            lambda graph: nx.graph_clique_number(graph),
            "The size of the largest clique in the Jaccard similarity graph",
            InterpretabilityScore(3),
            function_args=g,
        )

        self.add_feature(
            "num_max_cliques",
            lambda graph: nx.graph_number_of_cliques(graph),
            "The number of maximal cliques in the Jaccard similarity graph",
            InterpretabilityScore(3),
            function_args=g,
        )

        # Clustering
        self.add_feature(
            "transitivity",
            lambda graph: nx.transitivity(graph),
            "Transitivity of the graph",
            InterpretabilityScore(4),
            function_args=g,
        )

        # Components
        self.add_feature(
            "is_connected",
            lambda graph: nx.is_connected(graph) * 1,
            "Whether the Jaccard similarity graph is connected or not",
            InterpretabilityScore(5),
            function_args=g,
        )

        self.add_feature(
            "num_connected_components",
            lambda graph: nx.number_connected_components(graph),
            "The number of connected components",
            InterpretabilityScore(5),
            function_args=g,
        )

        self.add_feature(
            "largest_connected_component",
            lambda graph: ensure_connected(graph).number_of_nodes(),
            "The size of the largest connected component",
            InterpretabilityScore(4),
            function_args=g,
        )

        # Efficiency
        self.add_feature(
            "global_efficiency",
            lambda graph: nx.global_efficiency(graph),
            "The global efficiency",
            InterpretabilityScore(4),
            function_args=g,
        )

        # Node connectivity
        self.add_feature(
            "node_connectivity",
            lambda graph: nx.node_connectivity(graph),
            "Node connectivity",
            InterpretabilityScore(4),
            function_args=g,
        )

        self.add_feature(
            "edge_connectivity",
            lambda graph: nx.edge_connectivity(graph),
            "Edge connectivity",
            InterpretabilityScore(4),
            function_args=g,
        )
Example #24
 def test_global_efficiency(self):
     assert_equal(nx.global_efficiency(self.G2), 5 / 6)
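The expected value matches the 4-cycle checked in Example #32, which is presumably how self.G2 is built in the surrounding test class: four adjacent pairs at distance 1 and two opposite pairs at distance 2 give (4*1 + 2*0.5)/6 = 5/6. A standalone check:

import networkx as nx

assert nx.global_efficiency(nx.cycle_graph(4)) == 5 / 6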
Example #25
def compute_summaries(G):
    """ Compute network features, computational times and their nature.
    
    Evaluate 54 summary statistics of a network G, plus 4 noise variables,
    store the computational time to evaluate each summary statistic, and keep
    track of their nature (discrete or not).
        
        Args:
            G (networkx.classes.graph.Graph):
                an undirected networkx graph.
        
        Returns:
            resDicts (tuple): 
                a tuple containing the elements:
                - dictSums (dict): a dictionary with the name of the summaries
                as keys and the summary statistic values as values;
                - dictTimes (dict): a dictionary with the name of the summaries
                as keys and the time to compute each one as values;
                - dictIsDisc (dict): a dictionary indicating if the summary is
                discrete (True) or not (False).
                
    """

    dictSums = dict()  # Will store the summary statistic values
    dictTimes = dict()  # Will store the evaluation times
    dictIsDisc = dict()  # Will store the summary statistic nature

    # Extract the largest connected component
    Gcc = sorted(nx.connected_components(G), key=len, reverse=True)
    G_lcc = G.subgraph(Gcc[0])

    # Number of edges
    start = time.time()
    dictSums["num_edges"] = G.number_of_edges()
    dictTimes["num_edges"] = time.time() - start
    dictIsDisc["num_edges"] = True

    # Number of connected components
    start = time.time()
    dictSums["num_of_CC"] = nx.number_connected_components(G)
    dictTimes["num_of_CC"] = time.time() - start
    dictIsDisc["num_of_CC"] = True

    # Number of nodes in the largest connected component
    start = time.time()
    dictSums["num_nodes_LCC"] = nx.number_of_nodes(G_lcc)
    dictTimes["num_nodes_LCC"] = time.time() - start
    dictIsDisc["num_nodes_LCC"] = True

    # Number of edges in the largest connected component
    start = time.time()
    dictSums["num_edges_LCC"] = G_lcc.number_of_edges()
    dictTimes["num_edges_LCC"] = time.time() - start
    dictIsDisc["num_edges_LCC"] = True

    # Diameter of the largest connected component
    start = time.time()
    dictSums["diameter_LCC"] = nx.diameter(G_lcc)
    dictTimes["diameter_LCC"] = time.time() - start
    dictIsDisc["diameter_LCC"] = True

    # Average geodesic distance (shortest path length in the LCC)
    start = time.time()
    dictSums["avg_geodesic_dist_LCC"] = nx.average_shortest_path_length(G_lcc)
    dictTimes["avg_geodesic_dist_LCC"] = time.time() - start
    dictIsDisc["avg_geodesic_dist_LCC"] = False

    # Average degree of the neighborhood of each node
    start = time.time()
    dictSums["avg_deg_connectivity"] = np.mean(
        list(nx.average_degree_connectivity(G).values()))
    dictTimes["avg_deg_connectivity"] = time.time() - start
    dictIsDisc["avg_deg_connectivity"] = False

    # Average degree of the neighbors of each node in the LCC
    start = time.time()
    dictSums["avg_deg_connectivity_LCC"] = np.mean(
        list(nx.average_degree_connectivity(G_lcc).values()))
    dictTimes["avg_deg_connectivity_LCC"] = time.time() - start
    dictIsDisc["avg_deg_connectivity_LCC"] = False

    # Recover the degree distribution
    start_degree_extract = time.time()
    degree_vals = list(dict(G.degree()).values())
    degree_extract_time = time.time() - start_degree_extract

    # Entropy of the degree distribution
    start = time.time()
    dictSums["degree_entropy"] = ss.entropy(degree_vals)
    dictTimes["degree_entropy"] = time.time() - start + degree_extract_time
    dictIsDisc["degree_entropy"] = False

    # Maximum degree
    start = time.time()
    dictSums["degree_max"] = max(degree_vals)
    dictTimes["degree_max"] = time.time() - start + degree_extract_time
    dictIsDisc["degree_max"] = True

    # Average degree
    start = time.time()
    dictSums["degree_mean"] = np.mean(degree_vals)
    dictTimes["degree_mean"] = time.time() - start + degree_extract_time
    dictIsDisc["degree_mean"] = False

    # Median degree
    start = time.time()
    dictSums["degree_median"] = np.median(degree_vals)
    dictTimes["degree_median"] = time.time() - start + degree_extract_time
    dictIsDisc["degree_median"] = False

    # Standard deviation of the degree distribution
    start = time.time()
    dictSums["degree_std"] = np.std(degree_vals)
    dictTimes["degree_std"] = time.time() - start + degree_extract_time
    dictIsDisc["degree_std"] = False

    # Quantile 25%
    start = time.time()
    dictSums["degree_q025"] = np.quantile(degree_vals, 0.25)
    dictTimes["degree_q025"] = time.time() - start + degree_extract_time
    dictIsDisc["degree_q025"] = False

    # Quantile 75%
    start = time.time()
    dictSums["degree_q075"] = np.quantile(degree_vals, 0.75)
    dictTimes["degree_q075"] = time.time() - start + degree_extract_time
    dictIsDisc["degree_q075"] = False

    # Average geodesic distance
    start = time.time()
    dictSums["avg_shortest_path_length_LCC"] = nx.average_shortest_path_length(
        G_lcc)
    dictTimes["avg_shortest_path_length_LCC"] = time.time() - start
    dictIsDisc["avg_shortest_path_length_LCC"] = False

    # Average global efficiency:
    # The efficiency of a pair of nodes in a graph is the multiplicative
    # inverse of the shortest path distance between the nodes.
    # The average global efficiency of a graph is the average efficiency of
    # all pairs of nodes.
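    # For example, in the path graph 0-1-2 the pair (0, 2) is at distance 2
    # and contributes 1/2, giving (1 + 1 + 1/2)/3 = 5/6 for that graph.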
    start = time.time()
    dictSums["avg_global_efficiency"] = nx.global_efficiency(G)
    dictTimes["avg_global_efficiency"] = time.time() - start
    dictIsDisc["avg_global_efficiency"] = False

    # Harmonic mean of the geodesic distances, which is
    # 1/avg_global_efficiency
    start = time.time()
    dictSums["harmonic_mean"] = 1.0 / nx.global_efficiency(G)
    dictTimes["harmonic_mean"] = time.time() - start
    dictIsDisc["harmonic_mean"] = False

    # Average local efficiency
    # The local efficiency of a node in the graph is the average global
    # efficiency of the subgraph induced by the neighbors of the node.
    # The average local efficiency is the average of the
    # local efficiencies of each node.
    start = time.time()
    dictSums["avg_local_efficiency_LCC"] = nx.local_efficiency(G_lcc)
    dictTimes["avg_local_efficiency_LCC"] = time.time() - start
    dictIsDisc["avg_local_efficiency_LCC"] = False

    # Node connectivity
    # The node connectivity is equal to the minimum number of nodes that
    # must be removed to disconnect G or render it trivial.
    # Only on the largest connected component here.
    start = time.time()
    dictSums["node_connectivity_LCC"] = nx.node_connectivity(G_lcc)
    dictTimes["node_connectivity_LCC"] = time.time() - start
    dictIsDisc["node_connectivity_LCC"] = True

    # Edge connectivity
    # The edge connectivity is equal to the minimum number of edges that
    # must be removed to disconnect G or render it trivial.
    # Only on the largest connected component here.
    start = time.time()
    dictSums["edge_connectivity_LCC"] = nx.edge_connectivity(G_lcc)
    dictTimes["edge_connectivity_LCC"] = time.time() - start
    dictIsDisc["edge_connectivity_LCC"] = True

    # Graph transitivity:
    # 3 times the number of triangles divided by the number of triads
    start = time.time()
    dictSums["transitivity"] = nx.transitivity(G)
    dictTimes["transitivity"] = time.time() - start
    dictIsDisc["transitivity"] = False

    # Number of triangles
    start = time.time()
    dictSums["num_triangles"] = np.sum(list(nx.triangles(G).values())) / 3
    dictTimes["num_triangles"] = time.time() - start
    dictIsDisc["num_triangles"] = True

    # Estimate of the average clustering coefficient of G:
    # Average local clustering coefficient, with local clustering coefficient
    # defined as C_i = (nbr of pairs of neighbors of i that are connected)/(nbr of pairs of neighbors of i)
    start = time.time()
    dictSums["avg_clustering_coef"] = nx.average_clustering(G)
    dictTimes["avg_clustering_coef"] = time.time() - start
    dictIsDisc["avg_clustering_coef"] = False

    # Square clustering (averaged over nodes):
    # the fraction of possible squares that exist at the node.

    # We average it over nodes
    start = time.time()
    dictSums["square_clustering_mean"] = np.mean(
        list(nx.square_clustering(G).values()))
    dictTimes["square_clustering_mean"] = time.time() - start
    dictIsDisc["square_clustering_mean"] = False

    # We compute the median
    start = time.time()
    dictSums["square_clustering_median"] = np.median(
        list(nx.square_clustering(G).values()))
    dictTimes["square_clustering_median"] = time.time() - start
    dictIsDisc["square_clustering_median"] = False

    # We compute the standard deviation
    start = time.time()
    dictSums["square_clustering_std"] = np.std(
        list(nx.square_clustering(G).values()))
    dictTimes["square_clustering_std"] = time.time() - start
    dictIsDisc["square_clustering_std"] = False

    # Number of 2-cores
    start = time.time()
    dictSums["num_2cores"] = len(nx.k_core(G, k=2))
    dictTimes["num_2cores"] = time.time() - start
    dictIsDisc["num_2cores"] = True

    # Number of 3-cores
    start = time.time()
    dictSums["num_3cores"] = len(nx.k_core(G, k=3))
    dictTimes["num_3cores"] = time.time() - start
    dictIsDisc["num_3cores"] = True

    # Number of 4-cores
    start = time.time()
    dictSums["num_4cores"] = len(nx.k_core(G, k=4))
    dictTimes["num_4cores"] = time.time() - start
    dictIsDisc["num_4cores"] = True

    # Number of 5-cores
    start = time.time()
    dictSums["num_5cores"] = len(nx.k_core(G, k=5))
    dictTimes["num_5cores"] = time.time() - start
    dictIsDisc["num_5cores"] = True

    # Number of 6-cores
    start = time.time()
    dictSums["num_6cores"] = len(nx.k_core(G, k=6))
    dictTimes["num_6cores"] = time.time() - start
    dictIsDisc["num_6cores"] = True

    # Number of k-shells
    # The k-shell is the subgraph induced by nodes with core number k.
    # That is, nodes in the k-core that are not in the k+1-core

    # Number of 2-shells
    start = time.time()
    dictSums["num_2shells"] = len(nx.k_shell(G, 2))
    dictTimes["num_2shells"] = time.time() - start
    dictIsDisc["num_2shells"] = True

    # Number of 3-shells
    start = time.time()
    dictSums["num_3shells"] = len(nx.k_shell(G, 3))
    dictTimes["num_3shells"] = time.time() - start
    dictIsDisc["num_3shells"] = True

    # Number of 4-shells
    start = time.time()
    dictSums["num_4shells"] = len(nx.k_shell(G, 4))
    dictTimes["num_4shells"] = time.time() - start
    dictIsDisc["num_4shells"] = True

    # Number of 5-shells
    start = time.time()
    dictSums["num_5shells"] = len(nx.k_shell(G, 5))
    dictTimes["num_5shells"] = time.time() - start
    dictIsDisc["num_5shells"] = True

    # Number of 6-shells
    start = time.time()
    dictSums["num_6shells"] = len(nx.k_shell(G, 6))
    dictTimes["num_6shells"] = time.time() - start
    dictIsDisc["num_6shells"] = True

    start = time.time()
    listOfCliques = list(nx.enumerate_all_cliques(G))
    enum_all_cliques_time = time.time() - start

    # Number of 4-cliques
    start = time.time()
    n4Clique = 0
    for li in listOfCliques:
        if len(li) == 4:
            n4Clique += 1
    dictSums["num_4cliques"] = n4Clique
    dictTimes["num_4cliques"] = time.time() - start + enum_all_cliques_time
    dictIsDisc["num_4cliques"] = True

    # Number of 5-cliques
    start = time.time()
    n5Clique = 0
    for li in listOfCliques:
        if len(li) == 5:
            n5Clique += 1
    dictSums["num_5cliques"] = n5Clique
    dictTimes["num_5cliques"] = time.time() - start + enum_all_cliques_time
    dictIsDisc["num_5cliques"] = True

    # Maximal size of a clique in the graph
    start = time.time()
    dictSums["max_clique_size"] = len(approximation.clique.max_clique(G))
    dictTimes["max_clique_size"] = time.time() - start
    dictIsDisc["max_clique_size"] = True

    # Approximated size of a large clique in the graph
    start = time.time()
    dictSums["large_clique_size"] = approximation.large_clique_size(G)
    dictTimes["large_clique_size"] = time.time() - start
    dictIsDisc["large_clique_size"] = True

    # Number of shortest paths of length k
    start = time.time()
    # Keyed by node label so the lookups below work for arbitrary labels,
    # not only for nodes numbered 0..n-1.
    dictOfPLength = dict(nx.shortest_path_length(G))
    path_length_time = time.time() - start

    # when k = 3
    start = time.time()
    n3Paths = 0
    for node in G.nodes():
        tmp = list(dictOfPLength[node].values())
        n3Paths += tmp.count(3)
    dictSums["num_shortest_3paths"] = n3Paths / 2
    dictTimes["num_shortest_3paths"] = time.time() - start + path_length_time
    dictIsDisc["num_shortest_3paths"] = True

    # when k = 4
    start = time.time()
    n4Paths = 0
    for node in G.nodes():
        tmp = list(dictOfPLength[node].values())
        n4Paths += tmp.count(4)
    dictSums["num_shortest_4paths"] = n4Paths / 2
    dictTimes["num_shortest_4paths"] = time.time() - start + path_length_time
    dictIsDisc["num_shortest_4paths"] = True

    # when k = 5
    start = time.time()
    n5Paths = 0
    for node in G.nodes():
        tmp = list(dictOfPLength[node].values())
        n5Paths += tmp.count(5)
    dictSums["num_shortest_5paths"] = n5Paths / 2
    dictTimes["num_shortest_5paths"] = time.time() - start + path_length_time
    dictIsDisc["num_shortest_5paths"] = True

    # when k = 6
    start = time.time()
    n6Paths = 0
    for node in G.nodes():
        tmp = list(dictOfPLength[node].values())
        n6Paths += tmp.count(6)
    dictSums["num_shortest_6paths"] = n6Paths / 2
    dictTimes["num_shortest_6paths"] = time.time() - start + path_length_time
    dictIsDisc["num_shortest_6paths"] = True

    # Size of the minimum (weight) node dominating set:
    # a subset of nodes such that every node not in the subset has a
    # direct neighbor inside the subset.
    start = time.time()
    T = approximation.min_weighted_dominating_set(G)
    dictSums["size_min_node_dom_set"] = len(T)
    dictTimes["size_min_node_dom_set"] = time.time() - start
    dictIsDisc["size_min_node_dom_set"] = True

    # Idem but with the edge dominating set
    start = time.time()
    T = approximation.min_edge_dominating_set(G)
    dictSums["size_min_edge_dom_set"] = 2 * len(
        T)  # times 2 to have a number of nodes
    dictTimes["size_min_edge_dom_set"] = time.time() - start
    dictIsDisc["size_min_edge_dom_set"] = True

    # The Wiener index of a graph is the sum of the shortest-path distances
    # between each pair of reachable nodes. For pairs of nodes in undirected graphs,
    # only one orientation of the pair is counted.
    # (On LCC otherwise inf)
    start = time.time()
    dictSums["wiener_index_LCC"] = nx.wiener_index(G_lcc)
    dictTimes["wiener_index_LCC"] = time.time() - start
    dictIsDisc["wiener_index_LCC"] = True

    # Betweenness node centrality (averaged over nodes):
    # at node u it is defined as B_u = sum_{i,j} sigma(i,u,j)/sigma(i,j),
    # where sigma(i,j) is the number of shortest paths between i and j and
    # sigma(i,u,j) the number of those that pass through u

    start = time.time()
    betweenness = list(nx.betweenness_centrality(G).values())
    time_betweenness = time.time() - start

    # Averaged across nodes
    start = time.time()
    dictSums["betweenness_centrality_mean"] = np.mean(betweenness)
    dictTimes["betweenness_centrality_mean"] = time.time(
    ) - start + time_betweenness
    dictIsDisc["betweenness_centrality_mean"] = False

    # Maximum across nodes
    start = time.time()
    dictSums["betweenness_centrality_max"] = max(betweenness)
    dictTimes["betweenness_centrality_max"] = time.time(
    ) - start + time_betweenness
    dictIsDisc["betweenness_centrality_max"] = False

    # Central point dominance
    # CPD = sum_u(B_max - B_u)/(N-1)
    start = time.time()
    dictSums["central_point_dominance"] = sum(
        max(betweenness) - np.array(betweenness)) / (len(betweenness) - 1)
    dictTimes["central_point_dominance"] = time.time(
    ) - start + time_betweenness
    dictIsDisc["central_point_dominance"] = False

    # Estrada index: sum_i^n exp(lambda_i),
    # with n the number of nodes and lambda_i the i-th eigenvalue of the
    # adjacency matrix of G
    start = time.time()
    dictSums["Estrata_index"] = nx.estrada_index(G)
    dictTimes["Estrata_index"] = time.time() - start
    dictIsDisc["Estrata_index"] = False

    # Eigenvector centrality:
    # the centrality of node i is the i-th coordinate of the eigenvector x
    # satisfying Ax = lambda*x for the largest eigenvalue lambda, so each
    # node's centrality is proportional to the sum of its neighbors' centralities

    # Averaged
    start = time.time()
    dictSums["avg_eigenvec_centrality"] = np.mean(
        list(nx.eigenvector_centrality_numpy(G).values()))
    dictTimes["avg_eigenvec_centrality"] = time.time() - start
    dictIsDisc["avg_eigenvec_centrality"] = False

    # Maximum
    start = time.time()
    dictSums["max_eigenvec_centrality"] = max(
        list(nx.eigenvector_centrality_numpy(G).values()))
    dictTimes["max_eigenvec_centrality"] = time.time() - start
    dictIsDisc["max_eigenvec_centrality"] = False

    ### Noise generation ###

    # Noise simulated from a Normal(0,1) distribution
    start = time.time()
    dictSums["noise_Gauss"] = ss.norm.rvs(0, 1)
    dictTimes["noise_Gauss"] = time.time() - start
    dictIsDisc["noise_Gauss"] = False

    # Noise simulated from a Uniform distribution [0-50]
    start = time.time()
    dictSums["noise_Unif"] = ss.uniform.rvs(0, 50)
    dictTimes["noise_Unif"] = time.time() - start
    dictIsDisc["noise_Unif"] = False

    # Noise simulated from a Bernoulli B(0.5) distribution
    start = time.time()
    dictSums["noise_Bern"] = ss.bernoulli.rvs(0.5)
    dictTimes["noise_Bern"] = time.time() - start
    dictIsDisc["noise_Bern"] = True

    # Noise simulated from a discrete uniform distribution [0,50[
    start = time.time()
    dictSums["noise_disc_Unif"] = ss.randint.rvs(0, 50)
    dictTimes["noise_disc_Unif"] = time.time() - start
    dictIsDisc["noise_disc_Unif"] = True

    resDicts = (dictSums, dictTimes, dictIsDisc)

    return resDicts
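A usage sketch for compute_summaries, assuming the module-level imports it relies on (time, numpy as np, networkx as nx, scipy.stats as ss, and networkx.algorithms.approximation as approximation) are in scope:

import networkx as nx

G = nx.erdos_renyi_graph(60, 0.08, seed=0)
sums, times, is_disc = compute_summaries(G)
print(sums["avg_global_efficiency"], times["avg_global_efficiency"])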
Example #26
def efficiency(g):
    return nx.global_efficiency(g)
Example #27
 def test_global_efficiency(self):
     assert_equal(nx.global_efficiency(self.G2), 5 / 6)
Example #28
e_list_list[7] = [[0, 1], [1, 2], [2, 3], [0, 3]]
e_list_list[8] = [[0, 1], [1, 2], [1, 3]]
e_list_list[9] = [[0, 1], [1, 2], [2, 3]]
e_list_list[10] = [[0, 1], [0, 2], [0, 3], [0, 4], [1, 5], [2, 5], [3, 6],
                   [4, 6], [5, 7], [5, 8], [6, 9], [6, 7], [8, 9], [7, 10],
                   [8, 10], [9, 10]]
e_list_list[11] = [[0, 1], [1, 2], [2, 3], [3, 4], [1, 3], [2, 4], [4, 0]]
e_list_list[12] = [[0, 1], [0, 2], [1, 3], [2, 3], [3, 4], [3, 5], [4, 5]]
e_list_list[13] = [[0, 1], [0, 2], [1, 2], [1, 3], [2, 3], [3, 4], [3, 5],
                   [4, 5]]
e_list_list[14] = [[0, 2], [1, 2], [2, 3], [2, 4], [4, 5], [2, 5], [5, 6],
                   [5, 7], [5, 8]]
e_list_list[15] = [[0, 3], [1, 3], [2, 3], [3, 4], [3, 5], [3, 6], [5, 6],
                   [5, 7], [6, 8]]

for i in range(len(e_list_list)):
    g = g_list[i]
    g.add_edges_from(e_list_list[i])
    plt.subplot()
    nx.draw(g, with_labels=True)
    plt.show()

    g_cont = Merger(g)
    print(nx.global_efficiency(g))
    g_cont.cont_all_cliques(min_clique_node=3)
    print(nx.global_efficiency(g_cont.graph))
    plt.subplot()
    nx.draw(g_cont.graph, with_labels=True)
    plt.show()
    # g_cont.print_concat_nodes()
Example #29
import os
import re

import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt

# Set the folder path holding the to-do files
to_do_dir = ''

dataname = re.compile(r'.+(?=\.)')
todo_list = []
if os.path.exists(to_do_dir):
    for file_name in os.listdir(to_do_dir):
        file_name_tmp = dataname.match(file_name)
        todo_list.append(file_name_tmp.group())

for i in todo_list:
    csv_path = os.path.join(to_do_dir, i) + '.csv'
    data = pd.read_csv(csv_path)
    G = nx.from_pandas_edgelist(data, source="from_id", target="to_id")
    print("task:", i)
    print(i, nx.info(G))
    largest_cc = max(nx.connected_components(G), key=len)
    G2 = G.subgraph(list(largest_cc))
    influence = len(G2.nodes()) / len(G.nodes())
    print("infulence rate:", i, influence)
    ave_path = nx.average_shortest_path_length(G2)
    print("average_shortest_path_length:", i, ave_path)
    global_efficiency = nx.global_efficiency(G2) / nx.global_efficiency(G)
    print("global_efficiency:", i, global_efficiency)
    print("task finished:", i)
    print("...")
    print("...")
Example #30
def local_efficiency(G, nodes):
    v = [nx.global_efficiency(G.subgraph(G[n])) for n in nodes]
    return valuesDict(v, nodes)
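This per-node quantity is exactly what nx.local_efficiency averages: the global efficiency of the subgraph induced by each node's neighbors. A consistency sketch, under the assumption that valuesDict merely pairs the values with the node labels:

import networkx as nx
import numpy as np

G = nx.karate_club_graph()
v = [nx.global_efficiency(G.subgraph(G[n])) for n in G]
assert abs(np.mean(v) - nx.local_efficiency(G)) < 1e-12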
print("Triadic closure:", triadic_closure * 100, "%")

# Triangles in network for each node
triangles = nx.triangles(G)
print("Triangles in graph: ", nx.triangles(G))

# Clustering of a node
clustering = nx.clustering(G)
print("Clustering coefficient : ", clustering)

# Average clustering
avg_clustering = nx.average_clustering(G)
print("Average clustering of a graph G is:", avg_clustering)

# Efficiency
print("Efficiency of graph:", nx.global_efficiency(G))

# Degree - node's degree is the sum of its edges


def takesecond(elem):
    return elem[1]


degree_list = list(G.degree())
# node_sizes is used when drawing the graph; each entry is a node's degree * 10
node_sizes = []
for i in degree_list:
    node_sizes.append(i[1] * 10)

# sort and print out degree list
Example #32
def test_global_efficiency():
    G = nx.cycle_graph(4)
    assert_equal(nx.global_efficiency(G), 5 / 6)
Example #33
def test_global_efficiency():
    G = nx.cycle_graph(4)
    assert_equal(nx.global_efficiency(G), 5 / 6)