Esempio n. 1
0
def close_enough(G, num_edge):
    """Return True if a sampled subgraph's degree assortativity lies within
    10% of the original graph's coefficient, False otherwise.

    Parameters
    ----------
    G : networkx graph
        The reference graph.
    num_edge : int
        Number of edges handed to ``sample_edges`` to build the subgraph.
    """
    G_new = sample_edges(G, num_edge)
    real = nx.degree_pearson_correlation_coefficient(G)
    current = nx.degree_pearson_correlation_coefficient(G_new)

    # Compare |current - real| against 10% of |real| so the tolerance band
    # also works for negative coefficients: the original check
    # ``real*0.9 <= current <= real*1.1`` is an empty interval when real < 0.
    return abs(current - real) <= 0.1 * abs(real)
Esempio n. 2
0
 def degassortcoef(self):
     '''
     Return a DataFrame (indexed by individual) holding the weighted degree
     assortativity coefficient of each individual's network.

     NOTE(review): the column is named 'ave_cluster_coef' although it holds
     assortativity values -- name kept for backward compatibility.
     '''
     dac = pd.DataFrame(np.nan,
                        index=self.indiv,
                        columns=['ave_cluster_coef'])
     for k in self.indiv:
         G = nx.from_numpy_matrix(self.nets[k].values)
         # Compute the coefficient once per network (the original computed
         # it twice: once to store, once to print) and drop the dead
         # ``G.edges(data=True)`` call whose result was discarded.
         coef = nx.degree_pearson_correlation_coefficient(G, weight='weight')
         dac.loc[k] = coef
         print(coef)
     return dac
	def create_graph(self):
		"""Build an undirected graph from the node/link tables on ``self``.

		Returns
		-------
		tuple
			``(g, nodes_list, No_duplicate_nodes, link_density,
			average_degree)`` where ``average_degree`` is the
			per-node average-neighbor-degree dict.
		"""
		g = nx.Graph()
		# Stack both endpoint columns into one series to gather every node id.
		duplicated_nodes_list = self.only_nodes.iloc[:, 0].append(
			self.only_nodes.iloc[:, 1]).reset_index(drop=True)
		nodes_list = duplicated_nodes_list.values.tolist()
		No_duplicate_nodes = set(nodes_list)
		g.add_nodes_from(No_duplicate_nodes)
		g.add_edges_from(self.No_duplicate_links)
		link_density = nx.density(g)
		average_degree = nx.average_neighbor_degree(g)
		# NOTE(review): the original also computed degree correlation,
		# average clustering, average hopcount, diameter and the
		# adjacency/Laplacian spectra here, but none of those values were
		# returned or used anywhere, so the (expensive) dead computations
		# were removed.
		return g, nodes_list, No_duplicate_nodes, link_density, average_degree
Esempio n. 4
0
    def get_default_feature(self, graph: nx.Graph, direct):
        """Extract a default feature vector from *graph*.

        Features: node count, density, degree mean/max/min/variance,
        clustering mean/max/variance, and (undirected only) degree
        assortativity.

        Parameters
        ----------
        graph : nx.Graph
            Graph to featurize.
        direct : bool
            True for directed graphs (extra features not implemented yet).

        Returns
        -------
        list
            The feature values, in a fixed order.
        """
        result = []
        result.append(len(graph.nodes))
        result.append(nx.density(graph))
        degrees = nx.degree(graph)
        degrees = np.array([item[1] for item in degrees])
        clusters = nx.clustering(graph)
        clusters = np.array([clusters[item] for item in clusters.keys()])
        result.append(degrees.mean())
        result.append(degrees.max())
        result.append(degrees.min())
        result.append(degrees.var())

        result.append(clusters.mean())
        result.append(clusters.max())
        result.append(clusters.var())
        if direct:
            # Supplementary features for the directed case (not implemented).
            pass
        else:
            # Supplementary features for the undirected case.
            correlation = nx.degree_pearson_correlation_coefficient(graph)
            # The original guard ``correlation is not np.nan`` was always
            # True: the coefficient is a freshly computed float, never the
            # ``np.nan`` singleton object. Use an isnan() check so NaN
            # really maps to 0.0.
            result.append(0.0 if np.isnan(correlation) else correlation)
        return list(result)
Esempio n. 5
0
    def compute_features(self):
        """Register degree-assortativity based features on this feature set."""
        # Covariance-based assortativity coefficient.
        self.add_feature(
            "degree_assortativity_coeff",
            lambda g: nx.degree_assortativity_coefficient(g),
            "Similarity of connections in the graph with respect to the node degree",
            InterpretabilityScore(4),
        )

        # Same quantity via the Pearson-correlation implementation.
        self.add_feature(
            "degree_assortativity_coeff_pearson",
            lambda g: nx.degree_pearson_correlation_coefficient(g),
            "Similarity of connections in the graph with respect to the node degree",
            InterpretabilityScore(4),
        )

        def average_neighbor_degree(g):
            # One value per node; summarised downstream by the
            # "centrality" statistics machinery.
            return list(assortativity.average_neighbor_degree(g).values())

        self.add_feature(
            "degree assortativity",
            average_neighbor_degree,
            "average neighbor degree",
            InterpretabilityScore(4),
            statistics="centrality",
        )
Esempio n. 6
0
 def print_graph_stats(self, G):
     """Print clustering, degree, density, assortativity and five node
     centrality rankings (each sorted ascending by value) for graph G."""
     print('average clustering: ', nx.average_clustering(G))
     degrees = [val for (node, val) in G.degree()]
     print('average degree: ', sum(degrees) / float(len(G)))
     print('density: ', nx.density(G))
     print('assortativity coefficient (Pearson\'s rho): ',
           nx.degree_pearson_correlation_coefficient(G))
     print('(should be negative according to Gonzalez et al., 2007)')
     import operator
     # The five centrality reports shared an identical sort-and-print
     # stanza; drive them from a table instead. Labels are kept exactly
     # as the original printed them (including the missing colon on the
     # betweenness label).
     centralities = [
         ('betweenness centrality', nx.betweenness_centrality),
         ('degree centrality:', nx.degree_centrality),
         ('eigenvector centrality:', nx.eigenvector_centrality),
         ('Katz centrality:', nx.katz_centrality),
         ('closeness centrality:', nx.closeness_centrality),
     ]
     for label, measure in centralities:
         sorted_x = sorted(measure(G).items(), key=operator.itemgetter(1))
         print(label, sorted_x)
     print('\n')
Esempio n. 7
0
    def correlation_vertex_degree(self, cvde=False):
        """
        Compute the correlation of vertex degree on the simplified graph.

        Parameters
        ----------
        cvde : float
            Optional coefficient of variation of the degree. When falsy it
            is computed internally via ``mean_degree_and_CV``.

        Returns
        -------
        float
            Correlation of Vertex Degree. When the degree CV is zero the
            correlation is undefined, so 1 is returned by convention
            (avoids a division by zero inside the coefficient).

        Examples
        --------
           >>> cvd = correlation_vertex_degree()
        """
        if not cvde:
            _, cvde = self.mean_degree_and_CV()

        if cvde == 0:
            # Degenerate case: all degrees equal.
            return 1
        return nx.degree_pearson_correlation_coefficient(self.graph_simpl)
Esempio n. 8
0
def analyse_graph(G):
    """Print a structural summary of G: connected components, clustering,
    degree assortativity, average distances and degree connectivity."""
    print(nx.info(G))

    num_components = nx.number_connected_components(G)
    print("Number of connected components:", num_components)
    if num_components > 1:
        sizes = sorted((len(comp) for comp in nx.connected_components(G)),
                       reverse=True)
        print("Connected component sizes:", sizes)
        # Share of nodes in the largest connected component.
        lcc_percent = 100 * sizes[0] / G.number_of_nodes()
        print(f"LCC: {lcc_percent}%")

    print("Average clustering coefficient:", nx.average_clustering(G))
    print("Degree assortativity:",
          nx.degree_pearson_correlation_coefficient(G))

    if nx.is_connected(G):
        print("Average distance:", nx.average_shortest_path_length(G))
    else:
        # Average distance is only defined per connected component.
        per_component = []
        for nodes in nx.connected_components(G):
            sub = G.subgraph(nodes).copy()
            per_component.append(nx.average_shortest_path_length(sub))
        print("Average distances:", per_component)

    print("Average degree connectivity:", nx.average_degree_connectivity(G))
Esempio n. 9
0
def assortativity(g, degree, weights=None):
    '''
    Returns the assortativity of the graph.
    This tells whether nodes are preferentially connected together depending
    on their degree.

    Parameters
    ----------
    g : :class:`~nngt.Graph`
        Graph to analyze.
    degree : str
        The type of degree that should be considered.
    weights : bool or str, optional (default: binary edges)
        Whether edge weights should be considered; if ``None`` or ``False``
        then use binary edges; if ``True``, uses the 'weight' edge attribute,
        otherwise uses any valid edge attribute required.

    References
    ----------
    .. [nx-assortativity]
       :nxdoc:`algorithms.assortativity.degree_assortativity_coefficient`
    '''
    # Resolve the weights argument into the attribute name networkx expects.
    weight_attr = _get_nx_weights(g, weights)

    # Same degree type on both edge endpoints.
    return nx.degree_pearson_correlation_coefficient(g.graph,
                                                     x=degree,
                                                     y=degree,
                                                     weight=weight_attr)
Esempio n. 10
0
def compute_singlevalued_measures(ntwk, weighted=True, calculate_cliques=False):
    """
    Returns a single value per network

    Computes a dict of scalar graph measures (assortativity, transitivity,
    component count, clustering, and -- when connected -- average shortest
    path length; optionally the clique number).
    """
    iflogger.info("Computing single valued measures:")
    measures = {}
    iflogger.info("...Computing degree assortativity (pearson number) ...")
    # Legacy compatibility: prefer the pre-1.6 name, fall back to the
    # modern one when the old attribute is gone.
    try:
        measures["degree_pearsonr"] = nx.degree_pearsonr(ntwk)
    except AttributeError:  # For NetworkX 1.6
        measures["degree_pearsonr"] = nx.degree_pearson_correlation_coefficient(ntwk)
    iflogger.info("...Computing degree assortativity...")
    try:
        measures["degree_assortativity"] = nx.degree_assortativity(ntwk)
    except AttributeError:
        measures["degree_assortativity"] = nx.degree_assortativity_coefficient(ntwk)
    iflogger.info("...Computing transitivity...")
    measures["transitivity"] = nx.transitivity(ntwk)
    iflogger.info("...Computing number of connected_components...")
    measures["number_connected_components"] = nx.number_connected_components(ntwk)
    iflogger.info("...Computing average clustering...")
    measures["average_clustering"] = nx.average_clustering(ntwk)
    if nx.is_connected(ntwk):
        iflogger.info("...Calculating average shortest path length...")
        # NOTE(review): ``weighted`` (a bool) is passed positionally; in
        # modern NetworkX that slot is ``weight`` (an edge-attribute name).
        # Looks written for the NetworkX 1.x signature -- confirm version.
        measures["average_shortest_path_length"] = nx.average_shortest_path_length(ntwk, weighted)
    if calculate_cliques:
        iflogger.info("...Computing graph clique number...")
        measures["graph_clique_number"] = nx.graph_clique_number(ntwk)  # out of memory error
    return measures
Esempio n. 11
0
def compute_graph(graph, nodePositions, mask):
    """Compute graph properties.

    Parameters
    ----------
    graph : original graph
    nodePositions : node positions
    mask : binary array of cellular region of interest

    Returns
    -------
    properties : list of graph properties
        [nodeNumber, edgeNumber, connectedComponentsNumber, bundling,
        assortativity, reachability, shortestPathLengthCV,
        algebraicConnectivity, edgeAnglesCV, edgeCrossingsMean]

    """
    nodeNumber = graph.number_of_nodes()
    edgeNumber = graph.number_of_edges()
    connectedComponents = connected_components(graph)
    connectedComponentsNumber = len(connectedComponents)
    # Edge capacities from the 'capa' attribute; renamed the loop variable
    # so it no longer shadows the built-in ``property``.
    edgeCapacity = 1.0 * np.array(
        [attrs['capa'] for _, _, attrs in graph.edges(data=True)])
    bundling = np.nanmean(edgeCapacity)
    assortativity = nx.degree_pearson_correlation_coefficient(graph, weight='capa')
    shortestPathLength = path_lengths(graph)
    reachability = np.nanmean(shortestPathLength)
    shortestPathLengthSD = np.nanstd(shortestPathLength)
    shortestPathLengthCV = 1.0 * shortestPathLengthSD / reachability
    # Second-smallest Laplacian eigenvalue (Fiedler value).
    algebraicConnectivity = np.sort(nx.laplacian_spectrum(graph, weight='capa'))[1]
    edgeAngles = edge_angles(graph, nodePositions, mask)
    edgeAnglesMean = np.nanmean(edgeAngles)
    edgeAnglesSD = np.nanstd(edgeAngles)
    edgeAnglesCV = 1.0 * edgeAnglesSD / edgeAnglesMean
    edgeCrossings = crossing_number(graph, nodePositions)
    edgeCrossingsMean = np.nanmean(edgeCrossings)
    properties = [nodeNumber, edgeNumber, connectedComponentsNumber, bundling,
                  assortativity, reachability, shortestPathLengthCV,
                  algebraicConnectivity, edgeAnglesCV, edgeCrossingsMean]
    return properties
Esempio n. 12
0
def metr_gr(graph):
    """Print the top-5 nodes by four centrality measures plus global
    metrics (Russian labels); return the closeness-centrality dict."""
    def top5(scores):
        # Node names ranked by descending score, joined for printing.
        return ", ".join(sorted(scores, key=scores.get, reverse=True)[:5])

    print('Max degree centrality:', top5(nx.degree_centrality(graph)))
    print('Max betweenness centrality:',
          top5(nx.betweenness_centrality(graph)))
    clos = nx.closeness_centrality(graph)
    print('Max closeness centrality:', top5(clos))
    print('Max eigencentrality:', top5(nx.eigenvector_centrality(graph)))

    print('Плотность:', nx.density(graph))
    print('Радиус:', nx.radius(graph))
    print('Диаметр:', nx.diameter(graph))
    print('Коэффициент кластеризации:', nx.average_clustering(graph))
    print('Коэффициент ассортативности:',
          nx.degree_pearson_correlation_coefficient(graph))

    return clos
def graph_stats(G, file=None):
    """
    Prints stats about a graph, optionally mirroring them to a file.
    :param G:       networkx Graph
    :param file:    the file to write the stats; when None, stats are only
                    printed (the original crashed with AttributeError here
                    despite the None default)
    :return:
    """
    def emit(line):
        # Print every stat and mirror it to the file only when one was given.
        print(line)
        if file is not None:
            file.write(line + '\n')

    emit("Network info:\n" + nx.info(G))
    emit("Network density: " + str(nx.density(G)))
    emit("Network connected?: " + str(nx.is_connected(G)))
    emit('Average clustering coeff: ' + str(nx.average_clustering(G)))
    emit("Triadic closure: " + str(nx.transitivity(G)))
    emit('Degree Pearson corr coeff: ' + str(
        nx.degree_pearson_correlation_coefficient(G)))
Esempio n. 14
0
def network_metrics(network, network_red, n_fibres, tag=''):
    """Analyse networkx Graph object"""

    database = pd.Series(dtype=object)

    database['No. Fibres'] = n_fibres

    # Nodes with more than two connections count as cross-links.
    degrees = np.array([deg[1] for deg in network.degree], dtype=int)
    database[f"{tag} Network Cross-Link Density"] = ((degrees > 2).sum() /
                                                     n_fibres)

    # Each metric is best-effort: a failure logs a debug message and
    # stores None instead of aborting the analysis.
    metric_jobs = (
        ('Degree',
         lambda: nx.degree_pearson_correlation_coefficient(
             network, weight='r') ** 2),
        ('Eigenvalue',
         lambda: np.real(nx.adjacency_spectrum(network_red).max())),
        ('Connectivity',
         lambda: nx.algebraic_connectivity(network_red, weight='r')),
    )
    for label, compute in metric_jobs:
        try:
            value = compute()
        except Exception as err:
            logger.debug(f'Network {label} calculation failed: {str(err)}')
            value = None
        database[f"{tag} Network {label}"] = value

    return database
    def data_analysis(self, graph):
        """Compute a 13-element normalized feature vector for *graph*.

        Entries: avg clustering, avg square clustering, normalized avg
        shortest path, degree assortativity (NaN mapped to 0), normalized
        diameter, density, normalized edge connectivity, four Freeman
        centralizations normalized against a star graph, normalized
        effective resistance, and normalized spectral radius.
        """
        data_vec = [0] * 13
        num_vertex = nx.number_of_nodes(graph)

        data_vec[0] = nx.average_clustering(graph)

        sq_values = list(nx.square_clustering(graph).values())
        data_vec[1] = sum(sq_values) / len(sq_values)

        # Normalize average shortest path by that of a path graph of the
        # same order (worst case for a connected graph).
        g = nx.path_graph(num_vertex)
        data_vec[2] = nx.average_shortest_path_length(
            graph) / nx.average_shortest_path_length(g)

        data_vec[3] = nx.degree_pearson_correlation_coefficient(graph)
        # The coefficient is NaN for e.g. regular graphs; map that to 0.
        # (Dropped the ``is True`` anti-pattern: isnan already returns bool.)
        if math.isnan(data_vec[3]):
            data_vec[3] = 0

        data_vec[4] = nx.diameter(graph) / (num_vertex - 1)
        data_vec[5] = nx.density(graph)

        data_vec[6] = nx.edge_connectivity(graph) / (num_vertex - 1)

        # Freeman centralization of the star graph is used as the
        # normalizing maximum for each centrality.
        g = nx.star_graph(num_vertex - 1)
        Freeman_degree_norm = self.freeman_centralization(
            nx.degree_centrality(g))
        Freeman_close_norm = self.freeman_centralization(
            nx.closeness_centrality(g))
        Freeman_between_norm = self.freeman_centralization(
            nx.betweenness_centrality(g))
        # need to change
        Freeman_eigen_norm = self.freeman_centralization(
            nx.eigenvector_centrality_numpy(g))

        data_vec[7] = self.freeman_centralization(
            nx.degree_centrality(graph)) / Freeman_degree_norm
        data_vec[8] = self.freeman_centralization(
            nx.closeness_centrality(graph)) / Freeman_close_norm
        data_vec[9] = self.freeman_centralization(
            nx.betweenness_centrality(graph)) / Freeman_between_norm

        # warning, the way it normalized may not correct
        data_vec[10] = self.freeman_centralization(
            nx.eigenvector_centrality_numpy(graph)) / Freeman_eigen_norm

        # Effective-resistance style measure from the non-zero Laplacian
        # eigenvalues; the smallest (zero) eigenvalue is dropped first.
        egvl_lap = np.sort(nx.laplacian_spectrum(graph))
        egvl_lap = np.delete(egvl_lap, 0, 0)
        summ = num_vertex * sum(1 / mu for mu in egvl_lap)
        data_vec[11] = (num_vertex - 1) / summ

        # for simple graph(adj matrix is symmetric), eigenvalue must be real number.
        egvl_adj = np.real(nx.adjacency_spectrum(graph))
        data_vec[12] = max(egvl_adj) / (num_vertex - 1)

        return data_vec
 def assortativity(G):
     """Degree correlation (assortativity).
     ref : http://www.logos.ic.i.u-tokyo.ac.jp/~chik/InfoTech12/08%20Masuda.pdf
     :param G: graph
     :return: float, assortativity
     """
     # Same result as nx.degree_assortativity_coefficient, but faster to compute.
     return nx.degree_pearson_correlation_coefficient(G)
Esempio n. 17
0
def more_inf(g):
    """Return basic metrics of *g* as a tuple:
    (radius, diameter, density, assortativity, avg clustering,
    node count, edge count)."""
    radius = nx.radius(g)
    diameter = nx.diameter(g)
    density = nx.density(g)
    assort = nx.degree_pearson_correlation_coefficient(g)
    clustering = nx.average_clustering(g)
    n_nodes = g.number_of_nodes()
    n_edges = g.number_of_edges()
    # Same tuple order as the original (r, d, p, ka, kc, nn, ne).
    return radius, diameter, density, assort, clustering, n_nodes, n_edges
Esempio n. 18
0
def analyzeGraph(G):
    """Return an HTML summary (Russian labels, <br/>-separated) of radius,
    diameter, assortativity, density and clustering of G."""
    rows = [
        "Радиус графа: " + str(nx.radius(G)),
        "Диаметр графа: " + str(nx.diameter(G)),
        "Коэффициент ассортативности: " + str(
            nx.degree_pearson_correlation_coefficient(G)),
        "Плотность графа: " + str(nx.density(G)),
        "Коэффициент кластеризации: " + str(nx.average_clustering(G)),
    ]
    # Every row (including the last) is terminated by a line break,
    # matching the original concatenation.
    return "".join(row + "<br/>" for row in rows)
Esempio n. 19
0
def info_network(G):
    """Print size, assortativity, centrality and HITS summaries for the
    bipartite graph G.

    NOTE(review): written against NetworkX 1.x APIs (``G.node``,
    ``nx.hits_numpy``) -- confirm the library version before reuse.
    """
    from networkx.algorithms import bipartite

    # Single-argument print calls are valid in both Python 2 and 3;
    # the original used Python-2 print statements.
    print(G.number_of_nodes())
    print(G.number_of_edges())

    print("average_neighbor_degree")
    # Renamed locals so they no longer shadow the built-ins dict/list;
    # also dropped the unused ``Decimal`` import.
    neighbor_degree = nx.average_neighbor_degree(G)
    print(neighbor_degree.keys())
    print(neighbor_degree.values())

    print("degree_assortativity_coefficient")
    print(nx.degree_assortativity_coefficient(G))

    print("degree_pearson_correlation_coefficient")
    print(nx.degree_pearson_correlation_coefficient(G))
    print("STOP HERE")

    print("bipartite.closeness_centrality(G,G.node)")
    closeness = bipartite.closeness_centrality(G, G.node)
    print(closeness.values())

    print("nx.degree_centrality(G)")
    print(nx.degree_centrality(G).values())

    print("nx.betweenness_centrality(G)")
    print(nx.betweenness_centrality(G).values())

    print("hits_numpy")
    print(nx.hits_numpy(G))
Esempio n. 20
0
def network_stats(G):
    """Compute summary statistics for graph G.

    Returns a dict with node/edge counts, clustering, giant-component
    share, disassortativity, degree mean/std, normalized degree-entropy
    and density.
    """
    node_count = len(G)

    edge_count = len(G.edges())
    density = (2 * edge_count) / (node_count * (node_count - 1))
    clustering = np.mean(list(nx.clustering(G).values()))

    degree_dist = list(dict(G.degree()).values())
    k_avg = np.mean(degree_dist)
    k_std = np.std(degree_dist, ddof=1)

    # BUGFIX: ``max`` needs key=len -- without it the components (node
    # sets) are compared element-wise, returning an arbitrary component
    # rather than the largest one.
    giant_component = max(nx.connected_components(G), key=len)
    giant_component_percent = float(len(giant_component)) / float(node_count)

    # Shift degrees so the minimum is at least 1 before binning.
    if min(degree_dist) < 1:
        adjust = np.abs(min(degree_dist)) + 1
    else:
        adjust = 0

    reweighted_dist = [val + adjust for val in degree_dist]

    normed_dist = [
        reweighted_dist.count(val) / node_count
        for val in range(1, node_count + 1)
    ]
    entropy = sp.entropy(normed_dist)

    # Normalize by the entropy of the flat (maximum-entropy) distribution.
    max_ent_dis = list(range(1, node_count + 1))
    max_ent = sp.entropy(max_ent_dis)

    entropy = entropy / max_ent

    if k_std == 0:  # if degrees equal, Pearson coefficient is undefined
        assortativity = 1
    else:
        assortativity = nx.degree_pearson_correlation_coefficient(G)

    if np.isnan(assortativity):
        assortativity = 0

    disassortativity = assortativity * -1

    # NOTE(review): the 'disssortativity' key typo is kept so existing
    # consumers of this dict keep working.
    stats = dict(
        (('node_count', node_count), ('edge_count', edge_count),
         ('clustering', clustering), ('giant component',
                                      giant_component_percent),
         ('disssortativity', disassortativity), ('k avg', k_avg),
         ('k std', k_std), ('entropy', entropy), ('density', density)))

    return stats
Esempio n. 21
0
def centrality_and_params_graph(g):
    """Print a summary dict of graph g: nodes with the two highest scores
    for four centrality measures, radius, diameter, assortativity,
    clustering, sizes and communities. Always returns 0."""
    def top_two(scores):
        # Nodes whose score is among the two largest distinct values.
        cutoff = sorted(set(scores.values()), reverse=True)[:2]
        return {key: value for key, value in scores.items()
                if value in cutoff}

    # The original had four copy-pasted stanzas and also built
    # ``*_counter`` lists that were never used -- the dead lists were
    # removed and the stanzas folded into the helper above.
    biggest_deg = top_two(nx.degree_centrality(g))
    biggest_bet = top_two(nx.betweenness_centrality(g))
    biggest_cls = top_two(nx.closeness_centrality(g))
    biggest_eig = top_two(nx.eigenvector_centrality(g))

    radius = nx.radius(g)

    diameter = nx.diameter(g)

    assort_coef = nx.degree_pearson_correlation_coefficient(g)

    clustering = nx.average_clustering(g)

    node_number = g.number_of_nodes()

    edge_number = g.number_of_edges()

    communities = community.greedy_modularity_communities(g)

    dict_return = {'biggest_deg': biggest_deg, 'biggest_bet': biggest_bet,
                   'biggest_cls': biggest_cls, 'biggest_eig': biggest_eig,
                   'radius': radius, 'diameter': diameter,
                   'assort_coef': assort_coef,
                   'clustering': clustering, 'node_number': node_number,
                   'edge_number': edge_number,
                   'communities': communities}
    print(dict_return)
    return 0
Esempio n. 22
0
def compute_singlevalued_measures(ntwk,
                                  weighted=True,
                                  calculate_cliques=False):
    """
    Returns a single value per network

    Collects scalar graph measures (assortativity, transitivity, component
    count, density, edge/node counts, clustering, average shortest path
    length; optionally clique number) into a dict.
    """
    iflogger.info("Computing single valued measures:")
    measures = {}
    iflogger.info("...Computing degree assortativity (pearson number) ...")
    # Legacy compatibility: try the pre-1.6 API name first, then the
    # modern one.
    try:
        measures["degree_pearsonr"] = nx.degree_pearsonr(ntwk)
    except AttributeError:  # For NetworkX 1.6
        measures[
            "degree_pearsonr"] = nx.degree_pearson_correlation_coefficient(
                ntwk)
    iflogger.info("...Computing degree assortativity...")
    try:
        measures["degree_assortativity"] = nx.degree_assortativity(ntwk)
    except AttributeError:
        measures["degree_assortativity"] = nx.degree_assortativity_coefficient(
            ntwk)
    iflogger.info("...Computing transitivity...")
    measures["transitivity"] = nx.transitivity(ntwk)
    iflogger.info("...Computing number of connected_components...")
    measures["number_connected_components"] = nx.number_connected_components(
        ntwk)
    iflogger.info("...Computing graph density...")
    measures["graph_density"] = nx.density(ntwk)
    iflogger.info("...Recording number of edges...")
    measures["number_of_edges"] = nx.number_of_edges(ntwk)
    iflogger.info("...Recording number of nodes...")
    measures["number_of_nodes"] = nx.number_of_nodes(ntwk)
    iflogger.info("...Computing average clustering...")
    measures["average_clustering"] = nx.average_clustering(ntwk)
    if nx.is_connected(ntwk):
        iflogger.info("...Calculating average shortest path length...")
        # NOTE(review): ``weighted`` (a bool) is passed positionally where
        # modern NetworkX expects ``weight`` (an edge-attribute name); and
        # ``connected_component_subgraphs`` below was removed in NetworkX
        # 2.4 -- this block targets NetworkX 1.x. Confirm version.
        measures[
            "average_shortest_path_length"] = nx.average_shortest_path_length(
                ntwk, weighted)
    else:
        iflogger.info("...Calculating average shortest path length...")
        # Disconnected graph: fall back to the first connected component.
        measures[
            "average_shortest_path_length"] = nx.average_shortest_path_length(
                nx.connected_component_subgraphs(ntwk)[0], weighted)
    if calculate_cliques:
        iflogger.info("...Computing graph clique number...")
        measures["graph_clique_number"] = nx.graph_clique_number(
            ntwk)  # out of memory error
    return measures
Esempio n. 23
0
def compute_singlevalued_measures(ntwk, weighted=True,
                                  calculate_cliques=False):
    """
    Returns a single value per network

    Variant of the measure collector using single-quoted keys; builds the
    same dict of scalar graph measures as its sibling above.
    """
    iflogger.info('Computing single valued measures:')
    measures = {}
    iflogger.info('...Computing degree assortativity (pearson number) ...')
    # Legacy compatibility: try the pre-1.6 API name first, then the
    # modern one.
    try:
        measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk)
    except AttributeError:  # For NetworkX 1.6
        measures[
            'degree_pearsonr'] = nx.degree_pearson_correlation_coefficient(
                ntwk)
    iflogger.info('...Computing degree assortativity...')
    try:
        measures['degree_assortativity'] = nx.degree_assortativity(ntwk)
    except AttributeError:
        measures['degree_assortativity'] = nx.degree_assortativity_coefficient(
            ntwk)
    iflogger.info('...Computing transitivity...')
    measures['transitivity'] = nx.transitivity(ntwk)
    iflogger.info('...Computing number of connected_components...')
    measures['number_connected_components'] = nx.number_connected_components(
        ntwk)
    iflogger.info('...Computing graph density...')
    measures['graph_density'] = nx.density(ntwk)
    iflogger.info('...Recording number of edges...')
    measures['number_of_edges'] = nx.number_of_edges(ntwk)
    iflogger.info('...Recording number of nodes...')
    measures['number_of_nodes'] = nx.number_of_nodes(ntwk)
    iflogger.info('...Computing average clustering...')
    measures['average_clustering'] = nx.average_clustering(ntwk)
    if nx.is_connected(ntwk):
        iflogger.info('...Calculating average shortest path length...')
        # NOTE(review): ``weighted`` (bool) is passed positionally where
        # modern NetworkX expects ``weight`` (attribute name), and
        # ``connected_component_subgraphs`` was removed in NetworkX 2.4 --
        # this targets NetworkX 1.x. Confirm version.
        measures[
            'average_shortest_path_length'] = nx.average_shortest_path_length(
                ntwk, weighted)
    else:
        iflogger.info('...Calculating average shortest path length...')
        # Disconnected graph: fall back to the first connected component.
        measures[
            'average_shortest_path_length'] = nx.average_shortest_path_length(
                nx.connected_component_subgraphs(ntwk)[0], weighted)
    if calculate_cliques:
        iflogger.info('...Computing graph clique number...')
        measures['graph_clique_number'] = nx.graph_clique_number(
            ntwk)  # out of memory error
    return measures
def describe_graph(G):
    """Graph description

    Returns a (graph-level Series, node-level DataFrame) pair describing
    the directed graph G.

    NOTE(review): uses ``G.selfloop_edges()`` which was removed in
    NetworkX 2.4 -- targets an older NetworkX. Confirm version.
    """

    # GRAPH DESCRIPTION
    graph_desc = pd.Series()
    # n. nodes
    graph_desc["number_of_nodes"] = G.number_of_nodes()
    # n. edges
    graph_desc["number_of_edges"] = G.number_of_edges()
    # n. of selfloops
    graph_desc["number_of_selfloops"] = len(G.selfloop_edges())

    # average shortest path length (the original comment said "density")
    graph_desc["average_shortest_path_length"] = nx.average_shortest_path_length(G)
    # degree assortativity (Pearson correlation of endpoint degrees)
    # graph_desc.append(pd.Series(nx.degree_assortativity_coefficient(G), name="degree_assortativity_coefficient"))
    graph_desc["degree_pearson_correlation_coefficient"] = nx.degree_pearson_correlation_coefficient(G)

    # NODE DESCRIPTION
    node_desc = list()
    # total degree and average neighbour degree per node
    node_desc.append(pd.Series(G.degree(), name="degree"))
    node_desc.append(pd.Series(nx.average_neighbor_degree(G), name="average_neighbor_degree"))
    # NOTE(review): 'outgoing' wraps in_degree and 'incoming' wraps
    # out_degree -- the names look swapped relative to the Series they
    # hold; verify the intended direction of the fold change below.
    outgoing = pd.Series(G.in_degree(), name="in_degree")
    node_desc.append(outgoing)
    incoming = pd.Series(G.out_degree(), name="out_degree")
    node_desc.append(incoming)
    # log2 fold change between the two degree Series (+1 to avoid log(0))
    ratio = np.log2(outgoing + 1) - np.log2(incoming + 1)
    node_desc.append(pd.Series(ratio, name="out_in_degree_fold_change"))

    # centrality
    # degree based
    node_desc.append(pd.Series(nx.degree_centrality(G), name="degree_centrality"))
    node_desc.append(pd.Series(nx.in_degree_centrality(G), name="in_degree_centrality"))
    node_desc.append(pd.Series(nx.out_degree_centrality(G), name="out_degree_centrality"))
    # closest-path based
    # node_desc.append(pd.Series(nx.closeness_centrality(G), name="closeness_centrality"))
    # node_desc.append(pd.Series(nx.betweenness_centrality(G), name="betweenness_centrality"))
    # # eigenvector-based
    # node_desc.append(pd.Series(nx.eigenvector_centrality(G), name="eigenvector_centrality"))
    # node_desc.append(pd.Series(nx.katz_centrality_numpy(G), name="katz_centrality"))
    # # load-based
    # node_desc.append(pd.Series(nx.load_centrality(G), name="load_centrality"))

    return (graph_desc, pd.DataFrame(node_desc).T)
Esempio n. 25
0
def stats(graph):
    """Compute, print (Polish labels) and return basic statistics of
    *graph*: node/edge counts, nearest-neighbour degree, degree
    correlation, clustering and average distance."""
    stats_dict = {}

    stats_dict['N'] = graph.number_of_nodes()
    print("Liczba węzłów: " + str(stats_dict['N']))
    stats_dict['E'] = graph.number_of_edges()
    print("Liczba krawędzi: " + str(stats_dict['E']))
    stats_dict['knn'] = knn(graph)
    print("Średni stopień najbliższego węzła: " + str(stats_dict['knn']))
    stats_dict['corr'] = nx.degree_pearson_correlation_coefficient(graph)
    print("Współczynnik korelacji: " + str(stats_dict['corr']))
    stats_dict['wsp_gron'] = wspolczynnik_gronowania(graph)
    print("Współczynnik gronowania: " + str(stats_dict['wsp_gron']))

    # The nx implementation is used here because it is much faster than
    # the hand-rolled alternative.
    stats_dict['avg_dist'] = nx.average_shortest_path_length(graph)
    print("Średni dystans: " + str(stats_dict['avg_dist']))

    # The dict was previously built and then discarded; return it so
    # callers can reuse the computed values (backward compatible).
    return stats_dict
Esempio n. 26
0
def dia(G):
    """Print (Russian labels) the diameter, assortativity, density and
    average clustering of graph G."""
    print('Диаметр графа, самый длинный путь' + 
          'от одной вершины до другой: ',
          nx.diameter(G))

    print('Коэффициент ассортативности (насколько вся сеть',
          ' завязана на основных "хабах": )',
          nx.degree_pearson_correlation_coefficient(G))

    print('Плотность графа, отношение рёбер и узлов: ',
          nx.density(G))

    print('вот какой коэффициент у нашего графа: ',
          nx.average_clustering(G))

    # BUGFIX: the original ended with ``return clos`` although ``clos``
    # was never defined, raising NameError after all output was printed;
    # return None implicitly instead.
Esempio n. 27
0
def feature(node, edge):
    """Build a 10-element numeric descriptor of the directed multigraph
    defined by *node* and *edge* lists."""
    graph = nx.MultiGraph().to_directed()
    graph.add_nodes_from(node)
    graph.add_edges_from(edge)
    # Same ten features, in the same order, as the index-assignment
    # version; booleans coerce to 0.0/1.0 in the float array.
    values = [
        len(graph.nodes),
        len(graph.edges),
        nx.density(graph),
        nx.degree_pearson_correlation_coefficient(graph),
        nx.algorithms.reciprocity(graph),
        0,  # nx.transitivity(G)
        nx.is_weakly_connected(graph),
        nx.number_weakly_connected_components(graph),
        nx.is_strongly_connected(graph),
        nx.number_strongly_connected_components(graph),
    ]
    return np.array(values, dtype=float)
Esempio n. 28
0
    def graph_descriptor_extractor(self, network_path):
        """Read a Pajek file and return its descriptor list:
        [num_nodes, num_edges, min_degree, max_degree, avg_degree,
        assortativity, avg_clustering, avg_path_length, diameter]."""
        G = nx.read_pajek(network_path)
        directed = nx.is_directed(G)

        # Collapse the multigraph produced by read_pajek into a simple
        # (di)graph of the matching kind.
        G = nx.DiGraph(G) if directed else nx.Graph(G)

        num_nodes = len(G.nodes())  # Descriptor a) number of nodes
        num_edges = len(G.edges())  # Descriptor b) number of edges

        degrees = sorted(nx.degree(G).values())
        min_degree = degrees[0]     # Descriptor c.i) minimum
        max_degree = degrees[-1]    # Descriptor c.ii) maximum

        # Descriptor c.iii) average: an undirected edge contributes two
        # endpoint degrees, a directed edge only one.
        endpoint_factor = 1.0 if directed else 2.0
        avg_degree = endpoint_factor * num_edges / float(num_nodes)

        assortativity = nx.degree_pearson_correlation_coefficient(G)
        "Descriptor e) Assortativity"
        avg_clustering = nx.average_clustering(G)
        "Descriptor d) Average clustering coefficient"
        avg_path_length = nx.average_shortest_path_length(G)
        "Descriptor f) Average path length"
        diameter = nx.diameter(G)
        "Descriptor g) Diameter"

        return [
            num_nodes, num_edges, min_degree, max_degree, avg_degree,
            assortativity, avg_clustering, avg_path_length, diameter
        ]
Esempio n. 29
0
def get_info(field):
    """Collect basic structural metrics of the graph *field* into a dict.

    Keys are human-readable (Russian) labels mapped to the corresponding
    networkx measures. Diameter and radius require a connected graph.
    """
    return {
        'Количество узлов ': field.number_of_nodes(),
        'Количество рёбер ': field.number_of_edges(),
        'Плотность графа ': nx.density(field),
        'Диаметр ': nx.diameter(field),
        'Радиус ': nx.radius(field),
        'Коэффициент кластеризации ': nx.average_clustering(field),
        'Коэффициент ассортативности ': nx.degree_pearson_correlation_coefficient(field),
    }
Esempio n. 30
0
def graph_about():
    """Print summary statistics for the module-level graph G, then render
    two plots: the 30 most central nodes and the whole network."""
    print('number of nodes {}'.format(nx.number_of_nodes(G)))
    print('number of edges {}'.format(nx.number_of_edges(G)))
    print('radius {}'.format(nx.radius(G)))
    print('diameter {}'.format(nx.diameter(G)))
    print('average_clustering {}'.format(nx.average_clustering(G)))
    print('transitivity {}'.format(nx.transitivity(G)))
    print('density {}'.format(nx.density(G)))
    print('degree_pearson_correlation_coefficient {}'.format(
        nx.degree_pearson_correlation_coefficient(G)))
    # Rank nodes by degree centrality, most central first.
    centrality = nx.degree_centrality(G)
    nodes = sorted(centrality, key=centrality.get, reverse=True)
    # Visualise the 30 most central nodes (with their mutual links),
    # then the full graph.
    sub_G = G.subgraph(nodes[:30])
    visualize(sub_G, 'graph_top30.png')
    visualize(G, 'graph_total.png')
Esempio n. 31
0
    def stats(self):
        """Summarise the network self._FN as a pandas Series named 'stats'.

        Fields: node count, edge count, density, node connectivity and the
        weighted out->in degree assortativity (Pearson).
        """
        fn = self._FN
        assortativity = nx.degree_pearson_correlation_coefficient(
            fn, x='out', y='in', weight='weight')
        summary = {
            'nodes': np.round(fn.order(), 0),
            'edges': np.round(fn.size(), 0),
            'density': np.round(nx.density(fn), 2),
            'connectivity': np.round(nx.node_connectivity(fn), 0),
            'assortativity': np.round(assortativity, 2),
        }
        return pd.Series(summary, name='stats')
def graph_stats(G):
    """Return a dict of basic statistics for graph *G*.

    'corr' falls back to 0 when the degree assortativity coefficient cannot
    be computed (e.g. a degenerate degree sequence with zero variance).
    """
    density = nx.density(G)
    try:
        corr = nx.degree_pearson_correlation_coefficient(G)
    # Was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; Exception keeps the best-effort fallback intact.
    except Exception:
        corr = 0
    avg_neighbor_degree = nx.average_neighbor_degree(G)  # dict: node -> avg neighbour degree
    k_nearest_neighbors = nx.k_nearest_neighbors(G)  # dict: degree -> avg neighbour degree
    degree_centrality = nx.degree_centrality(G)  # dict: node -> centrality
    info = nx.info(G)
    return {
        'density': density,
        'corr': corr,
        'avg_neighbor_degree': avg_neighbor_degree,
        'k_nearest_neighbors': k_nearest_neighbors,
        'degree_centrality': degree_centrality,
        'info': info
    }
Esempio n. 33
0
def metric_signature(G):
    """Compute a structural signature of graph G: scalar metrics plus the
    sorted degree, shortest-path-length and community-size distributions."""
    # Degree of every node: second column of the (node, degree) pairs.
    deg_dist = np.array(list(G.degree()), dtype=int)[:, 1]
    # All-pairs shortest path lengths; keep only the per-source length
    # dicts (second element of each (source, {target: length}) pair).
    spls = nx.all_pairs_shortest_path_length(G)
    spls = np.array(list(spls))[:, 1]
    spl_dist = []
    for n in spls:
        # Flatten every source's {target: length} dict into one list.
        spl_dist += [item[1] for item in n.items()]
    # Communities via greedy modularity maximisation.
    coms = nx.algorithms.community.modularity_max.greedy_modularity_communities(
        G)
    com_dist = [len(l) for l in list(coms)]
    return {
        'nodes': nx.number_of_nodes(G),
        'edges': nx.number_of_edges(G),
        'assort': nx.degree_pearson_correlation_coefficient(G),
        'avgcc': nx.average_clustering(G),
        'module': nx.algorithms.community.quality.coverage(G, coms),
        'deg_dist': np.array(sorted(deg_dist)),
        'spl_dist': np.array(sorted(spl_dist)),
        'com_dist': np.array(sorted(com_dist))
    }
Esempio n. 34
0
    def assortativity(self):
        """Compute assortativity-related measures of self.graph and write
        them to <self.DIR>/assortativity.json, printing the file name."""
        result = {
            'degree_assortativity_coefficient':
                nx.degree_assortativity_coefficient(self.graph),
        }

        # The Pearson variant is computed only for undirected graphs.
        if self.directed == 'undirected':
            result['degree_pearson_correlation_coefficient'] = (
                nx.degree_pearson_correlation_coefficient(self.graph))

        result['average_neighbor_degree'] = nx.average_neighbor_degree(
            self.graph)
        result['average_degree_connectivity'] = nx.average_degree_connectivity(
            self.graph)
        result['k_nearest_neighbors'] = nx.k_nearest_neighbors(self.graph)

        out_path = self.DIR + '/assortativity.json'
        with open(out_path, "w") as f:
            # SetEncoder serialises the set-valued entries.
            json.dump(result, f, cls=SetEncoder, indent=2)
        print(out_path)
Esempio n. 35
0
# print G.node


# Build the graph from CSV rows: columns are (source, target, weight).
# NOTE(review): row[2] is a *string* weight -- confirm downstream weighted
# measures tolerate it or convert to float here.
for row in csv.reader(csvfile):
   G.add_edges_from([(row[0],row[1])],weight=row[2])


# Print assortativity-related measures (Python 2 print statements).
print "average_neighbor_degree"
print nx.average_neighbor_degree(G) 

print "degree_assortativity_coefficient"
print nx.degree_assortativity_coefficient(G)

print "degree_pearson_correlation_coefficient"
print nx.degree_pearson_correlation_coefficient(G)  
#print nx.k_nearest_neighbors(G)


# Closeness centrality for the bipartite projection over all nodes of G.
print "bipartite.closeness_centrality"
print bipartite.closeness_centrality(G,G.node)

print "degree_centrality"
print nx.degree_centrality(G)

print "betweenness_centrality"
print nx.betweenness_centrality(G)

print "k_nearest_neighbors"
print nx.k_nearest_neighbors(G)
 def _ComputeCorrelationCoefficient(self, graph):
     """Return the degree assortativity of *graph* as the Pearson
     correlation of the degrees at either end of each edge."""
     return networkx.degree_pearson_correlation_coefficient(graph)
	return (indegreeSeq)

def getOutDegreeSeq(G):
	"""Return the out-degrees of all nodes of *G*, sorted descending."""
	return sorted((G.out_degree(u) for u in G.nodes()), reverse=True)

if __name__=="__main__":
	# Load the real network and its regenerated counterpart as directed
	# graphs with integer node ids.
	Gorig = nx.read_edgelist(realNetworkFileName, create_using=nx.DiGraph(), nodetype=int)
	Gregen =  nx.read_edgelist(regeneratedFileName, create_using=nx.DiGraph(), nodetype=int)
	print "Real network loaded with N =", Gorig.number_of_nodes(), "M=", Gorig.number_of_edges()
	print "Regenerated Network loaded with N =", Gregen.number_of_nodes(), "M =", Gregen.number_of_edges()
	print ""

	# Descending in-/out-degree sequences of both networks.
	indegreeSequenceOrig = getInDegreeSeq(Gorig)
	indegreeSequenceRegen = getInDegreeSeq(Gregen)
	outdegreeSequenceOrig = getOutDegreeSeq(Gorig)
	outdegreeSequenceRegen = getOutDegreeSeq(Gregen)

	# Each statistic is an [original, regenerated] pair for comparison.
	avInDegree = [np.mean(indegreeSequenceOrig), np.mean(indegreeSequenceRegen)]

	trans = [nx.transitivity(Gorig), nx.transitivity(Gregen)]
	avShortedPathLength = [nx.average_shortest_path_length(Gorig), nx.average_shortest_path_length(Gregen)]
	degreePearson = [nx.degree_pearson_correlation_coefficient(Gorig), nx.degree_pearson_correlation_coefficient(Gregen)]

	print "average in-degree", avInDegree
	print "transitivity", trans
	print "average shortest path", avShortedPathLength
	print "degree pearson", degreePearson


Esempio n. 38
0
def creationVecteur (G):
    """Build the feature vector (as a dict) describing graph G.

    Features: node/edge counts, cyclomatic number, link density, average
    degree, leaf count, degree histogram (degrees 0-4), s-metric, graph
    energy, average neighbour degree, average closeness, degree Pearson
    coefficient, rich-club metric, algebraic connectivity, and -- only when
    G is connected -- diameter, average shortest path and radius ("?" is
    stored for metrics that are undefined on this graph).

    NOTE(review): 2*ne/((nn-1)*nn) and 2*ne/nn truncate under Python 2
    integer division -- confirm the target interpreter, or that truncated
    values are acceptable.
    """
    v={}
    # Adding nodes
    nn = nx.number_of_nodes(G)
    v["numNodes"]=nn
    
    # Adding edges
    ne = nx.number_of_edges(G)
    v["numEdges"]=ne
    
    # Adding cyclomatic number (edges - nodes + connected components)
    c=nx.number_connected_components(G)
    cyclo = ne-nn+c
    v["numCycles"]=cyclo
    
    # Adding link density (undefined for a single-node graph)
    if nn==1:
        linkdensity="?"
    else:
        linkdensity = 2*ne/((nn-1)*nn)
    v["linkDensity"]=linkdensity
    
    # Adding average degree
    avgdegree = 2*ne/nn
    v["avgDegree"]=avgdegree
    
    # Adding number of leaves
    nl = numberLeaves(G)
    v["numLeafs"]=nl
    
    # Adding histogram of the nodes degree (only degrees 0..4 are kept;
    # missing entries stay 0 when the graph has no such degrees)
    v["histDegree0"]=0
    v["histDegree1"]=0
    v["histDegree2"]=0
    v["histDegree3"]=0
    v["histDegree4"]=0
    histDegree=nx.degree_histogram(G)
    v["histDegree0"]=histDegree[0]
    if len(histDegree)>1:
        v["histDegree1"]=histDegree[1]
        if len(histDegree)>2:
            v["histDegree2"]=histDegree[2]
            if len(histDegree)>3:
                v["histDegree3"]=histDegree[3]
                if len(histDegree)>4:
                    v["histDegree4"]=histDegree[4]
  
    # Adding sMetric
    v["sMetric"]= metric(G)
    
    # Adding graph energy
    energ = graphEnergy (G)
    v["graphEnergy"]=energ     
    
    # Adding average of the average neighboring degrees of all nodes
    av = averageNeighDegree(G)
    v["averageNeighDegree"]=av
    
    # Adding average of closeness over all nodes
    v["averageCloseness"]=average_closeness(G)
    
    # Adding pearson coefficient for the degree sequence of all edges of
    # the graph; nan (e.g. constant degree sequence) is coerced to 0
    pearson = nx.degree_pearson_correlation_coefficient(G)
    if np.isnan(pearson):
        pearson = 0 
    v["pearson"]=pearson

    # Adding rich club metric for all nodes with a degree larger than 1
    rc=richClub(G)
    v["richClub"]=rc
    
    # Adding algebraic connectivity, i.e. the second smallest eigenvalue of the Laplacian
    algConnect = nx.laplacian_spectrum(G)
    algConnect = list(algConnect)
    algConnect = sorted(algConnect)
    v["algConnect"]=algConnect[1]
    
    # Adding diameter of the graph (only defined when G is connected)
    if nx.is_connected(G):
        diam = nx.diameter(G)
    
    else:
        diam="?"
    v["diameter"]=diam

    # Adding average shortest path (only defined when G is connected)
    if nx.is_connected(G):
        avgShortestPath=nx.average_shortest_path_length(G)
    
    else:
        avgShortestPath="?"
    v["avgShortPath"]=avgShortestPath
    
    # Adding graph radius (only defined when G is connected)
    if nx.is_connected(G):
        rad = nx.radius(G)
    else:
        rad="?"
    v["graphRadius"]=rad

    return v
Esempio n. 39
0
def main(argv):
	"""Load net.net (Pajek format) and, depending on mode argv[1]
	("1".."8"), print and/or plot one family of network statistics."""
	mode=argv[1]
	path="net.net"
	G=nx.read_pajek(path)
	# Collapse the MultiGraph returned by read_pajek into a simple Graph.
	G=nx.Graph(G)
#-------------1--------------------------	
	Nnodes = G.number_of_nodes()
	Nedges = G.number_of_edges()

	
	degree=list(G.degree().values())
	meandeg=0
	for x in degree:
		meandeg+=x
	meandeg=meandeg/float(Nnodes)
	if mode=="1":
		print "number of nodes: ", Nnodes
		print "number of links: ",Nedges
		print "average degree: ", meandeg
		plt.title("degree distribution")
		plt.hist(degree)	
#------------------------------------------------
	
#------------------2---------------------------
	clustering=list(nx.clustering(G).values())
	ave_cluster=nx.average_clustering(G)
	if mode=="2":
		print "average clustering: ",ave_cluster
		print "-------------------------"
		print "-----ER-Network--------------"
		print "average degree: ", 2*Nedges/float(Nnodes)
		print "-------------------------"
		plt.title("clustering distribution")
		plt.bar(np.linspace(0,18,19),clustering)
	
	# Keep only degrees > 1; drop the clustering entries of the other
	# nodes. NOTE(review): clustering.pop(i) shifts later indices while i
	# keeps advancing -- verify the clustering/degree pairing is intended.
	cluster_degree=[]
	i=0
	for x in degree:
		if x > 1:
			cluster_degree.append(x)
		else:
			clustering.pop(i)
		i+=1
	
	if mode == "3":	
		plt.title("degree over clustering coefficient")
		plt.plot(clustering,cluster_degree,'o')
#-----------------4---------------------
	neighbours=list(nx.k_nearest_neighbors(G).values())
	if mode=="4":
		print("average neares neighbour degree: ",sum(neighbours)/float(Nnodes))
#------------------5---------------------------
	# Degree assortativity (Pearson correlation of endpoint degrees).
	r=nx.degree_pearson_correlation_coefficient(G)
	if mode=="5":
		print("r: ",r)
#-------------------6-------------------------
	# Indices of the 7 nodes with the highest eigenvector centrality /
	# degree (ascending order within the top 7).
	eig_cent=list(nx.eigenvector_centrality(G).values())
	eig_top=sorted(range(len(eig_cent)), key=lambda i:eig_cent[i])[-7:]
	deg_top=sorted(range(len(degree)), key=lambda i:degree[i])[-7:]
	if mode=="6":
		print("degree top 7:",deg_top)
		print("eigenvector centrality top 7: ", eig_top)
#-----------------7--------------------
	# PageRank under three damping factors.
	pagerank_1=list(nx.pagerank(G,alpha=0.1).values())
	pagerank_2=list(nx.pagerank(G,alpha=0.85).values())
	pagerank_3=list(nx.pagerank(G,alpha=0.99).values())
	if mode=="7":
		plt.plot(range(Nnodes),pagerank_1,'r')
		plt.plot(range(Nnodes),pagerank_2,'g')
		plt.plot(range(Nnodes),pagerank_3,'y')
		
	
	# Mode 8: draw the largest connected component.
	if mode=="8":
		Gcc=sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)[0]
		pos=nx.spring_layout(Gcc)
		nx.draw_networkx_nodes(Gcc,pos,node_size=20)
		nx.draw_networkx_edges(Gcc,pos,alpha=0.4)

	
	plt.show()
	else:
		newGraph = nx.Graph()
		newGraph.add_nodes_from(graph)

		for edge in graph.edges():
			newGraph.add_edge( edge[0], edge[1] )

	graph = newGraph

	print "Checking Network Type *****************************************************\n\n"
	graphType = graph.is_directed()

	print "Calculating Pearson Correlation Coefficient on Degree-Degree Associativity \n\n"
	if graphType:
		degreeAssoc = nx.degree_pearson_correlation_coefficient(graph,x='in',y='out')

	else:
		degreeAssoc = nx.degree_pearson_correlation_coefficient(graph)

	print "Creating Adjacency Matrix Plots *******************************************\n\n"
	plt.hold(True)
	for edge in graph.edges_iter():
		plt.plot(edge[0],edge[1], 'bo')

	plt.title( graphName + " Adjacency Matrix Plot" )
	plt.show()
	plt.hold(False)

	print "Creating Network plot *****************************************************\n\n"
	if graphName == 'Dolphin Social' or graphName == 'Karate Social' or graphName == 'Political Books':
 def test_degree_assortativity_multigraph(self):
     """The multigraph fixture self.M has degree assortativity -1/7."""
     r=nx.degree_pearson_correlation_coefficient(self.M)
     npt.assert_almost_equal(r,-1.0/7.0,decimal=4)
 def test_degree_assortativity_directed(self):
     """The directed fixture self.D has degree assortativity ~ -0.57735."""
     r=nx.degree_pearson_correlation_coefficient(self.D)
     npt.assert_almost_equal(r,-0.57735,decimal=4)
"""
Plot distribution of counts for session lengths
"""
# plt.hist(user_df.session_length, list(range(1, 200)), weights=user_df.user_cnt); plt.show()

G = session_transition_graph(log)
assert(G.number_of_nodes() == 14457)
assert(G.number_of_edges() == 27315)

"""
Plot graph of user sessions parcours
"""
# pos = nx.spring_layout(G); nx.draw_networkx(G, pos, with_labels=False, node_size=1); plt.show()

print("degree_assortativity_coefficient %2.2f" % nx.degree_assortativity_coefficient(G))
print("degree_pearson_correlation_coefficient %2.2f" % nx.degree_pearson_correlation_coefficient(G))

assert(not nx.is_connected(G))
assert(nx.number_connected_components(G) == 171)

counter = Counter([c.number_of_edges() for c in nx.connected_component_subgraphs(G)])
print(counter)
# Counter({1: 141, 2: 19, 3: 4, 5: 2, 6: 2, 4: 1, 27085: 1, 13: 1})

large_graphs = [c for c in nx.connected_component_subgraphs(G) if c.number_of_edges() > 20]
largest_graph = large_graphs[0]
#nx.write_gexf(largest_graph, './model/user_sessions.gexf')
colors = ['r' if 'label' in d and d['label'] == 1 else 'b' for n, d in largest_graph.nodes_iter(data=True)]
sizes = [50 if 'label' in d and d['label'] == 1 else 1 for n, d in largest_graph.nodes_iter(data=True)]
#pos = nx.spring_layout(largest_graph); nx.draw_networkx(largest_graph, pos, with_labels=False, colors=colors, node_size=sizes); plt.show()
def neighbour_assortativity(calculate):
    """For each degree value present in the module-level graph G, apply
    *calculate* to the neighbour degrees of all nodes having that degree
    (0 when there are no neighbour degrees). Returns {degree: value}."""
    degree_of = G.degree()
    result = {}
    for d in {degree_of[n] for n in degree_of}:
        same_degree_nodes = [n for n, nd in degree_of.items() if nd == d]
        neighbour_degrees = neighbors_degree(same_degree_nodes)
        result[d] = calculate(neighbour_degrees) if neighbour_degrees else 0
    return result


print("Pearson: " + str(nx.degree_pearson_correlation_coefficient(G)))
print('')

# Filtracja odstających wierzchołków
G.remove_nodes_from([k for k, v in G.degree().items() if v > 500])
print("Filtracja grafu")
print("")

print("Pearson: " + str(nx.degree_pearson_correlation_coefficient(G)))
print('')

# Wyświetl wykres średniej stopnia sąsiedztwa
val = neighbour_assortativity(statistics.mean)
plt.plot(list(map(lambda i: i[0], val.items())), list(map(lambda i: i[1], val.items())), 'ro', numpy.arange(0, 100),
         numpy.arange(0, 100))
plt.axis([0, 150, 0, 40])
Esempio n. 45
0
 def calculatePearsonCorrelationCoefficient(self):
     """Return the degree assortativity (Pearson correlation of endpoint
     degrees) of self.amazonGraph."""
     return nx.degree_pearson_correlation_coefficient(self.amazonGraph)
 def test_degree_assortativity_undirected(self):
     """The undirected path fixture self.P4 has degree assortativity -1/2."""
     r=nx.degree_pearson_correlation_coefficient(self.P4)
     npt.assert_almost_equal(r,-1.0/2,decimal=4)