Esempio n. 1
0
 def computeFeatures(self, G, features_nx, features_nk):
     """Pre-compute the eigenvalue spectra needed by the requested features,
     then delegate the actual feature computation to NodesFeatures.

     Only the spectra required by the names present in `features_nx` are
     computed, and each is cached on `self.eigenvalues` under its key
     ("laplacian", "adjacency", "normalized_laplacian").

     :param G: networkx graph to analyse
     :param features_nx: collection of networkx-based feature names
     :param features_nk: collection of networkit-based feature names
     :return: whatever NodesFeatures.computeFeatures returns
     """
 
     # Laplacian spectrum is shared by these three features.
     if("effective_graph_resistance" in features_nx or "nb_spanning_trees" in features_nx or "algebraic_connectivity" in features_nx):
         if(self.params["verbose"]):
             print("Computing laplacian_eigenvalues")
             # NOTE(review): `s` is only defined when verbose is on; the
             # matching "Finish" print below is also verbose-guarded, so
             # this is consistent, but the pairing is easy to break.
             s = time.time()
         # np.real drops the (numerically zero) imaginary parts.
         self.eigenvalues["laplacian"] = np.real(nx.laplacian_spectrum(G))
         if(self.params["verbose"]):
             print("Finish laplacian_eigenvalues (%s)" % (timeFormat(time.time()-s)))
             
     # Adjacency spectrum is shared by these three features.
     if("largest_eigenvalue" in features_nx or "symmetry_ratio" in features_nx or "natural_connectivity" in features_nx):
         if(self.params["verbose"]):
             print("Computing adjacency_eigenvalues")
             s = time.time()
         self.eigenvalues["adjacency"] = np.real(nx.adjacency_spectrum(G))
         if(self.params["verbose"]):
             print("Finish adjacency_eigenvalues (%s)" % (timeFormat(time.time()-s)))
             
     # Normalized-Laplacian spectrum backs the weighted-spectrum features.
     if("weighted_spectrum_3" in features_nx or "weighted_spectrum_4" in features_nx):
         if(self.params["verbose"]):
             print("Computing normalized_laplacian_eigenvalues")
             s = time.time()
         self.eigenvalues["normalized_laplacian"] = np.real(nx.normalized_laplacian_spectrum(G))
         if(self.params["verbose"]):
             print("Finish normalized_laplacian_eigenvalues (%s)" % (timeFormat(time.time()-s)))
     
     # Explicit base-class call; the cached spectra above are presumably
     # consumed there via self.eigenvalues.
     return(NodesFeatures.computeFeatures(self, G, features_nx, features_nk))
Esempio n. 2
0
    def detect(self):
        """detect the source with Dynamic Importance.

        For each node u, the drop of the largest adjacency eigenvalue when
        u is removed (relative to the original largest eigenvalue) is used
        as its centrality; nodes are then ranked by that centrality.

        Returns:
            @rtype:int
            the detected source centrality
        """
        self.reset_centrality()
        adjacent_matrix = nx.adjacency_matrix(self.subgraph,
                                              weight='weight').toarray()
        eigenvalues = nx.adjacency_spectrum(self.subgraph, weight='weight')
        # NOTE(review): eigenvalues are complex; max() relies on numpy's
        # ordering of complex scalars — confirm this works on the numpy
        # version in use.
        eigenvalue_max = max(eigenvalues)
        i = 0
        # i tracks the matrix row/column of node u; this assumes
        # nx.nodes() iterates in the same order used by adjacency_matrix.
        for u in nx.nodes(self.subgraph):
            adjacent_matrix_new = np.delete(adjacent_matrix, i,
                                            0)  # remove the row for node u
            adjacent_matrix_new = np.delete(adjacent_matrix_new, i,
                                            1)  # remove the column for node u
            # NOTE(review): both `np` and `numpy` aliases are used here —
            # presumably both are imported at module level; verify.
            eigenvalue_max_new = max(numpy.linalg.eigvals(adjacent_matrix_new))
            # NOTE(review): (G, name, values) argument order is the
            # networkx < 2.0 signature of set_node_attributes; confirm the
            # pinned networkx version.
            nx.set_node_attributes(
                self.subgraph, 'centrality',
                {u: abs(eigenvalue_max - eigenvalue_max_new) / eigenvalue_max})
            i += 1

        return self.sort_nodes_by_centrality()
Esempio n. 3
0
def largest_eigenvalue(G, nodes, eigenvalues=None):
    """Return the largest adjacency-matrix eigenvalue of G.

    :param G: networkx graph (only used when no cached spectrum is given)
    :param nodes: unused here; kept for the shared feature-function signature
    :param eigenvalues: optional dict of cached spectra; the value under the
        "adjacency" key is used when present and not None
    :return: np.float64, the maximum (real) adjacency eigenvalue
    """
    adjacency_eigenvalues = None
    if eigenvalues is not None:
        adjacency_eigenvalues = eigenvalues["adjacency"]
    if adjacency_eigenvalues is None:
        # Fall back to computing the spectrum; real parts only (the
        # imaginary components are numerical noise for undirected graphs).
        adjacency_eigenvalues = np.real(nx.adjacency_spectrum(G))
    return np.float64(max(adjacency_eigenvalues))
	def create_graph(self):
		"""Build the graph from self.only_nodes / self.No_duplicate_links and
		compute a set of basic topology metrics.

		Returns a tuple of (graph, node list with duplicates, node set,
		link density, average neighbor degree).
		"""
		g = nx.Graph()
		# pandas Series.append is deprecated (removed in pandas 2.0);
		# concatenate the two endpoint columns as plain lists instead.
		nodes_list = (self.only_nodes.iloc[:, 0].values.tolist()
		              + self.only_nodes.iloc[:, 1].values.tolist())
		No_duplicate_nodes = set(nodes_list)
		g.add_nodes_from(No_duplicate_nodes)
		g.add_edges_from(self.No_duplicate_links)
		link_density = nx.density(g)
		average_degree = nx.average_neighbor_degree(g)
		degree_correlation = nx.degree_pearson_correlation_coefficient(g)
		average_clustering = nx.average_clustering(g)
		# Requires a connected graph; raises otherwise.
		average_hopcount = nx.average_shortest_path_length(g)
		diameter = nx.diameter(g)
		A_eigenvalue = nx.adjacency_spectrum(g)
		G_eigenvalue = nx.laplacian_spectrum(g)
		# NOTE(review): degree_correlation, average_clustering,
		# average_hopcount, diameter and the spectra are computed but not
		# returned, exactly as in the original — presumably for side-effect
		# validation only; confirm before removing.
		return g, nodes_list, No_duplicate_nodes, link_density, average_degree
Esempio n. 5
0
def network_metrics(network, network_red, n_fibres, tag=''):
    """Analyse networkx Graph object"""

    def _attempt(label, compute):
        # Run a metric, logging any failure and substituting None.
        try:
            return compute()
        except Exception as err:
            logger.debug(f'Network {label} calculation failed: {str(err)}')
            return None

    database = pd.Series(dtype=object)

    database['No. Fibres'] = n_fibres

    # Fraction of nodes acting as cross-links (degree above 2).
    degrees = np.array([pair[1] for pair in network.degree], dtype=int)
    database[f"{tag} Network Cross-Link Density"] = ((degrees > 2).sum() /
                                                     n_fibres)

    database[f"{tag} Network Degree"] = _attempt(
        'Degree',
        lambda: nx.degree_pearson_correlation_coefficient(
            network, weight='r') ** 2)

    database[f"{tag} Network Eigenvalue"] = _attempt(
        'Eigenvalue',
        lambda: np.real(nx.adjacency_spectrum(network_red).max()))

    database[f"{tag} Network Connectivity"] = _attempt(
        'Connectivity',
        lambda: nx.algebraic_connectivity(network_red, weight='r'))

    return database
Esempio n. 6
0
 def spectrum(self) -> np.ndarray:
     """Adjacency-matrix eigenvalues of ``self.G``,
     returned as real values in decreasing order.
     """
     eigvals = nx.adjacency_spectrum(self.G)
     order = np.argsort(eigvals)[::-1]
     return np.real(eigvals[order])
Esempio n. 7
0
def graphEnergy (G):
    """Return the graph energy of G: the sum of the absolute values of its
    adjacency-matrix eigenvalues.

    Bug fixed: the original `type(elt) != complex` guard was intended to
    skip complex eigenvalues, but nx.adjacency_spectrum returns numpy
    complex scalars (not Python `complex`), so the guard matched every
    element regardless. For an undirected graph the spectrum is real, so
    we sum |Re(lambda)| explicitly.
    """
    return sum(abs(ev.real) for ev in nx.adjacency_spectrum(G))
Esempio n. 8
0
def main():
    """Interrogate the g560 graph.

    Builds the graph via make() and prints basic structural statistics,
    the eccentricity distribution, the adjacency spectrum, and a textual
    adjacency matrix. Returns 0 as a process exit code.
    """
    # g560 = make_symmetrical("/Users/rjs/dev/g560/embeddings/Gewirtz_graph_embeddings_4.svg")
    # print("g560 (a.k.a GP-graph)")
    g560 = make()
    print("=====================")
    print()
    print("Number of nodes :", len(g560))
    print("Number of edges :", len(g560.edges))
    print("Diameter        :", diameter(g560))
    print("Radius          :", radius(g560))
    print("Average shortest path length :", average_shortest_path_length(g560))

    #code.interact(local=locals())

    # Histogram: how many nodes have each eccentricity value.
    eccentricities = Counter(eccentricity(g560).values())

    print("Eccentricities")
    for e in sorted(eccentricities):
        print("  {} for {} nodes".format(e, eccentricities[e]))

    print("Adjacency spectrum :", adjacency_spectrum(g560))

    # ASCII adjacency matrix: 'X' marks an edge, ' ' its absence.
    text_matrix = '\n'.join(''.join(" X"[g560.has_edge(u, v)] for v in g560.nodes) for u in g560.nodes)

    print(text_matrix)

    return 0
Esempio n. 9
0
def graphEnergy(G):
    """Return the graph energy of G: the sum of the absolute values of its
    adjacency-matrix eigenvalues.

    Bug fixed: `type(elt) != complex` never matched its intent because
    nx.adjacency_spectrum yields numpy complex scalars, not Python
    `complex`; every eigenvalue was summed regardless. For an undirected
    graph the spectrum is real, so sum |Re(lambda)| explicitly.
    """
    return sum(abs(ev.real) for ev in nx.adjacency_spectrum(G))
    def data_analysis(self, graph):
        """Compute a 13-element feature vector for `graph`.

        Indices: 0 avg clustering, 1 avg square clustering, 2 avg shortest
        path (normalized by a path graph), 3 degree assortativity,
        4 normalized diameter, 5 density, 6 normalized edge connectivity,
        7-10 Freeman centralization of degree/closeness/betweenness/
        eigenvector centrality (normalized by a star graph), 11 effective
        graph resistance term, 12 normalized spectral radius.

        Assumes `graph` is connected (diameter / shortest-path calls raise
        otherwise).
        """
        data_vec = [0] * 13
        num_vertex = nx.number_of_nodes(graph)

        data_vec[0] = nx.average_clustering(graph)

        sq_values = list(nx.square_clustering(graph).values())
        data_vec[1] = sum(sq_values) / len(sq_values)

        # Normalize by a path graph of the same order (maximal avg distance).
        g = nx.path_graph(num_vertex)
        data_vec[2] = nx.average_shortest_path_length(
            graph) / nx.average_shortest_path_length(g)

        data_vec[3] = nx.degree_pearson_correlation_coefficient(graph)
        # Assortativity is NaN for e.g. regular graphs; map it to 0.
        if math.isnan(data_vec[3]) is True:
            data_vec[3] = 0

        data_vec[4] = nx.diameter(graph) / (num_vertex - 1)
        data_vec[5] = nx.density(graph)

        data_vec[6] = nx.edge_connectivity(graph) / (num_vertex - 1)

        # Star graph maximizes centralization; use it as the normalizer.
        g = nx.star_graph(num_vertex - 1)
        Freeman_degree_norm = self.freeman_centralization(
            nx.degree_centrality(g))
        Freeman_close_norm = self.freeman_centralization(
            nx.closeness_centrality(g))
        Freeman_between_norm = self.freeman_centralization(
            nx.betweenness_centrality(g))
        # need to change
        Freeman_eigen_norm = self.freeman_centralization(
            nx.eigenvector_centrality_numpy(g))

        data_vec[7] = self.freeman_centralization(
            nx.degree_centrality(graph)) / Freeman_degree_norm
        data_vec[8] = self.freeman_centralization(
            nx.closeness_centrality(graph)) / Freeman_close_norm
        data_vec[9] = self.freeman_centralization(
            nx.betweenness_centrality(graph)) / Freeman_between_norm

        # warning, the way it normalized may not correct
        data_vec[10] = self.freeman_centralization(
            nx.eigenvector_centrality_numpy(graph)) / Freeman_eigen_norm

        # Sum of reciprocals of the non-zero Laplacian eigenvalues
        # (effective graph resistance, up to the scaling below).
        egvl_lap = nx.laplacian_spectrum(graph)
        egvl_lap = np.sort(egvl_lap)
        egvl_lap = np.delete(egvl_lap, 0, 0)  # drop the zero eigenvalue
        summ = 0
        for mu in egvl_lap:
            summ += (1 / mu)

        summ = summ * num_vertex
        data_vec[11] = (num_vertex - 1) / summ

        # for simple graph(adj matrix is symmetric), eigenvalue must be real number.
        egvl_adj = np.real(nx.adjacency_spectrum(graph))
        data_vec[12] = max(egvl_adj) / (num_vertex - 1)

        return data_vec
Esempio n. 11
0
def natural_connectivity(G, nodes, eigenvalues=None):
    """Return the natural connectivity of G:
    log of the mean of exp(lambda_i) over the adjacency eigenvalues.

    :param G: networkx graph (only used when no cached spectrum is given)
    :param nodes: unused here; kept for the shared feature-function signature
    :param eigenvalues: optional dict of cached spectra; the value under the
        "adjacency" key is used when present and not None
    :return: np.float64
    """
    adjacency_eigenvalues = None
    if eigenvalues is not None:
        adjacency_eigenvalues = eigenvalues["adjacency"]
    if adjacency_eigenvalues is None:
        adjacency_eigenvalues = np.real(nx.adjacency_spectrum(G))
    nc = np.log(np.mean(np.exp(adjacency_eigenvalues)))
    return np.float64(nc)
Esempio n. 12
0
def symmetry_ratio(G, nodes, eigenvalues=None):
    """Return the symmetry ratio of G: the number of distinct adjacency
    eigenvalues divided by (diameter + 1).

    :param G: networkx graph
    :param nodes: forwarded to the sibling diameter(G, nodes) helper
    :param eigenvalues: optional dict of cached spectra; the value under the
        "adjacency" key is used when present and not None
    :return: np.float64
    """
    adjacency_eigenvalues = None
    if eigenvalues is not None:
        adjacency_eigenvalues = eigenvalues["adjacency"]
    if adjacency_eigenvalues is None:
        adjacency_eigenvalues = np.real(nx.adjacency_spectrum(G))
    r = len(np.unique(adjacency_eigenvalues)) / (diameter(G, nodes) + 1)
    return np.float64(r)
Esempio n. 13
0
def sorted_adjacency_spectrum(
        G: Union[nx.Graph, nx.MultiDiGraph]) -> np.ndarray:
    """Adjacency-matrix eigenvalues of ``G``,
    returned as real values in decreasing order.
    """
    eigvals = nx.adjacency_spectrum(G)
    order = np.argsort(eigvals)[::-1]
    return np.real(eigvals[order])
Esempio n. 14
0
def spectrum(aGraph):
    """Map each adjacency eigenvalue of aGraph to its relative frequency."""
    eigvals = nx.adjacency_spectrum(aGraph)
    total = len(eigvals)
    freq = {}
    # Accumulate 1/total per occurrence, exactly as two-pass init+add would.
    for eigenvalue in eigvals:
        freq[eigenvalue] = freq.get(eigenvalue, 0) + 1. / total
    return freq
Esempio n. 15
0
def AdjSpectrum(G):
    """
    Calculate the eigenvalues of the adjacency matrix of a network
    :param G: [networkx graph object] this is a networkx graph object
    :return: [list] Returns a list of the real part of the eigenvalues of the adjacency matrix of G
    """
    return [value.real for value in nx.adjacency_spectrum(G)]
Esempio n. 16
0
def katz_strategy(graph, num_seeds, num_rounds):
    """Seed selection by Katz centrality: return the num_seeds highest-Katz
    nodes, repeated num_rounds times.

    Bug fixed: the original used `1 / int(lam_max)`, which raises on the
    complex scalars returned by nx.adjacency_spectrum and, where it worked,
    truncation could push alpha above 1/lambda_max where Katz centrality is
    not defined. Use the real part with the safe 1/(lambda_max + 1) alpha.
    """
    # Largest adjacency eigenvalue (real parts; the spectrum of an
    # undirected graph is real).
    lam_max = max(nx.adjacency_spectrum(graph).real)
    node_values = nx.katz_centrality_numpy(graph, 1 / (lam_max + 1))

    top_katz = sorted(node_values.items(),
                      key=operator.itemgetter(1),
                      reverse=True)[:num_seeds]
    highest_katz = [pair[0] for pair in top_katz]
    return (highest_katz * num_rounds)
Esempio n. 17
0
def dynamical_importance(G):
    """Compute dynamical importance for G.

    Ref: Restrepo, Ott, Hunt. Phys. Rev. Lett. 97, 094102 (2006)

    Fixes over the original:
    - nx.adjacency_spectrum gives no ordering guarantee, so the largest
      eigenvalue is taken with max() over the real parts instead of [0].
    - iterates the actual node labels instead of assuming nodes 0..n-1.
    - real parts are used throughout, so no complex value is ever assigned
      into the float result array.
    """
    # Largest adjacency eigenvalue of the full graph.
    eigvals = nx.adjacency_spectrum(G)
    lambda0 = max(eigvals.real)
    # For each node, remove it and measure the drop in the top eigenvalue.
    nnod = G.number_of_nodes()
    dyimp = np.empty(nnod, float)
    for i, node in enumerate(G.nodes()):
        gn = G.copy()
        gn.remove_node(node)
        lambda_n = max(nx.adjacency_spectrum(gn).real)
        dyimp[i] = lambda0 - lambda_n
    # Final normalization
    dyimp /= lambda0
    return dyimp
Esempio n. 18
0
    def adj_eigenvalues(self):
        """Compute, record in self.stats, and return the eigenvalues of
        the adjacency matrix of self.graph.

        :return: adjacency-matrix eigenvalues
        """
        CP.print_none('Calculating eigenvalues of Adjacency Matrix')

        eigvals = nx.adjacency_spectrum(self.graph)
        self.stats['adj_eigenvalues'] = eigvals
        return eigvals
Esempio n. 19
0
def dynamical_importance(graph):
    """Compute dynamical importance for graph.

    Ref: Restrepo, Ott, Hunt. Phys. Rev. Lett. 97, 094102 (2006)

    Fixes over the original:
    - nx.adjacency_spectrum gives no ordering guarantee, so the largest
      eigenvalue is taken with max() over the real parts instead of [0].
    - iterates the actual node labels instead of assuming nodes 0..n-1.
    - real parts are used throughout, so no complex value is ever assigned
      into the float result array.
    """
    # Largest adjacency eigenvalue of the full graph.
    eigvals = nx.adjacency_spectrum(graph)
    lambda0 = max(eigvals.real)
    # For each node, remove it and measure the drop in the top eigenvalue.
    nnod = graph.number_of_nodes()
    dyimp = np.empty(nnod, float)
    for i, node in enumerate(graph.nodes()):
        gn = graph.copy()
        gn.remove_node(node)
        lambda_n = max(nx.adjacency_spectrum(gn).real)
        dyimp[i] = lambda0 - lambda_n
    # Final normalization
    dyimp /= lambda0
    return dyimp
Esempio n. 20
0
def ct_stats():
    """Build the correlation network and print a table of four node
    centralities (degree, closeness, betweenness, Katz), each column
    independently sorted ascending.
    """
    # import the correlation network
    H = bnx.build_nx()

    close_ct_d = nx.closeness_centrality(H, distance="weight")
    between_ct_d = nx.betweenness_centrality(H, weight="weight")
    degree_ct_d = nx.degree_centrality(H)
    # NOTE(review): max() over the complex adjacency spectrum yields a
    # complex alpha here — presumably the graph's spectrum is real;
    # confirm, or take .real explicitly.
    katz_ct_d = nx.katz_centrality(
        H,
        weight="weight",
        alpha=1 / (max(nx.adjacency_spectrum(H)) + 1),
        beta=close_ct_d,
    )

    degree_ct_s = pd.Series(degree_ct_d).round(3)
    close_ct_s = pd.Series(close_ct_d).round(3)
    between_ct_s = pd.Series(between_ct_d).round(3)
    katz_ct_s = pd.Series(katz_ct_d).round(3).astype("float")

    # NOTE(review): these reset_index() calls return new objects that are
    # discarded — they have no effect as written.
    close_ct_s.reset_index()
    degree_ct_s.reset_index()
    between_ct_s.reset_index()
    katz_ct_s.reset_index()

    # Each DataFrame pairs node labels with one centrality, sorted by it.
    degree_ct_df = (pd.DataFrame({
        "stock_rank_1": degree_ct_s.index,
        "degree": degree_ct_s.values
    }).sort_values(by="degree", ascending=True).reset_index().drop("index",
                                                                   axis=1))

    close_ct_df = (pd.DataFrame({
        "stock_rank_2": close_ct_s.index,
        "closeness": close_ct_s.values
    }).sort_values(by="closeness", ascending=True).reset_index().drop("index",
                                                                      axis=1))

    between_ct_df = (pd.DataFrame({
        "stock_rank_3": between_ct_s.index,
        "between": between_ct_s.values
    }).sort_values(by="between", ascending=True).reset_index().drop("index",
                                                                    axis=1))

    katz_ct_df = (pd.DataFrame({
        "stock_rank_4": katz_ct_s.index,
        "katz": katz_ct_s.values
    }).sort_values(by="katz", ascending=True).reset_index().drop("index",
                                                                 axis=1))

    # Side-by-side join of the four independently sorted rankings.
    df1 = degree_ct_df.join(close_ct_df)
    df2 = df1.join(between_ct_df)
    ct_df = df2.join(katz_ct_df)
    # Returns print()'s None; the table is emitted to stdout.
    return print(ct_df)
def create(degree, nodes):
    """Return a pair of PING graphs: two non-isomorphic, (near-)cospectral
    graphs built by attaching one extra node to complementary halves of a
    shuffled random regular graph.

    Returns 0 (error-code convention kept from the original) when degree
    exceeds nodes-1 or nodes is odd.

    Note: the unreachable code that followed the `while True` loop in the
    original (the loop only exits by returning) has been removed.
    """
    if degree > nodes - 1:
        print("Error: Degree must be at most (nodes-1)")
        return 0
    if nodes % 2 != 0:
        print("Error: Nodes must be a multiple of 2")
        return 0

    node_list = list(range(nodes))

    while True:
        random.shuffle(node_list)
        # Generate first PING graph
        seed = 1
        g1 = nx.random_regular_graph(degree, nodes, seed=seed)
        g2 = copy.deepcopy(g1)
        g1.add_node(nodes + 1)
        g2.add_node(nodes + 1)
        half = int(nodes / 2)
        for connect_to_node in node_list[:half]:
            g1.add_edge(nodes + 1, connect_to_node)
        for connect_to_node in node_list[half:]:
            g2.add_edge(nodes + 1, connect_to_node)
        isomorphic = nx.is_isomorphic(g1, g2)
        cospectral_dist = abs(
            cosine(nx.adjacency_spectrum(g1), nx.adjacency_spectrum(g2)))
        # Accept the first non-isomorphic pair whose spectra nearly match.
        if not isomorphic and cospectral_dist < 0.01:
            return g1, g2
Esempio n. 22
0
def plot_spectral_adj(graph, filename, label, outpath):
    """ plot spectral distribution of adjacency matrix"""
    # Fall back to loading the edge list when no graph is supplied.
    if not graph:
        graph = nx.read_edgelist(filename)
    values, counts = np.unique(nx.adjacency_spectrum(graph),
                               return_counts=True)
    fig = plt.figure()
    axes = fig.add_subplot(1, 1, 1)
    axes.bar(values, counts)
    axes.set_title('spectral distribution of adjacency matrix')
    axes.set_xlabel('eigenvalue')
    axes.set_ylabel('frequency')
    plt.savefig(outpath + label + '-spectral-adj.svg')
Esempio n. 23
0
    def preprocess_transition_probs(self):
        '''
        Preprocessing of transition probabilities for guiding the random walks.

        Builds alias-sampling tables for nodes (first-step distribution over
        a node's neighbors, proportional to edge weight) and for edges
        (second-step distribution via get_alias_edge), storing them on
        self.alias_nodes / self.alias_edges.
        '''
        G = self.G
        is_directed = self.is_directed

        # Compute eigenvector centrality
        #centrality = nx.eigenvector_centrality_numpy(G, weight='weight')
        #perron = max(nx.adjacency_spectrum(G, 'weight'))
        centrality = nx.eigenvector_centrality_numpy(G)
        # Perron (largest) adjacency eigenvalue; forwarded to get_alias_edge.
        # NOTE(review): complex-valued as returned by adjacency_spectrum.
        perron = max(nx.adjacency_spectrum(G))
        #nx.draw(G)

        alias_nodes = {}
        for node in G.nodes():
            #(centrality[nbr] / centrality[node])
            #unnormalized_probs = [np.exp(-G[node][nbr]['weight'])*(centrality[nbr] / centrality[node])/perron for nbr in sorted(G.neighbors(node))]
            #unnormalized_probs = [G[node][nbr]['weight']*(centrality[nbr] / centrality[node])/perron for nbr in sorted(G.neighbors(node))]
            #unnormalized_probs = [(centrality[nbr] / centrality[node]) / perron for nbr in sorted(G.neighbors(node))]
            # Plain edge-weight proportional sampling; the commented
            # variants above are earlier centrality-weighted experiments.
            unnormalized_probs = [
                G[node][nbr]['weight'] for nbr in sorted(G.neighbors(node))
            ]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [
                float(u_prob) / norm_const for u_prob in unnormalized_probs
            ]
            alias_nodes[node] = alias_setup(normalized_probs)

        alias_edges = {}
        triads = {}

        # Add self-loops as edges
        #for node in G.nodes():
        #	G.add_edge(node,node, weight=1.0)

        # Directed: one alias table per edge. Undirected: also add the
        # reverse direction explicitly.
        if is_directed:
            for edge in G.edges():
                alias_edges[edge] = self.get_alias_edge(
                    edge[0], edge[1], centrality, perron)
        else:
            for edge in G.edges():
                alias_edges[edge] = self.get_alias_edge(
                    edge[0], edge[1], centrality, perron)
                alias_edges[(edge[1], edge[0])] = self.get_alias_edge(
                    edge[1], edge[0], centrality, perron)

        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges

        return
Esempio n. 24
0
    def spectral_gap(self):
        """ Spectral gap. Difference between the largest and second-largest
        eigenvalue of the adj matrix.

        Bug fixed: nx.adjacency_spectrum gives no ordering guarantee, so
        the eigenvalues are sorted (descending, by real part) before taking
        the top two; the original used the first two entries as returned.

        Returns
        -------
        spectral_gap : float
            Spectral gap
        """

        eigs = sorted(nx.adjacency_spectrum(self).real, reverse=True)
        return float(eigs[0] - eigs[1])
Esempio n. 25
0
    def get_center(self, tempGraph):
        """Set self.center to the node with the highest Katz centrality.

        Fixes: the largest adjacency eigenvalue is taken over the real
        parts (adjacency_spectrum returns complex scalars, which would make
        alpha complex); the unused `phi` golden-ratio constant and its
        `math` import were removed.
        """
        # Largest adjacency eigenvalue (real parts).
        maxnumber = max(nx.adjacency_spectrum(tempGraph).real)
        print(maxnumber)
        # Katz requires alpha < 1/lambda_max; back off by 0.01.
        centrality = nx.katz_centrality(tempGraph, 1 / maxnumber - 0.01)
        katz_centrality = sorted(centrality.items(),
                                 key=lambda x: x[1],
                                 reverse=True)
        print('katz_centrality', katz_centrality)
        self.center = katz_centrality[0][0]
Esempio n. 26
0
def RBS(G, α=0.95, K=20):
    '''Nx2K Feature Matrix with in & out paths of length `K` for every node. Column convergence is weighted by α.'''
    A = nx.adjacency_matrix(G)
    λ = np.max(nx.adjacency_spectrum(G))
    β = np.real(α / λ)

    n = len(G.nodes)
    X = np.zeros((n, 2 * K))
    # Build matrix powers incrementally: (βM)^(l+1) = (βM)^l · (βM).
    # The original recomputed the full power at every step, which the
    # inline comment itself flagged as inefficient (O(K) multiplies per
    # column instead of one).
    B_in = β * np.transpose(A)   # weighted in-path operator
    B_out = β * A                # weighted out-path operator
    P_in = B_in
    P_out = B_out
    for l in range(K):
        X[:, l] = P_in.toarray().sum(axis=1)
        X[:, l + K] = P_out.toarray().sum(axis=1)
        if l + 1 < K:
            P_in = P_in @ B_in
            P_out = P_out @ B_out

    return X
Esempio n. 27
0
def create(degree, nodes):
    # Returns a pair of PING
    # Python 2 code (print statements, xrange mention, integer division).
    # Validation: returns the error code 0 instead of a tuple on bad input.
    if degree > nodes - 1:
        print "Error: Degree must be at most (nodes-1)"
        return 0
    if nodes % 2 != 0:
        print "Error: Nodes must be a multiple of 2"
        return 0

    node_list = range(nodes)

    # TODO: Find a better way to find a PING graph. Avoid using factorial or while(True)
    # for _ in xrange(factorial(nodes)):
    while (True):
        random.shuffle(node_list)
        # Generate first PING graph
        seed = 1
        g1 = nx.random_regular_graph(degree, nodes, seed=seed)
        g2 = copy.deepcopy(g1)
        # for new_node in [nodes + 1, nodes + 2]:
        # Attach one extra node to complementary halves of the shuffled
        # node list, producing two candidate graphs.
        g1.add_node(nodes + 1)
        g2.add_node(nodes + 1)
        for connect_to_node in node_list[:nodes / 2]:
            g1.add_edge(nodes + 1, connect_to_node)
        for connect_to_node in node_list[nodes / 2:]:
            g2.add_edge(nodes + 1, connect_to_node)
        isomorphic = nx.is_isomorphic(g1, g2)
        # Cosine distance between the two adjacency spectra: near zero
        # means (near-)cospectral.
        cospectral_dist = abs(
            cosine(nx.adjacency_spectrum(g1), nx.adjacency_spectrum(g2)))
        if not isomorphic and cospectral_dist < 0.01:
            return g1, g2

    # NOTE(review): everything below is unreachable — the while-True loop
    # above only exits via return.
    if nx.is_isomorphic(g1, g2):
        print "Error: Cannot create a PING for this combination of degree and nodes"
        return 0

    return g1, g2
Esempio n. 28
0
    def spectral_gap(self):
        """
        Spectral gap. Difference between the largest and second-largest
        eigenvalue of the adjacency matrix of the undirected view.

        Bug fixed: nx.adjacency_spectrum gives no ordering guarantee, so
        the eigenvalues are sorted (descending, by real part) before taking
        the top two; the original used the first two entries as returned.

        Returns
        -------
        Spectral gap (float)

        """

        eigs = sorted(nx.adjacency_spectrum(self.to_undirected()).real,
                      reverse=True)
        # Descending sort makes the difference non-negative, so the
        # original abs() is no longer needed.
        return float(eigs[0] - eigs[1])
Esempio n. 29
0
def getStatsA(graph):
    """Print basic statistics of `graph` (size, density, degree moments,
    assortativity, clustering, distances, and spectral quantities).

    Bugs fixed:
    - Algebraic connectivity is the second SMALLEST Laplacian eigenvalue,
      not the second largest: use laplacianSpectrum[1], not [-2], and the
      printed label was corrected accordingly.
    - getVariance passed a `map` object to getMean, whose len() fails on
      Python 3; a list comprehension is used instead.
    - getMean is expressed with the builtin sum(), removing the implicit
      dependency on py2's builtin reduce.
    """
    getMean = lambda container: sum(container) / float(len(container))
    getVariance = lambda container, mean: getMean([pow(float(x) - mean, 2) for x in container])
    # NOTE(review): graph.degree(nodes) returning a dict is networkx 1.x
    # API — confirm the pinned networkx version.
    degreeNodes = graph.degree(graph.nodes()).values()

    numOfNodes = len(graph.nodes())
    numOfEdges = len(graph.edges())
    graphDensity = 2*numOfEdges/float(numOfNodes*(numOfNodes-1))
    averageDegree = getMean(degreeNodes)
    varianceDegree = getVariance(degreeNodes, averageDegree)


    print("Exercise 1:")
    print("Number of nodes: ", str(numOfNodes))
    print("Number of edges: ", str(numOfEdges))
    print("Link density: ", str(graphDensity))
    print("Average degree: ", str(averageDegree))
    print("Variance degree: ", str(varianceDegree))

    print("Exercise 2:")

    assortativity = nx.degree_assortativity_coefficient(graph)
    print("Exercise 3:")
    print("Assortativity: ", str(assortativity), "Positive value indications that there is a correlation between nodes of similar degree," \
                                                    + " while negative values indicate that there is a correlation between nodes of different degree.")

    clusteringCoefficient = nx.average_clustering(graph)
    print("Exercise 4:")
    print("Average clustering coefficient: ", str(clusteringCoefficient))

    averageHopCount = nx.average_shortest_path_length(graph)
    diameter = nx.diameter(graph)
    print("Exercise 5:")
    print("Average hop count: ", str(averageHopCount))
    print("Diameter:", str(diameter))

    print("Exercise 6:")

    adjacencySpectrum = sorted(nx.adjacency_spectrum(graph))
    print("Exercise 7:")
    print("Spectral radius (largest eigenvalue of the adjacency matrix):", str(adjacencySpectrum[-1]))

    laplacianSpectrum = sorted(nx.laplacian_spectrum(graph))
    print("Exercise 8:")
    # Ascending sort: index 0 is the zero eigenvalue, index 1 is lambda_2.
    print("Algebraic connectivity (second smallest eigenvalue of the laplacian matrix):", str(laplacianSpectrum[1]))
Esempio n. 30
0
def compute_features(G,checks=False):
    """ Computes features of the graph.

    Returns a tuple: (normalized max adjacency eigenvalue, number of
    connected components, normalized maximal-independent-set size,
    density, average clustering, triangles per node, fraction of degree-1
    nodes, fraction of degree-0 nodes, giant-component order and size).

    NOTE(review): written against networkx 1.x — len(G.neighbors(u))
    fails on the 2.x iterator and connected_component_subgraphs was
    removed in 2.4; `checks` is unused. Verify the pinned version.
    """

    n = G.order()
    
    # NOTE(review): max over the complex spectrum relies on numpy's
    # ordering of complex scalars.
    nmeval = max(nx.adjacency_spectrum(G)) / n
    comps = nx.number_connected_components(G)
    mis = len(nx.maximal_independent_set(G)) / n
    density = nx.density(G)
    cc = nx.average_clustering(G)
    tris = sum(nx.triangles(G).values()) / n
    fracdeg1 = sum([len(G.neighbors(u)) == 1 for u in G]) / n
    fracdeg0 = sum([len(G.neighbors(u)) == 0 for u in G]) / n

    # Giant connected component; assumes the first subgraph is the
    # largest (nx 1.x returned them sorted by size).
    Gcc_list = list(nx.connected_component_subgraphs(G))
    Gcc = Gcc_list[0]
    ngcc = Gcc.order()
    mgcc = Gcc.size()
    
    return (nmeval,comps,mis,density,cc,tris,fracdeg1,fracdeg0,ngcc,mgcc)
Esempio n. 31
0
def spectral_gap(G):
    """
    Spectral gap

    Difference between the largest and second-largest eigenvalue of the
    adjacency matrix of the undirected view of G.

    Bug fixed: nx.adjacency_spectrum gives no ordering guarantee, so the
    eigenvalues are sorted (descending, by real part) before taking the
    top two; the original used the first two entries as returned.

    Parameters
    ----------
    G: networkx MultiDiGraph
        Graph

    Returns
    -------
    Spectral gap (float)

    """
    uG = G.to_undirected()  # uses an undirected graph
    eigs = sorted(nx.adjacency_spectrum(uG).real, reverse=True)
    # Descending sort makes the difference non-negative; abs() dropped.
    return float(eigs[0] - eigs[1])
Esempio n. 32
0
def adjSpec(graphDic):
    """
    for each base graph, we take all of its samples and calculate for each one its eigenvalue of its adjacency matrix
    draws a graph where :
        the vertical axis is the graphs (according their Ts),
        the horizontal axis there are the eigenvalues.
        each graph (usually) has more than one eigenvalue, which are represented by dots
    :param graphDic: dictionary of a base graph with the samples ( key: T, value:graph )
    :return: nothing. draws.

    Bug fixed: the original ended with `plt.show` (missing parentheses),
    which never called the function, so the figure was never displayed.
    """

    t_list = np.array([])
    eigenval_list = np.array([])
    # for each t all its eigenvalues (ev) in a parallel list
    for T in graphDic.keys():
        for ev in np.nditer(nx.adjacency_spectrum(graphDic[T])):
            t_list = np.append(t_list, T)
            eigenval_list = np.append(eigenval_list, ev)

    # draw
    plt.plot(t_list, eigenval_list, 'ro')
    plt.show()
    return 1
Esempio n. 33
0
def do_centrality():
    # Python 2 code (print statements).
    # Loads the HumanNet edge list, builds an undirected graph, adds a
    # Katz-centrality column to the previously computed centrality table,
    # and writes the updated table back to disk.
    df = pd.read_table('../HumanNet_all_uniq.tsv',
                       sep='\t',
                       header=None,
                       names=['src', 'dest'],
                       index_col=None)
    G = construct_graph(df, directed=False)
    print "constructed graph..."
    #all_nodes = nx.nodes(G)
    #in_degree, out_degree, closeness, between = centrality(G)
    #central_df = pd.DataFrame(index=all_nodes)
    #central_df['closeness'] = pd.Series(closeness)
    #central_df['between'] = pd.Series(between)
    # largest eigenvalue of the adjacency matrix
    central_df = pd.read_table('../HumanNet_centrality.tsv',
                               sep='\t',
                               header=0,
                               index_col=0)
    # NOTE(review): max over the complex spectrum relies on numpy's
    # ordering of complex scalars; only the real part is used below.
    max_eigenval = max(nx.adjacency_spectrum(G))
    print "max eigen value", max_eigenval
    # alpha = 1/(lambda_max + 1) keeps Katz centrality convergent.
    central_df['katz'] = pd.Series(
        nx.katz_centrality_numpy(G, alpha=1 / (max_eigenval.real + 1)))
    central_df.to_csv('../HumanNet_centrality_updated.tsv', sep='\t')
Esempio n. 34
0
 def ComputeAdjacencyMatrixEigenvalues(self):
     """Store the adjacency eigenvalues of self.G (real parts, ascending)
     on self.adjacencySpectrum.
     """
     eigvals = np.array(nx.adjacency_spectrum(self.G)).real
     eigvals.sort()
     self.adjacencySpectrum = eigvals
Esempio n. 35
0
 # Python 2 fragment: prints a battery of statistics for graphs[g]
 # (presumably inside a loop over a `graphs` dict — the enclosing scope is
 # not visible here). get_avg/get_min/get_max are helpers defined elsewhere.
 print "degree_assortativity_coefficient",'\t\t', nx.degree_assortativity_coefficient(graphs[g])
 print "assortativity.average_degree_connectivity",'\t\t', nx.assortativity.average_degree_connectivity(graphs[g])
 #print "degree_pearson_correlation_coefficient",'\t\t', nx.degree_pearson_correlation_coefficient(graphs[g])
 print "node closeness_centrality",'\t\t\t', get_avg(nx.closeness_centrality(graphs[g]))
 print "clustering",'\t\t\t', get_avg(nx.clustering(graphs[g]))
 print "node betweeness",'\t\t\t', get_avg(nx.betweenness_centrality(graphs[g],normalized=False,endpoints=False))
 print "edge betweeness",'\t\t\t', get_avg(nx.edge_betweenness_centrality(graphs[g],normalized=False))
 #print "spectral_bipartivity",'\t\t', bipartite.spectral_bipartivity(graphs[g])
 #print "node betweeness normalized",'\t\t\t', get_avg(nx.betweenness_centrality(graphs[g],normalized=True,endpoints=False))
 #print "edge betweeness normalized",'\t\t\t', get_avg(nx.edge_betweenness_centrality(graphs[g],normalized=True))
 #print "node closeness_vitality",'\t\t\t', get_avg(nx.closeness_vitality(graphs[g]))
 #print "communicability_centrality",'\t\t', get_avg(nx.communicability_centrality(graphs[g]))
 #print "communicability_betweenness_centrality",'\t\t', get_avg(nx.communicability_betweenness_centrality(graphs[g]))
 #print "transitivity",'\t\t\t', round(nx.transitivity(graphs[g]),4)
 #print "laplacian_spectrum",'\t\t\n:', nx.laplacian_spectrum(graphs[g])
 # Extremes of the adjacency and Laplacian spectra (5 smallest / largest).
 print "adjacency_spectrum",'\t\tMin 5 :', get_min(nx.adjacency_spectrum(graphs[g])) , "\t\tMax 5 :",get_max(nx.adjacency_spectrum(graphs[g]))
 print "laplacian_spectrum",'\t\tMin 5 :', get_min(nx.laplacian_spectrum(graphs[g])) , "\t\tMax 5 :",get_max(nx.laplacian_spectrum(graphs[g]))
 #print "normalized_laplacian_spectrum",'\t\tMin 5 :', get_min(numpy.real(normalized_laplacian_spectrum(graphs[g]))) , "\t\tMax 5 :",get_max(normalized_laplacian_spectrum(graphs[g]))
 #print "adjacency_spectrum",'\t\t\n', nx.adjacency_spectrum(graphs[g])
 #print "laplacian_spectrum",'\t\t\n', nx.laplacian_spectrum(graphs[g])
 #print "normalized_laplacian_spectrum",'\t\t\n', normalized_laplacian_spectrum(graphs[g])
 ####print "adjacency_spectrum",'\t\t\n', numpy.around(numpy.real(nx.adjacency_spectrum(graphs[g])), decimals=4)
 ####print "laplacian_spectrum",'\t\t\n', numpy.around(numpy.real(nx.laplacian_spectrum(graphs[g])), decimals=4)
 ####print "normalized_laplacian_spectrum",'\t\t\n', numpy.around(numpy.real(normalized_laplacian_spectrum(graphs[g])), decimals=4)
 #statistics.pdf_to_textfile(numpy.real(numpy.around(nx.adjacency_spectrum(graphs[g]), decimals=2)).tolist(),g+"_adj_pdf.txt")
 # Write to a file
 #statistics.to_textfile(numpy.real(numpy.around(nx.adjacency_spectrum(graphs[g]), decimals=2)).tolist(),g+"_adj_pdf.txt")
 #statistics.pdf_to_textfile(numpy.real(numpy.around(nx.laplacian_spectrum(graphs[g]), decimals=2)).tolist(),g+"_pdf.txt")
 #statistics.cdf_to_textfile(numpy.real(numpy.around(nx.laplacian_spectrum(graphs[g]), decimals=2)).tolist(),g+"_cdf.txt")
 #statistics.pdf_to_textfile(numpy.real(numpy.around(normalized_laplacian_spectrum(graphs[g]), decimals=4)).tolist(),g+"_pdf.txt")
 #statistics.cdf_to_textfile(numpy.real(numpy.around(normalized_laplacian_spectrum(graphs[g]), decimals=4)).tolist(),g+"_cdf.txt")
Esempio n. 36
0
 def _compute(self, graph):
     """Return the adjacency-matrix eigenvalues of *graph*."""
     return nx.adjacency_spectrum(graph)
 def test_adjacency_spectrum(self):
     "Adjacency eigenvalues"
     # P (a 3-node path graph) has spectrum {-sqrt(2), 0, sqrt(2)}.
     expected = numpy.array([-numpy.sqrt(2), 0, numpy.sqrt(2)])
     actual = sorted(nx.adjacency_spectrum(self.P))
     assert_almost_equal(actual, expected)
Esempio n. 38
0
import scipy.sparse
import time
import sys
import os


# Per-graph scalar feature extractors, each a callable taking a networkx
# graph. NOTE(review): several entries use networkx 1.x dict-returning
# APIs (g.degree(), nx.degree(g), attribute_mixing_matrix) — confirm the
# pinned networkx version before running on 2.x.
attr_list = [ #Average degree
             lambda g : np.mean([e for e in g.degree().values()]),
             # Average eccentricity
             lambda g : np.mean([i for i in nx.eccentricity(g).values()]),
             # Average closeness centrality
             lambda g : np.mean([e for e in nx.closeness_centrality(g).values()]),
             # Percentage of isolated points (i.e., degree(v) = 1)
             lambda g : float(len(np.where(np.array(nx.degree(g).values())==1)[0]))/g.order(),
             # Spectral radius (i.e., largest AM eigenvalue)
             # NOTE(review): indexing [0] assumes the spectrum is returned
             # largest-first — adjacency_spectrum gives no ordering
             # guarantee; verify.
             lambda g : np.abs(nx.adjacency_spectrum(g))[0],
             # Spectral trace (i.e., sum of abs. eigenvalues)
             lambda g : np.sum(np.abs(nx.adjacency_spectrum(g))),
             # Label entropy, as defined in [2]
             lambda g : label_entropy([e[1]['type'] for e in g.nodes(data=True)]),
             # Mixing coefficient of attributes
             lambda g : np.linalg.det(nx.attribute_mixing_matrix(g,'type')),
             # Avg. #vertics with eccentricity == radius (i.e., central points)
             lambda g : np.mean(float(len(nx.center(g)))/g.order()),
             # Link impurity, as defined in [2]
             lambda g : link_impurity(g),
             # Diameter := max(eccentricity)
             lambda g : nx.diameter(g),
             # Radius := min(eccentricity)
             lambda g : nx.radius(g)]
def adja_spect(net):
    """Wrap the adjacency eigenvalues of *net* in a ``spectrum`` record
    tagged 'adjacency'."""
    eigvals = nx.adjacency_spectrum(net)
    return spectrum(eigvals, 'adjacency')