Example #1
def print_clustering_metrics(graph):
	print("\n===== Clustering Metrics ======")
	if not graph.is_multigraph():
		print("Transitivity: ", nx.transitivity(graph))
		print("")
	
		if not nx.is_directed(graph):
			triangles_values = list(nx.triangles(graph).values())
			print("# triangles: ", sum(triangles_values))
			print("Average triangles: ", sum(triangles_values)/len(triangles_values))
			print("Minimum # triangles: ", min(triangles_values))
			print("Maximum # triangles: ", max(triangles_values))
			print_top_n_by_metric(nx.triangles(graph),"# of triangles")
			
			clustercoff_values = list(nx.clustering(graph).values())
			print("Average clustering coefficient: ", sum(clustercoff_values)/len(clustercoff_values))
			print("Minimum clustering coefficient: ", min(clustercoff_values))
			print("Maximum clustering coefficient: ", max(clustercoff_values))
			print_top_n_by_metric(nx.clustering(graph),"clustering coefficient")
			print("")

			clustercoff_values = list(nx.square_clustering(graph).values())
			print("Average square clustering coefficient: ", sum(clustercoff_values)/len(clustercoff_values))
			print("Minimum square clustering coefficient: ", min(clustercoff_values))
			print("Maximum square clustering coefficient: ", max(clustercoff_values))
			print_top_n_by_metric(nx.square_clustering(graph), "square clustering coefficient")
			print("")
	else:
		print("Multigraph clustering metrics are not supported by Networkx")
Example #2
 def test_path(self):
     G = nx.path_graph(10)
     assert_equal(list(nx.square_clustering(G).values()),
                  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
     assert_equal(nx.square_clustering(G),
                  {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
                   5: 0.0, 6: 0.0, 7: 0.0, 8: 0.0, 9: 0.0})
Example #3
 def test_cubical(self):
     G = nx.cubical_graph()
     assert_equal(list(nx.square_clustering(G).values()),
                  [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
     assert_equal(list(nx.square_clustering(G,[1,2]).values()),[0.5, 0.5])
     assert_equal(nx.square_clustering(G,[1])[1],0.5)
     assert_equal(nx.square_clustering(G,[1,2]),{1: 0.5, 2: 0.5})
Example #4
 def test_path(self):
     G = nx.path_graph(10)
     assert list(nx.square_clustering(G).values()) == [
         0.0,
         0.0,
         0.0,
         0.0,
         0.0,
         0.0,
         0.0,
         0.0,
         0.0,
         0.0,
     ]
     assert nx.square_clustering(G) == {
         0: 0.0,
         1: 0.0,
         2: 0.0,
         3: 0.0,
         4: 0.0,
         5: 0.0,
         6: 0.0,
         7: 0.0,
         8: 0.0,
         9: 0.0,
     }
Example #5
 def test_lind_square_clustering(self):
     """Test C4 for figure 1 Lind et al (2005)"""
     G = nx.Graph([
         (1, 2),
         (1, 3),
         (1, 6),
         (1, 7),
         (2, 4),
         (2, 5),
         (3, 4),
         (3, 5),
         (6, 7),
         (7, 8),
         (6, 8),
         (7, 9),
         (7, 10),
         (6, 11),
         (6, 12),
         (2, 13),
         (2, 14),
         (3, 15),
         (3, 16),
     ])
     G1 = G.subgraph([1, 2, 3, 4, 5, 13, 14, 15, 16])
     G2 = G.subgraph([1, 6, 7, 8, 9, 10, 11, 12])
     assert nx.square_clustering(G, [1])[1] == 3 / 43.0
     assert nx.square_clustering(G1, [1])[1] == 2 / 6.0
     assert nx.square_clustering(G2, [1])[1] == 1 / 5.0
Example #6
 def test_cubical(self):
     G = nx.cubical_graph()
     assert (list(nx.square_clustering(G).values()) == [
         0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5
     ])
     assert list(nx.square_clustering(G, [1, 2]).values()) == [0.5, 0.5]
     assert nx.square_clustering(G, [1])[1] == 0.5
     assert nx.square_clustering(G, [1, 2]) == {1: 0.5, 2: 0.5}
Example #7
 def test_lind_square_clustering(self):
     """Test C4 for figure 1 Lind et al (2005)"""
     G = nx.Graph([(1,2),(1,3),(1,6),(1,7),(2,4),(2,5),
                   (3,4),(3,5),(6,7),(7,8),(6,8),(7,9),
                   (7,10),(6,11),(6,12),(2,13),(2,14),(3,15),(3,16)])
     G1 = G.subgraph([1,2,3,4,5,13,14,15,16])
     G2 = G.subgraph([1,6,7,8,9,10,11,12])
     assert_equal(nx.square_clustering(G, [1])[1], 3/75.0)
     assert_equal(nx.square_clustering(G1, [1])[1], 2/6.0)
     assert_equal(nx.square_clustering(G2, [1])[1], 1/5.0)
Example #8
 def test_cubical(self):
     G = nx.cubical_graph()
     assert list(nx.square_clustering(G).values()) == [
         1 / 3,
         1 / 3,
         1 / 3,
         1 / 3,
         1 / 3,
         1 / 3,
         1 / 3,
         1 / 3,
     ]
     assert list(nx.square_clustering(G, [1, 2]).values()) == [1 / 3, 1 / 3]
     assert nx.square_clustering(G, [1])[1] == 1 / 3
     assert nx.square_clustering(G, [1, 2]) == {1: 1 / 3, 2: 1 / 3}
Example #9
def clustering(graph, name='cluster'):
    """
    Calculates the squares clustering coefficient for nodes.

    .. math::


    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally genereated from GeoDataFrame using :py:func:`momepy.gdf_to_nx`
    name : str, optional
        calculated attribute name

    Returns
    -------
    Graph
        networkx.Graph

    References
    ----------

    Examples
    --------

    """
    netx = graph

    vals = nx.square_clustering(netx)
    nx.set_node_attributes(netx, vals, name)

    return netx
Example #10
def node_properties_color_map(graph, coloring_method):

    node_color = {}
    # ``==`` replaces the original ``is`` comparisons: ``is`` tests object
    # identity and is unreliable for comparing strings.
    if coloring_method == 'betweenness_centrality':
        node_color = nx.betweenness_centrality(graph)
    elif coloring_method == 'degree_centrality':
        node_color = nx.degree_centrality(graph)
    elif coloring_method == 'closeness_centrality':
        node_color = nx.closeness_centrality(graph)
    elif coloring_method == 'eigenvector_centrality':
        node_color = nx.eigenvector_centrality(graph)
    elif coloring_method == 'connected_components':
        components = nx.connected_components(graph)
        componentLabel = 0
        for componentNodes in components:
            for node in componentNodes:
                node_color[node] = componentLabel
            componentLabel = componentLabel + 1
    elif coloring_method == 'maximum_modularity_partition':
        node_color = community.best_modularity_partition(graph)
    elif coloring_method == 'clustering_coefficient':
        node_color = nx.square_clustering(graph)
    elif coloring_method == 'hemisphere':
        node_color = community.best_partition(graph)
        for n in node_color.keys():
            if n[-1] == 'L':
                node_color[n] = 0.0
            else:
                node_color[n] = 1.0
    elif coloring_method == 'maximum_surprise_partition':
        node_color = community.best_surprise_partition_louvain(graph)
    else:
        raise Exception('Unsupported coloring_method')
    return community.__renumber(node_color)
Example #11
def clustering(graph, name="cluster"):
    """
    Calculates the squares clustering coefficient for nodes.

    Wrapper around ``networkx.square_clustering``.


    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    name : str, optional
        calculated attribute name

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.clustering(network_graph)
    """
    netx = graph.copy()

    vals = nx.square_clustering(netx)
    nx.set_node_attributes(netx, vals, name)

    return netx
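
A short usage sketch for the wrapper above; the cubical graph is chosen only because its expected values appear in the tests earlier on this page:

import networkx as nx

# Illustrative only: attach square clustering values with the wrapper above
# and read them back. On recent NetworkX releases each node of the cubical
# graph scores 1/3 (older releases gave 0.5, as the tests earlier show).
G = clustering(nx.cubical_graph(), name="cluster")
print(nx.get_node_attributes(G, "cluster"))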
Example #12
def AttackClustering(graph):
    iteration = 0
    pos = nx.spring_layout(graph, k=0.3)
    clustering = nx.square_clustering(graph)
    clusteringList = list(clustering.items())
    clusteringList.sort(key=lambda i: i[1], reverse=True)
    size = len(graph.nodes)
    while True:
        if (len(graph.nodes) <= 1):
            return 0, 0

        # The original tested ``len(graph.nodes) == 0 and len(graph.edges) != 0``,
        # which can never be true (no nodes implies no edges); an exhausted edge
        # set with remaining nodes is assumed to be the intent.
        if (len(graph.nodes) != 0) and (len(graph.edges) == 0):
            SaveGraph(graph, size, pos, 'gif/pic%s.png' % iteration)
            intaktnost = GenerateIntaktnost(graph, size, pos)
            return intaktnost, iteration

        if not nx.is_connected(graph):
            SaveGraph(graph, size, pos, 'gif/pic%s.png' % iteration)
            intaktnost = GenerateIntaktnost(graph, size, pos)
            return intaktnost, iteration

        # integer step size; the original's float division (size / 10) made
        # the modulo test unreliable
        if ((iteration + 1) % max(size // 10, 1)) == 0:
            SaveGraph(graph, size, pos, 'gif/pic%s.png' % iteration)
        graph.remove_node(clusteringList[iteration][0])
        iteration += 1
Example #13
def ClusteringSQ_Calc(G):
    nodeClustering = nx.square_clustering(G)
    nodeClustering = dict(nodeClustering)
    maxClustering = max(nodeClustering.values())
    minClustering = min(nodeClustering.values())
    averageClustering = (sum(nodeClustering.values()) / len(nodeClustering))
    return maxClustering, minClustering, averageClustering, nodeClustering
Example #14
    def data_analysis(self, graph):
        data_vec = [0] * 13
        num_vertex = nx.number_of_nodes(graph)

        data_vec[0] = nx.average_clustering(graph)

        sq_values = list(nx.square_clustering(graph).values())
        data_vec[1] = sum(sq_values) / len(sq_values)

        g = nx.path_graph(num_vertex)
        data_vec[2] = nx.average_shortest_path_length(
            graph) / nx.average_shortest_path_length(g)

        data_vec[3] = nx.degree_pearson_correlation_coefficient(graph)
        if math.isnan(data_vec[3]):
            data_vec[3] = 0

        data_vec[4] = nx.diameter(graph) / (num_vertex - 1)
        data_vec[5] = nx.density(graph)

        data_vec[6] = nx.edge_connectivity(graph) / (num_vertex - 1)

        g = nx.star_graph(num_vertex - 1)
        Freeman_degree_norm = self.freeman_centralization(
            nx.degree_centrality(g))
        Freeman_close_norm = self.freeman_centralization(
            nx.closeness_centrality(g))
        Freeman_between_norm = self.freeman_centralization(
            nx.betweenness_centrality(g))
        # need to change
        Freeman_eigen_norm = self.freeman_centralization(
            nx.eigenvector_centrality_numpy(g))

        data_vec[7] = self.freeman_centralization(
            nx.degree_centrality(graph)) / Freeman_degree_norm
        data_vec[8] = self.freeman_centralization(
            nx.closeness_centrality(graph)) / Freeman_close_norm
        data_vec[9] = self.freeman_centralization(
            nx.betweenness_centrality(graph)) / Freeman_between_norm

        # warning, the way it normalized may not correct
        data_vec[10] = self.freeman_centralization(
            nx.eigenvector_centrality_numpy(graph)) / Freeman_eigen_norm

        egvl_lap = nx.laplacian_spectrum(graph)
        egvl_lap = np.sort(egvl_lap)
        egvl_lap = np.delete(egvl_lap, 0, 0)
        summ = 0
        for mu in egvl_lap:
            summ += (1 / mu)

        summ = summ * num_vertex
        data_vec[11] = (num_vertex - 1) / summ

        # for simple graph(adj matrix is symmetric), eigenvalue must be real number.
        egvl_adj = np.real(nx.adjacency_spectrum(graph))
        data_vec[12] = max(egvl_adj) / (num_vertex - 1)

        return data_vec
Example #15
 def square_clustering(self):
     """Devuelve un diccionario con el valor square_clustering para cada nodo"""
     try:
         g = self.g
         self.metrics_dict['square-clustering'] = nx.square_clustering(g)
         return self.metrics_dict['square-clustering']
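     # Note: nx.square_clustering is a direct combinatorial computation and
     # does not raise PowerIterationFailedConvergence (that exception comes
     # from iterative methods such as eigenvector centrality or PageRank),
     # so this handler is effectively unreachable.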
     except nx.exception.PowerIterationFailedConvergence:
         self.logging_message("Clustering:Power iteration failed.")
         return self.empty_dict()
Example #16
def print_sna_metrics(g):
    """
    Prints some SNA metrics using the NetworkX library
    :param g: ResultSet (cypher)
    """

    print()
    start_time = time.time()
    degree_centrality = nx.degree_centrality(g)
    print("Degree Centrality spent %s seconds" % (time.time() - start_time))
    user = max(degree_centrality, key=degree_centrality.get)
    print('User with maximum Degree Centrality:', user,
          '- Value:', degree_centrality.get(user))
    print('Average Degree Centrality:',
          np.array(list(degree_centrality.values())).mean())

    print()
    start_time = time.time()
    closeness_centrality = nx.closeness_centrality(g)
    print("Closeness Centrality spent %s seconds" % (time.time() - start_time))
    user = max(closeness_centrality, key=closeness_centrality.get)
    print('User with maximum Closeness Centrality:', user,
          '- Value:', closeness_centrality.get(user))
    print('Average Closeness Centrality:',
          np.array(list(closeness_centrality.values())).mean())

    print()
    start_time = time.time()
    betweenness_centrality = nx.betweenness_centrality(g)
    print("Betweenness Centrality spent %s seconds" %
          (time.time() - start_time))
    user = max(betweenness_centrality, key=betweenness_centrality.get)
    print('User with maximum Betweenness Centrality:', user,
          '- Value:', betweenness_centrality.get(user))
    print('Average Betweenness Centrality:',
          np.array(list(betweenness_centrality.values())).mean())

    print()
    start_time = time.time()
    print('Graph Density:', nx.density(g))
    print("Density spent %s seconds" % (time.time() - start_time))

    print()
    start_time = time.time()
    square_clustering = nx.square_clustering(g)
    print("Square Clustering spent %s seconds" % (time.time() - start_time))
    user = max(square_clustering, key=square_clustering.get)
    print('User with maximum Square Clustering:', user,
          '- Value:', square_clustering.get(user))
    print('Average Square Clustering:',
          np.array(list(square_clustering.values())).mean())

    print()
    start_time = time.time()
    print("Nodes' Degree:", nx.degree(g))
    print("Degree spent %s seconds" % (time.time() - start_time))
Example #17
    def f23(self):
        start = 0

        square_dic = nx.square_clustering(self.G)
        total = sum(square_dic.values())
        no = len(square_dic.values())
        res = total/no
        stop = 0
        # self.feature_time.append(stop - start)
        return res
Example #18
def calculate(network):
    try:
        n = nx.square_clustering(network)
    except:
        return 0
 
    if len(n.values()) == 0: 
        return 0  
    else:
        return round(sum(n.values())/len(n.values()), 7) 
Example #19
    def f23(self):
        start = 0

        square_dic = nx.square_clustering(self.G)
        total = sum(square_dic.values())
        no = len(square_dic.values())
        res = total / no
        stop = 0
        # self.feature_time.append(stop - start)
        return res
Example #20
    def data_analysis(self, graph):
        data_vec = [0] * 10
        # print graph.edges
        data_vec[0] = nx.transitivity(graph)
        # print(Matrix[0])
        try:
            data_vec[1] = nx.average_clustering(graph)
        except:
            data_vec[1] = 0

        dic = nx.square_clustering(graph).values()
        summation = 0
        for e in dic:
            summation = summation + e
        try:
            data_vec[2] = summation / len(dic)
        except:
            data_vec[2] = 0

        if nx.number_connected_components(graph) != 1:
            Gc = max(nx.connected_component_subgraphs(graph), key=len)
            if nx.number_of_nodes(Gc) != 1:
                data_vec[3] = nx.average_shortest_path_length(Gc)
        else:
            data_vec[3] = nx.average_shortest_path_length(graph)

        try:
            data_vec[4] = nx.degree_assortativity_coefficient(graph)
        except:
            data_vec[4] = 0
        if math.isnan(data_vec[4]):
            data_vec[4] = 0

        if nx.number_connected_components(graph) != 1:
            Gc = max(nx.connected_component_subgraphs(graph), key=len)
            data_vec[5] = nx.diameter(Gc)
        else:
            data_vec[5] = nx.diameter(graph)

        data_vec[6] = nx.density(graph)

        # triangle part, calculate the ratio of triangle
        node_N = nx.number_of_nodes(graph)
        if node_N < 3:
            data_vec[7] = 0
        else:
            triangle = sum(nx.triangles(graph).values()) / 3
            C_Triangle = math.factorial(node_N) / math.factorial(
                node_N - 3) / math.factorial(3)
            data_vec[7] = float(triangle) / C_Triangle

        data_vec[8] = nx.node_connectivity(graph)
        data_vec[9] = nx.edge_connectivity(graph)
        # print data_vec
        return data_vec
Example #21
 def test_peng_square_clustering(self):
     """Test eq2 for figure 1 Peng et al (2008)"""
     G = nx.Graph([
         (1, 2),
         (1, 3),
         (2, 4),
         (3, 4),
         (3, 5),
         (3, 6),
     ])
     assert nx.square_clustering(G, [1])[1] == 1 / 3
Example #22
def analyzeBipartiteGraph(graph, e_nodes):

    # average shortest path length
    L_r = nx.average_shortest_path_length(graph)
    # square clustering coefficient of electrodes
    C_r_dict = nx.square_clustering(graph, e_nodes)
    C_r_list = []
    for key in C_r_dict:
        C_r_list.append(C_r_dict[key])
    C_r = sum(C_r_list) / len(C_r_list)

    # return L_r and C_r for later analysis
    return [L_r, C_r]
Example #23
def calculatesquareclustering(network):
    '''
    Compute the squares clustering coefficient for nodes: the fraction of possible squares that exist at the node.

    '''
    try:
        n = nx.square_clustering(network)
    except:
        return 0
 
    if len(n.values()) == 0: 
        return 0  
    else:
        return round(sum(n.values())/len(n.values()), 7)
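
A brief usage sketch (illustrative; the per-node value depends on the NetworkX release, as the cubical-graph tests above show):

import networkx as nx

# Each node of the cubical graph scores 1/3 on recent NetworkX releases
# (0.5 on older ones), so this prints 0.3333333 (or 0.5).
print(calculatesquareclustering(nx.cubical_graph()))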
Example #24
def get_all_proximity_score(G, edges):
    proximity_score_list = [[] for i in itertools.repeat(None, len(edges))]
    cc = [
        nx.square_clustering(G, edge[0]) + nx.square_clustering(G, edge[1])
        for edge in edges
    ]
    cn = [
        len(list(nx.common_neighbors(G, edge[0], edge[1]))) for edge in edges
    ]
    jc = nx.jaccard_coefficient(G, edges)
    pa = nx.preferential_attachment(G, edges)
    rai = nx.resource_allocation_index(G, edges)
    for i, data in enumerate(cc):
        proximity_score_list[i].append(data)
    for i, data in enumerate(cn):
        proximity_score_list[i].append(data)
    for i, data in enumerate(jc):
        proximity_score_list[i].append(data[2])
    for i, data in enumerate(pa):
        proximity_score_list[i].append(data[2])
    for i, data in enumerate(rai):
        proximity_score_list[i].append(data[2])
    return proximity_score_list
Example #25
def write_features(all_graphs):
    with open("", "w") as f:

        nr_graphs = len(all_graphs)
        f.write(str(nr_graphs) + "\n")

        for graph_name, graph in all_graphs.items():
            print(graph_name)
            nr_example, state, _ = graph_name.split("-")
            label = create_labels(state)
            print(label)

            e_centrality = nx.eigenvector_centrality_numpy(graph)
            bet_centrality = nx.betweenness_centrality(graph, k=30)
            # These were commented out in the original but are used below,
            # so they are restored here to keep the snippet runnable.
            cf_closeness_centrality = nx.current_flow_closeness_centrality(graph)
            cf_bet_centrality = nx.current_flow_betweenness_centrality(graph)
            load_centrality = nx.load_centrality(graph)
            clustering_coeff = nx.clustering(graph)
            square_clustering_coeff = nx.square_clustering(graph)

            f.write("{} {}\n".format(str(graph.number_of_nodes()), str(label)))
            nodes = sorted(list(graph.nodes()))
            for n in nodes:
                neighbors = sorted(list(graph.neighbors(n)))
                tag = 0  # assumed placeholder; ``tag`` is undefined in the original snippet
                f.write("{} {} ".format(str(tag), str(len(neighbors))))
                for neighbor in neighbors:
                    f.write("{} ".format(str(neighbor)))
                # moved out of the else-branch so it is always defined when
                # written below, even for isolated nodes
                cc = nx.closeness_centrality(graph, u=n)
                # degree
                f.write("{} ".format(graph.degree(n)))
                # eigenvector centrality
                f.write("{} ".format(e_centrality[n]))
                # betweeness centrality
                f.write("{} ".format(bet_centrality[n]))
                # closeness centrality
                f.write("{} ".format(cc))
                # current flow closeness centrality
                f.write("{} ".format(cf_closeness_centrality[n]))
                # current flow betweeness centrality
                f.write("{} ".format(cf_bet_centrality[n]))
                # load centrality
                f.write("{} ".format(load_centrality[n]))
                # clustering coefficient
                f.write("{} ".format(clustering_coeff[n]))
                # square clustering coefficient
                f.write("{} ".format(square_clustering_coeff[n]))
                f.write("\n")
Example #26
    def cluster(self):
        rslt = {}

        rslt['transitivity'] = nx.transitivity(self.graph)
        rslt['square_clustering'] = nx.square_clustering(self.graph)

        if self.directed == 'undirected':
            rslt['triangles'] = nx.triangles(self.graph)
            rslt['clustering'] = nx.clustering(self.graph)
            rslt['average_clustering'] = nx.average_clustering(self.graph)

        fname_cluster = self.DIR + '/cluster.json'
        with open(fname_cluster, "w") as f:
            json.dump(rslt, f, cls=SetEncoder, indent=2)
        print(fname_cluster)
Example #27
def summary_results(nets=None, years=None):
    if years is None:
        years = default_years
    if nets is None:
        nets = networks_by_year()
    result = {}
    previous_devs = None
    for year, G in zip(years, nets):
        result[year] = {}
        devs = set(n for n, d in G.nodes(data=True) if d['bipartite']==1)
        files = set(G) - devs
        result[year]['guido_in'] = u'Guido van Rossum' in G
        result[year]['density'] = bp.density(G, devs)
        cc = sorted(nx.connected_components(G), key=len, reverse=True)
        result[year]['cc'] = len(cc[0]) / float(G.order()) if cc else 0
        bcc = sorted(nx.biconnected_components(G), key=len, reverse=True)
        result[year]['bcc'] = len(bcc[0]) / float(G.order()) if bcc else 0
        result[year]['devs'] = len(devs)
        result[year]['files'] = len(files)
        result[year]['py_files'] = len([f for f in files if f.endswith('.py')])
        result[year]['c_files'] = len([f for f in files if f.endswith('.c') 
                                            or f.endswith('.h')])
        result[year]['doc_files'] = len([f for f in files if f.lower().endswith('.txt') 
                                            or f.endswith('.rst') 
                                            or f.endswith('.tex')])
        result[year]['weight'] = sum(nx.degree(G, devs, weight='weight').values())
        result[year]['added'] = sum(nx.degree(G, devs, weight='added').values())
        result[year]['deleted'] = sum(nx.degree(G, devs, weight='deleted').values())
        result[year]['edits'] = sum(nx.degree(G, devs, weight='edits').values())
        result[year]['sq_clustering'] = (sum(nx.square_clustering(G, devs).values()) 
                                            / float(len(devs)))
        if previous_devs is None:
            # First year
            result[year]['new_devs'] = len(devs)
            result[year]['continue_devs'] = 0
            result[year]['lost_devs'] = 0
        else:
            result[year]['new_devs'] = len(devs - previous_devs)
            result[year]['continue_devs'] = len(devs & previous_devs)
            result[year]['lost_devs'] = len(previous_devs - devs)
        previous_devs = devs
    return result
Example #28
def cluster_coefficient():
    """
        calculate the degree-adjusted square clustering coefficient and rank nodes by it
        :return: sorted list of nodes
    """
    global G, calculated_ranks
    if len(calculated_ranks['cluster_coefficient']) == 0:
        cluster_coeff_per_node = list(nx.square_clustering(G).items())
        degree_per_node = list(G.degree())
        degree_per_node.sort(key=lambda x: x[0])
        cluster_coeff_per_node.sort(key=lambda x: x[0])
        adj_cluster_coeff_per_node = [
            (degree_per_node[i][0], math.log(degree_per_node[i][1]) *
             cluster_coeff_per_node[i][1]) if degree_per_node[i][1] > 1 else
            (degree_per_node[i][0], 0) for i in range(len(degree_per_node))
        ]
        calculated_ranks[
            'cluster_coefficient'] = sort_and_map_tuple_list_to_name(
                adj_cluster_coeff_per_node)
    return calculated_ranks['cluster_coefficient']
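
The function above relies on a sort_and_map_tuple_list_to_name helper that is not shown. A plausible minimal sketch, inferred from the call site (the exact behavior is an assumption):

def sort_and_map_tuple_list_to_name(tuple_list):
    # Hypothetical helper: rank (node, score) pairs by score, descending,
    # and keep only the node names.
    ranked = sorted(tuple_list, key=lambda t: t[1], reverse=True)
    return [name for name, _score in ranked]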
Example #29
def AttackClustering(graph):
    iteration = 0

    clustering = nx.square_clustering(graph)
    list_d = list(clustering.items())
    list_d.sort(key=lambda i: i[1], reverse=True)

    while True:
        if (len(graph.nodes) <= 1):
            return 0, 0

        # As in Example #12, the original's ``len(graph.nodes) == 0 and
        # len(graph.edges) != 0`` can never be true; an empty edge set is
        # assumed to be the intent.
        if (len(graph.nodes) != 0) and (len(graph.edges) == 0):
            intaktnost = GenerateIntaktnost(graph)
            return intaktnost, iteration

        if not nx.is_connected(graph):
            intaktnost = GenerateIntaktnost(graph)
            return intaktnost, iteration

        graph.remove_node(list_d[iteration][0])
        iteration += 1
Example #30
    def compute_features(self):

        triang = lambda graph: np.asarray(list(nx.triangles(graph).values())
                                          ).mean()
        self.add_feature(
            "num_triangles",
            triang,
            "Number of triangles in the graph",
            InterpretabilityScore("max"),
        )

        transi = lambda graph: nx.transitivity(graph)
        self.add_feature(
            "transitivity",
            transi,
            "Transitivity of the graph",
            InterpretabilityScore("max"),
        )

        # Average clustering coefficient
        clustering_dist = lambda graph: list(nx.clustering(graph).values())
        self.add_feature(
            "clustering",
            clustering_dist,
            "the clustering of the graph",
            InterpretabilityScore("max"),
            statistics="centrality",
        )

        # square clustering
        square_clustering_dist = lambda graph: list(
            nx.square_clustering(graph).values())
        self.add_feature(
            "square_clustering",
            square_clustering_dist,
            "the square clustering of the graph",
            InterpretabilityScore("max"),
            statistics="centrality",
        )
Example #31
    def node_attributes(self):
        result = {}

        result['degree_centrality'] = nx.degree_centrality(self.graph)
        result['in_degree_centrality'] = nx.in_degree_centrality(self.graph)
        result['out_degree_centrality'] = nx.out_degree_centrality(self.graph)
        result['closeness_centrality'] = nx.closeness_centrality(self.graph)
        result['betweenness_centrality'] = nx.betweenness_centrality(
            self.graph)
        result['load_centrality'] = nx.load_centrality(self.graph)
        result['average_neighbor_degree'] = nx.average_neighbor_degree(
            self.graph)
        result['square_clustering'] = nx.square_clustering(self.graph)
        result['closeness_vitality'] = nx.closeness_vitality(self.graph)

        # nodes attributes
        node_attributes = []
        for node in self.graph.nodes():
            node_attributes.append((node, result['degree_centrality'][node],
                                    result['in_degree_centrality'][node],
                                    result['out_degree_centrality'][node],
                                    result['closeness_centrality'][node],
                                    result['betweenness_centrality'][node],
                                    result['load_centrality'][node],
                                    result['average_neighbor_degree'][node],
                                    result['square_clustering'][node],
                                    result['closeness_vitality'][node]))

        node_attributes.insert(0, [
            'node', 'degree_centrality', 'in_degree_centrality',
            'out_degree_centrality', 'closeness_centrality',
            'betweenness_centrality', 'load_centrality',
            'average_neighbor_degree', 'square_clustering',
            'closeness_vitality'
        ])

        return node_attributes
Example #32
import networkx as nx
import plot_multigraph
from matplotlib import pylab as plt

n = 80
p = 10. / n
G = nx.fast_gnp_random_graph(n, p, seed=42)

def to_list(dict_):
  return [dict_[k] for k in G.nodes()]

graph_colors = [
  ("eccentricity", to_list(nx.eccentricity(G))),
  ("clustering", to_list(nx.clustering(G))),
  ("square_clustering", to_list(nx.square_clustering(G))),
]

fig = plot_multigraph.plot_color_multigraph(G, graph_colors, 2, 2, node_size=50)
plt.savefig('graphs/distance.png', facecolor=fig.get_facecolor())
Example #33
if needs_tri:
  print("[+] Computing number of triangles...")
  tri = pd.Series(nx.triangles(graph), name='triangles')

if needs_clo:
  print("[+] Computing closeness centrality...")
  clo = pd.Series(nx.closeness_centrality(graph), name='closeness_centrality')

if needs_pag:
  print("[+] Computing pagerank...")
  pag = pd.Series(nx.pagerank(graph), name='pagerank')

if needs_squ:
  print("[+] Computing square clustering...")
  squ = pd.Series(nx.square_clustering(graph), name='square_clustering_coefficient')

# Always run: connected components
print("[+] Computing connected components")
_cco = {}
for i, c in enumerate(nx.connected_components(graph)):
    for e in c:
        _cco[e] = i
cco = pd.Series(_cco, name='connected_component_id')


# Putting all results together
print("[+] Preparing output")
stats = pd.DataFrame(deg)
stats = stats.join(cco)
if needs_eig:
Example #34
 def test_bipartite_k5(self):
     G = nx.complete_bipartite_graph(5, 5)
     assert list(nx.square_clustering(G).values()) == [
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1
     ]
Example #35
 def test_bipartite_k5(self):
     G = nx.complete_bipartite_graph(5,5)
     assert_equal(list(nx.square_clustering(G).values()),
                     [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
Example #36
	def square_clustering_coefficient_sum(self):
		# ``self.graph`` is assumed here; the original referenced an undefined ``Gt``.
		return (nx.square_clustering(self.graph, [self.node_1])[self.node_1]
		        + nx.square_clustering(self.graph, [self.node_2])[self.node_2])
Example #37
 def test_clustering(self):
     G = nx.Graph()
     assert_equal(list(nx.square_clustering(G).values()),[])
     assert_equal(nx.square_clustering(G),{})
Example #38
 def getSquares(self, graph):
     squareDict = nx.square_clustering(graph)
     square = list(squareDict.values())
     return square