Ejemplo n.º 1
0
 def test_walktrap(self):
     """Walktrap on a string-labeled graph yields list-of-list communities of str ids."""
     graph = get_string_graph()
     result = algorithms.walktrap(graph)
     self.assertEqual(type(result.communities), list)
     if result.communities:
         first = result.communities[0]
         self.assertEqual(type(first), list)
         self.assertEqual(type(first[0]), str)
Ejemplo n.º 2
0
    def test_plot_sim_matrix(self):
        """Similarity-matrix plot of two clusterings renders and saves to PDF."""
        graph = nx.karate_club_graph()
        partitions = [algorithms.louvain(graph), algorithms.walktrap(graph)]

        viz.plot_sim_matrix(partitions,
                            evaluation.adjusted_mutual_information)

        plt.savefig("cluster.pdf")
        os.remove("cluster.pdf")
    def test_endntm(self):
        """ENDNTM ensemble over three base clusterings yields int-node communities."""
        graph = nx.karate_club_graph()

        base_partitions = [
            algorithms.louvain(graph),
            algorithms.label_propagation(graph),
            algorithms.walktrap(graph),
        ]
        ensemble = algorithms.endntm(graph, base_partitions)

        self.assertEqual(type(ensemble.communities), list)
        if ensemble.communities:
            first = ensemble.communities[0]
            self.assertEqual(type(first), list)
            self.assertEqual(type(first[0]), int)
Ejemplo n.º 4
0
    def test_plot_com_stat(self):
        """Community-stat plot accepts both a list of clusterings and a single one."""
        graph = nx.karate_club_graph()
        first = algorithms.louvain(graph)
        second = algorithms.walktrap(graph)

        # Exercise the list form first, then the single-clustering form.
        for argument in ([first, second], first):
            viz.plot_com_stat(argument, evaluation.size)
            plt.savefig("cluster.pdf")
            os.remove("cluster.pdf")
Ejemplo n.º 5
0
    def test_plot_com_properties_relation(self):
        """Property-relation scatter accepts both a list of clusterings and a single one."""
        graph = nx.karate_club_graph()
        first = algorithms.louvain(graph)
        second = algorithms.walktrap(graph)

        # Exercise the list form first, then the single-clustering form.
        for argument in ([first, second], first):
            viz.plot_com_properties_relation(argument, evaluation.size,
                                             evaluation.internal_edge_density)
            plt.savefig("cluster.pdf")
            os.remove("cluster.pdf")
 def walktrap(self):
     """Run Walktrap community detection, print summary stats, and export a .node file.

     Reads the community template, runs cdlib's walktrap on ``self.g``,
     writes the per-node community column (plus degree and region name)
     to ``Results/Communities/<name>_walktrap_communities.node``.

     Returns:
         The ordered community assignment list produced by
         ``self.get_ordered_communities``.
     """
     # FIX: positional `sep` for read_csv/to_csv was deprecated and removed
     # in pandas 2.0 -- pass it by keyword.
     template = pd.read_csv("data/communities_template.node", sep=" ", header='infer')
     save_to = "Results/Communities/" + self.name + "_walktrap_communities.node"
     G = nx.Graph(self.g)
     result = algorithms.walktrap(G)
     modularity = result.newman_girvan_modularity().score
     significance = result.significance().score
     communities = result.to_node_community_map()
     # Last node's community id + 1 -- assumes community ids are contiguous
     # from 0 and the map is ordered (TODO confirm for this cdlib version).
     n_communities = list(communities.values())[-1][0] + 1
     print("\nWalktrap Algorithm: ")
     print("#Communities: ", n_communities)
     print("Modularity: ", modularity)
     print("Significance: ", significance)
     d = self.get_ordered_communities(communities)
     template['Walktrap'] = d
     print("\n")
     template['Degree'] = list(self.centrality)
     template['RegionName'] = self.region_names
     template.to_csv(save_to, sep=" ", header=False, index=False)
     return d
Ejemplo n.º 7
0
import networkx as nx
import argparse


def get_args():
    """Parse command-line options: a required edge-list path and an output path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--edgelist',
                        dest='edgelist',
                        required=True,
                        help='tab separated edge list')
    parser.add_argument('--output',
                        dest='output',
                        required=True,
                        help='name of file to save the community to')
    return parser.parse_args()


# Script entry: parse options, detect communities with Walktrap, and write
# one community per line (tab-separated: community index, then its members).
args = get_args()

G = nx.read_edgelist(args.edgelist)
# NOTE(review): `algorithms` is not imported by this file's visible imports --
# presumably `from cdlib import algorithms`; confirm before running.
coms = algorithms.walktrap(G)

with open(args.output, 'w') as outfile:
    for i, com in enumerate(coms.communities):
        # Members are joined as-is, so they must already be str
        # (nx.read_edgelist yields string node labels by default).
        outfile.write('\t'.join([str(i)] + com))
        outfile.write('\n')
Ejemplo n.º 8
0
 elif (options.method == 'label_propagation'):
     communities = algorithms.label_propagation(g, **clust_kwargs)
 elif (options.method == 'markov_clustering'):
     communities = algorithms.markov_clustering(g, **clust_kwargs)
 elif (options.method == 'rber_pots'):
     communities = algorithms.rber_pots(g, weights='weight', **clust_kwargs)
 elif (options.method == 'rb_pots'):
     communities = algorithms.rb_pots(g, weights='weight', **clust_kwargs)
 elif (options.method == 'significance_communities'):
     communities = algorithms.significance_communities(g, **clust_kwargs)
 elif (options.method == 'spinglass'):
     communities = algorithms.spinglass(g, **clust_kwargs)
 elif (options.method == 'surprise_communities'):
     communities = algorithms.surprise_communities(g, **clust_kwargs)
 elif (options.method == 'walktrap'):
     communities = algorithms.walktrap(g, **clust_kwargs)
 #elif(options.method == 'sbm_dl'):
 #	communities = algorithms.sbm_dl(g)
 #elif(options.method == 'sbm_dl_nested'):
 #	communities = algorithms.sbm_dl_nested(g)
 elif (options.method == 'lais2'):
     communities = algorithms.lais2(g, **clust_kwargs)
 elif (options.method == 'big_clam'):
     communities = algorithms.big_clam(g, **clust_kwargs)
 elif (options.method == 'danmf'):
     communities = algorithms.danmf(g, **clust_kwargs)
 elif (options.method == 'ego_networks'):
     communities = algorithms.ego_networks(g, **clust_kwargs)
 elif (options.method == 'egonet_splitter'):
     communities = algorithms.egonet_splitter(g, **clust_kwargs)
 elif (options.method == 'nmnf'):
    # Per-iteration NMI benchmark of several cdlib algorithms on LFR_G.
    eigenvector_partition = cd.eigenvector(LFR_G)
    eigenvector_labels = extract_communities_list(eigenvector_partition.communities)
    nmi_eigenvector.append(normalized_mutual_info_score(true_labels, eigenvector_labels))

    ############################### Louvian ###############################
    louvian_partition = cd.louvain(LFR_G)
    louvian_labels = extract_communities_list(louvian_partition.communities)
    nmi_louvian.append(normalized_mutual_info_score(true_labels, louvian_labels))

    ############################### Leiden ###############################
    leiden_partition = cd.leiden(LFR_G)
    leiden_labels = extract_communities_list(leiden_partition.communities)
    # FIX: previously scored `louvian_labels` here, so the Leiden NMI series
    # silently duplicated the Louvain scores.
    nmi_leiden.append(normalized_mutual_info_score(true_labels, leiden_labels))

    ############################### Walktrap ###############################
    walktrap_partition = cd.walktrap(LFR_G)
    walktrap_labels = extract_communities_list(walktrap_partition.communities)
    nmi_walktrap.append(normalized_mutual_info_score(true_labels, walktrap_labels))

    ############################### Markov Clustering ###############################
    markov_partition = cd.markov_clustering(LFR_G)
    markov_labels = extract_communities_list(markov_partition.communities)
    nmi_markov.append(normalized_mutual_info_score(true_labels, markov_labels))

    ############################### Greedy ###############################
    greedy_partition = cd.greedy_modularity(LFR_G)
    greedy_labels = extract_communities_list(greedy_partition.communities)
    nmi_greedy.append(normalized_mutual_info_score(true_labels, greedy_labels))

    ############################### Label Propagation ###############################
    propagation_partition = cd.label_propagation(LFR_G)
# Timed comparison of community-detection algorithms: for each one, record
# runtime plus NMI / ARI / variation-of-information against the ground truth.
ari_louvian = adjusted_rand_score(true_communities, louvian_communities)
vi_louvian = variation_of_information(true_partitions, louvian_partitions)

############################### Leiden ###############################
start_time = time.time()
leiden_partition = cd.leiden(G)
leiden_time = time.time() - start_time
leiden_communities = extract_communities_list(leiden_partition.communities, G)
leiden_partitions = get_partitions(leiden_communities)
# FIX: variable was misspelled `nmi_ledien`, leaving `nmi_leiden` undefined
# (inconsistent with ari_leiden / vi_leiden below).
nmi_leiden = normalized_mutual_info_score(true_communities, leiden_communities)
ari_leiden = adjusted_rand_score(true_communities, leiden_communities)
vi_leiden = variation_of_information(true_partitions, leiden_partitions)

############################### Walktrap ###############################
start_time = time.time()
walktrap_partition = cd.walktrap(G)
walktrap_time = time.time() - start_time
walktrap_communities = extract_communities_list(walktrap_partition.communities,
                                                G)
walktrap_partitions = get_partitions(walktrap_communities)
nmi_walktrap = normalized_mutual_info_score(true_communities,
                                            walktrap_communities)
ari_walktrap = adjusted_rand_score(true_communities, walktrap_communities)
vi_walktrap = variation_of_information(true_partitions, walktrap_partitions)

############################### Markov Clustering ###############################
start_time = time.time()
markov_partition = cd.markov_clustering(G)
markov_time = time.time() - start_time
markov_communities = extract_communities_list(markov_partition.communities, G)
markov_partitions = get_partitions(markov_communities)