def test_onmi(self):
    g = nx.karate_club_graph()
    lp_communities = label_propagation(g)
    lp2_communities = label_propagation(g)

    score = evaluation.overlapping_normalized_mutual_information_MGH(
        lp2_communities, lp_communities
    )
    self.assertLessEqual(score.score, 1)
    self.assertGreaterEqual(score.score, 0)

    score = evaluation.overlapping_normalized_mutual_information_LFK(
        lp2_communities, lp_communities
    )
    self.assertLessEqual(score.score, 1)
    self.assertGreaterEqual(score.score, 0)
def test_lp(self):
    g = get_string_graph()
    coms = algorithms.label_propagation(g)
    self.assertEqual(type(coms.communities), list)
    if len(coms.communities) > 0:
        self.assertEqual(type(coms.communities[0]), list)
        self.assertEqual(type(coms.communities[0][0]), str)
def test_variation_of_information(self):
    g = nx.karate_club_graph()
    lp_communities = label_propagation(g)
    louvain_communities = louvain(g)

    score = evaluation.variation_of_information(louvain_communities, lp_communities)
    self.assertLessEqual(score.score, np.log(g.number_of_nodes()))
    self.assertGreaterEqual(score.score, 0)
def test_adjusted_rand(self):
    g = nx.karate_club_graph()
    lp_communities = label_propagation(g)
    louvain_communities = louvain(g)

    score = evaluation.adjusted_rand_index(louvain_communities, lp_communities)
    self.assertLessEqual(score.score, 1)
    self.assertGreaterEqual(score.score, 0)
def test_f1(self):
    g = nx.karate_club_graph()
    lp_communities = label_propagation(g)
    louvain_communities = louvain(g)

    score = evaluation.f1(louvain_communities, lp_communities)
    self.assertIsInstance(score, evaluation.MatchingResult)
def test_closeness_simple(self):
    g = nx.karate_club_graph()
    lp_communities = label_propagation(g)
    louvain_communities = louvain(g)

    score = evaluation.partition_closeness_simple(louvain_communities, lp_communities)
    self.assertLessEqual(score.score, 1)
    self.assertGreaterEqual(score.score, 0)
def test_nmi(self):
    g = nx.karate_club_graph()
    louvain_communities = louvain(g)
    lp_communities = label_propagation(g)

    score = evaluation.normalized_mutual_information(louvain_communities, lp_communities)
    self.assertLessEqual(score.score, 1)
    self.assertGreaterEqual(score.score, 0)
def test_plot_sim_matrix(self):
    g = nx.karate_club_graph()
    coms = algorithms.louvain(g)
    coms2 = algorithms.label_propagation(g)
    viz.plot_sim_matrix([coms, coms2], evaluation.adjusted_mutual_information)
    plt.savefig("cluster.pdf")
    os.remove("cluster.pdf")
def test_endntm(self):
    G = nx.karate_club_graph()
    coms_l = [
        algorithms.louvain(G),
        algorithms.label_propagation(G),
        algorithms.walktrap(G),
    ]
    coms = algorithms.endntm(G, coms_l)
    self.assertEqual(type(coms.communities), list)
    if len(coms.communities) > 0:
        self.assertEqual(type(coms.communities[0]), list)
        self.assertEqual(type(coms.communities[0][0]), int)
def test_plot_com_stat(self):
    g = nx.karate_club_graph()
    coms = algorithms.louvain(g)
    coms2 = algorithms.label_propagation(g)
    viz.plot_com_stat([coms, coms2], evaluation.size)
    plt.savefig("cluster.pdf")
    os.remove("cluster.pdf")
    viz.plot_com_stat(coms, evaluation.size)
    plt.savefig("cluster.pdf")
    os.remove("cluster.pdf")
def test_ranking_comp(self):
    g = nx.karate_club_graph()
    coms = algorithms.louvain(g)
    coms2 = algorithms.kclique(g, 2)
    coms3 = algorithms.label_propagation(g)

    rk = evaluation.ComparisonRanking([coms, coms2, coms3])
    rk.rank(evaluation.overlapping_normalized_mutual_information_LFK)
    rk.rank(evaluation.overlapping_normalized_mutual_information_MGH)
    rk.rank(evaluation.omega)

    rnk, _ = rk.topsis()
    self.assertEqual(len(rnk), 3)

    pc = rk.bonferroni_post_hoc()
    self.assertLessEqual(len(pc), 4)
def test_plot_com_properties_relation(self):
    g = nx.karate_club_graph()
    coms = algorithms.louvain(g)
    coms2 = algorithms.label_propagation(g)
    viz.plot_com_properties_relation(
        [coms, coms2], evaluation.size, evaluation.internal_edge_density
    )
    plt.savefig("cluster.pdf")
    os.remove("cluster.pdf")
    viz.plot_com_properties_relation(
        coms, evaluation.size, evaluation.internal_edge_density
    )
    plt.savefig("cluster.pdf")
    os.remove("cluster.pdf")
def test_comparison(self):
    g = nx.karate_club_graph()
    coms = algorithms.louvain(g)
    coms2 = algorithms.label_propagation(g)
    self.assertIsInstance(coms.normalized_mutual_information(coms2).score, float)
    self.assertIsInstance(
        coms.overlapping_normalized_mutual_information_LFK(coms2).score, float
    )
    self.assertIsInstance(
        coms.overlapping_normalized_mutual_information_MGH(coms2).score, float
    )
    self.assertIsInstance(coms.omega(coms2).score, float)
    self.assertIsInstance(coms.f1(coms2), evaluation.MatchingResult)
    self.assertIsInstance(coms.nf1(coms2).score, float)
    self.assertIsInstance(coms.adjusted_mutual_information(coms2).score, float)
    self.assertIsInstance(coms.adjusted_rand_index(coms2).score, float)
    self.assertIsInstance(coms.variation_of_information(coms2).score, float)
def community_discoverying_algorithms(g):
    """
    Each Community Discovery algorithm returns a NodeClustering object,
    which also allows generating a JSON representation of the results.
    The clusterings are then evaluated with fitness functions
    (e.g., a synthetic summary of their min/max/mean/std values, or a
    community-wise value).
    """
    print("Starting computing angel_coms")
    angel_coms = algorithms.angel(g.to_undirected(), threshold=0.25)
    write_on_file(angel_coms, "communities/angel.json")
    draw_community_graph(g, angel_coms, "communities/angel.png")
    print("END")

    print("Starting computing infomap_coms")
    infomap_coms = algorithms.infomap(g.to_undirected())
    write_on_file(infomap_coms, "communities/infomap.json")
    draw_community_graph(g, infomap_coms, "communities/infomap.png")
    print("END")

    print("Starting computing louvain_coms")
    louvain_coms = algorithms.louvain(g.to_undirected())
    write_on_file(louvain_coms, "communities/louvain.json")
    draw_community_graph(g, louvain_coms, "communities/louvain.png")
    print("END")

    print("Starting computing labelpropagation_coms")
    labelpropagation_coms = algorithms.label_propagation(g.to_undirected())
    write_on_file(labelpropagation_coms, "communities/labelpropagation.json")
    draw_community_graph(g, labelpropagation_coms, "communities/labelpropagation.png")
    print("END")

    draw_cluster_violin_map([angel_coms, infomap_coms, louvain_coms, labelpropagation_coms])
    draw_cluster_heatmap([angel_coms, infomap_coms, louvain_coms, labelpropagation_coms])

    draw_plot_map([angel_coms, infomap_coms], 1)
    draw_plot_map([angel_coms, louvain_coms], 2)
    draw_plot_map([angel_coms, labelpropagation_coms], 3)
    draw_plot_map([infomap_coms, louvain_coms], 4)
    draw_plot_map([infomap_coms, labelpropagation_coms], 5)
    draw_plot_map([louvain_coms, labelpropagation_coms], 6)
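# Minimal sketch of the pattern described in the docstring above: a Community
# Discovery algorithm returns a NodeClustering, which can be serialized to JSON
# and scored with fitness functions either as a min/max/mean/std summary or
# community-wise. Signatures are assumed from cdlib and may vary by version.
import networkx as nx
from cdlib import algorithms, evaluation

g = nx.karate_club_graph()
coms = algorithms.louvain(g)              # NodeClustering object

print(coms.to_json())                     # JSON representation of the clustering

# summary=True  -> FitnessResult with min/max/mean (score)/std;
# summary=False -> one value per community.
print(evaluation.internal_edge_density(g, coms, summary=True))
print(evaluation.internal_edge_density(g, coms, summary=False))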
def test_ranking(self):
    g = nx.karate_club_graph()
    coms = algorithms.louvain(g)
    coms2 = algorithms.demon(g, 0.25)
    coms3 = algorithms.label_propagation(g)
    coms4 = algorithms.angel(g, 0.6)

    rk = evaluation.FitnessRanking(g, [coms2, coms, coms3, coms4])
    rk.rank(evaluation.fraction_over_median_degree)
    rk.rank(evaluation.edges_inside)
    rk.rank(evaluation.cut_ratio)
    rk.rank(evaluation.erdos_renyi_modularity)
    rk.rank(evaluation.newman_girvan_modularity)
    rk.rank(evaluation.modularity_density)

    rnk, _ = rk.topsis()
    self.assertEqual(len(rnk), 4)

    pc = rk.bonferroni_post_hoc()
    self.assertLessEqual(len(pc), 4)
elif options.method == 'der':
    communities = algorithms.der(g, **clust_kwargs)
elif options.method == 'edmot':
    communities = algorithms.edmot(g, **clust_kwargs)
elif options.method == 'eigenvector':
    communities = algorithms.eigenvector(g, **clust_kwargs)
elif options.method == 'gdmp2':
    communities = algorithms.gdmp2(g, **clust_kwargs)
elif options.method == 'greedy_modularity':
    communities = algorithms.greedy_modularity(g, weight='weight', **clust_kwargs)
# elif options.method == 'infomap':
#     communities = algorithms.infomap(g)
elif options.method == 'label_propagation':
    communities = algorithms.label_propagation(g, **clust_kwargs)
elif options.method == 'markov_clustering':
    communities = algorithms.markov_clustering(g, **clust_kwargs)
elif options.method == 'rber_pots':
    communities = algorithms.rber_pots(g, weights='weight', **clust_kwargs)
elif options.method == 'rb_pots':
    communities = algorithms.rb_pots(g, weights='weight', **clust_kwargs)
elif options.method == 'significance_communities':
    communities = algorithms.significance_communities(g, **clust_kwargs)
elif options.method == 'spinglass':
    communities = algorithms.spinglass(g, **clust_kwargs)
elif options.method == 'surprise_communities':
    communities = algorithms.surprise_communities(g, **clust_kwargs)
elif options.method == 'walktrap':
    communities = algorithms.walktrap(g, **clust_kwargs)
# elif options.method == 'sbm_dl':
ari_markov = adjusted_rand_score(true_communities, markov_communities)
vi_markov = variation_of_information(true_partitions, markov_partitions)

############################### Greedy ###############################
start_time = time.time()
greedy_partition = cd.greedy_modularity(G)
greedy_time = time.time() - start_time
greedy_communities = extract_communities_list(greedy_partition.communities, G)
greedy_partitions = get_partitions(greedy_communities)
nmi_greedy = normalized_mutual_info_score(true_communities, greedy_communities)
ari_greedy = adjusted_rand_score(true_communities, greedy_communities)
vi_greedy = variation_of_information(true_partitions, greedy_partitions)

############################### Label Propagation ###############################
start_time = time.time()
propagation_partition = cd.label_propagation(G)
propagation_time = time.time() - start_time
propagation_communities = extract_communities_list(propagation_partition.communities, G)
propagation_partitions = get_partitions(propagation_communities)
nmi_propagation = normalized_mutual_info_score(true_communities, propagation_communities)
ari_propagation = adjusted_rand_score(true_communities, propagation_communities)
vi_propagation = variation_of_information(true_partitions, propagation_partitions)

# x positions for the grouped NMI / ARI / VI bars (offset by 0.3 each)
nmi_x = np.arange(8)
ari_x = [x + 0.3 for x in nmi_x]
vi_x = [x + 0.3 for x in ari_x]
walktrap_partition = cd.walktrap(LFR_G)
walktrap_labels = extract_communities_list(walktrap_partition.communities)
nmi_walktrap.append(normalized_mutual_info_score(true_labels, walktrap_labels))

############################### Markov Clustering ###############################
markov_partition = cd.markov_clustering(LFR_G)
markov_labels = extract_communities_list(markov_partition.communities)
nmi_markov.append(normalized_mutual_info_score(true_labels, markov_labels))

############################### Greedy ###############################
greedy_partition = cd.greedy_modularity(LFR_G)
greedy_labels = extract_communities_list(greedy_partition.communities)
nmi_greedy.append(normalized_mutual_info_score(true_labels, greedy_labels))

############################### Label Propagation ###############################
propagation_partition = cd.label_propagation(LFR_G)
propagation_labels = extract_communities_list(propagation_partition.communities)
nmi_propagation.append(normalized_mutual_info_score(true_labels, propagation_labels))

# Plot NMI scores
nmi_graph = plt.gca()
nmi_graph.set_xlim([0, 0.9])
nmi_graph.set_ylim([-0.1, 1])
nmi_graph.plot(np.arange(0.1, 1, 0.1), nmi_infomap, color='#575757', marker='o',
               mfc='#f1362b', mec='#f1362b', label="Infomap")
nmi_graph.plot(np.arange(0.1, 1, 0.1), nmi_eigenvector, color='#575757', marker='o',
               mfc='#17c436', mec='#17c436', label="Eigenvector")
nmi_graph.plot(np.arange(0.1, 1, 0.1), nmi_leiden, color='#575757', marker='o',
               mfc='#d9a000', mec='#d9a000', label="Leiden")
nmi_graph.plot(np.arange(0.1, 1, 0.1), nmi_walktrap, color='#575757', marker='o',
               mfc='#532ce9', mec='#532ce9', label="Walktrap")
        cs.erdos_renyi_modularity().score))
    print("{0:>15s} | {1:.6f}".format(
        'Robustness', cs.normalized_mutual_information(alg(G)).score))
    print("{0:>15s} | {1:.1f} sec\n".format('Timing', time() - tic))

    return cs


# G = read('toy')
G = read('karate')
# G = read('women')
# G = read('dolphins')
# G = read('got-appearance')
# G = read('diseasome')
# G = read('wars')
# G = read('transport')
# G = read('java')
# G = read('imdb')
# G = read('wikileaks')

info(G)

clusters(G, lambda G: algorithms.girvan_newman(G, level=1))
clusters(G, lambda G: algorithms.label_propagation(G))
cs = clusters(G, lambda G: algorithms.louvain(G))
# clusters(G, lambda G: algorithms.leiden(G))
# clusters(G, lambda G: algorithms.sbm_dl(G))

viz.plot_network_clusters(G, cs, nx.spring_layout(G))
plt.show()