Example #1
0
def benchmark_scores(samplesize=1):
    """Generate score data sets for both LFR graph collections.

    For each mixing parameter mu and each sample index, reads a pickled
    graph, scores the Spinglass, InfoMap and Leiden optimizers via
    benchmark_score(), and writes one CSV row per graph to
    benchmarks-opt1.csv (set 1, "lfr" prefix) and benchmarks-opt2.csv
    (set 2, "lfr2" prefix).

    Args:
        samplesize: number of graph samples evaluated per mu value.
    """
    # Mixing parameters to evaluate; extend as needed:
    # 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7
    mus = [0.1]

    # The two graph sets differ only in filename prefix and output file.
    _score_graph_set(mus, samplesize, "lfr", "benchmarks-opt1.csv")
    _score_graph_set(mus, samplesize, "lfr2", "benchmarks-opt2.csv")


def _score_graph_set(mus, samplesize, prefix, outfile):
    """Score one collection of pickled graphs and write the results as CSV.

    Args:
        mus: iterable of mixing-parameter values.
        samplesize: number of graph samples per mu.
        prefix: graph filename prefix ("lfr" or "lfr2").
        outfile: CSV output path; overwritten, one header row then one
            row "spinglass, infomap, leiden, mu" per graph.
    """
    # `with` guarantees the file is closed even if an optimizer raises.
    with open(outfile, "w+") as f:
        f.write("Spinglass, InfoMap, Leiden, Mu\n")
        for mu in mus:
            print(mu)
            for i in range(samplesize):
                filename = "./Graphs/{}-graph{}-mu-{}.txt".format(prefix, i, mu)
                g = nx.read_gpickle(filename)
                # There is a probability the optimizers fail; -1 marks a
                # failed run instead of aborting the whole sweep. Catch
                # Exception, not bare except, so Ctrl-C still works.
                try:
                    scorespinglass = benchmark_score(g, algorithms.spinglass(g))
                except Exception:
                    scorespinglass = -1
                try:
                    scoreinfomap = benchmark_score(g, algorithms.infomap(g))
                except Exception:
                    scoreinfomap = -1
                # Leiden was not guarded in the original; any failure
                # propagates (preserved behavior).
                scoreleiden = benchmark_score(g, algorithms.leiden(g))
                vals = (scorespinglass, scoreinfomap, scoreleiden, mu)
                f.write("%f, %f, %f, %f\n" % vals)
                print("%f, %f, %f, %f" % vals)
Example #2
0
 def test_spinglass(self):
     """Spinglass on the string-node graph yields list-of-list-of-str communities."""
     g = get_string_graph()
     com = algorithms.spinglass(g)
     # assertIsInstance is the idiomatic check; type(x) == T rejects
     # subclasses and reads worse in failure output.
     self.assertIsInstance(com.communities, list)
     if com.communities:
         self.assertIsInstance(com.communities[0], list)
         self.assertIsInstance(com.communities[0][0], str)
 def spinglass(self):
     """Run the Spinglass community algorithm on the stored graph.

     Prints the community count, modularity and significance scores,
     writes the per-node community assignment (plus degree and region
     name columns) to a .node file under Results/Communities/, and
     returns the ordered community list.

     Returns:
         The community labels as produced by get_ordered_communities().
     """
     # Positional sep/header args to read_csv were deprecated in pandas
     # 1.x and removed in 2.0 — pass them by keyword.
     template = pd.read_csv("data/communities_template.node", sep=" ", header='infer')
     saveTo = "Results/Communities/" + self.name + "_spinglass_communities.node"
     G = nx.Graph(self.g)
     result = algorithms.spinglass(G)
     modularity = result.newman_girvan_modularity().score
     significance = result.significance().score
     communities = result.to_node_community_map()
     # NOTE(review): assumes the last entry of the node->community map
     # carries the highest community id — confirm the ordering guarantee
     # of to_node_community_map().
     n_communities = list(communities.values())[-1][0] + 1
     print("\nSpinglass Algorithm: ")
     print("#Communities: ", n_communities)
     print("Modularity: ", modularity)
     print("Significance: ", significance)
     c = self.get_ordered_communities(communities)
     template['Spinglass'] = c
     print("\n")
     template['Degree'] = list(self.centrality)
     template['RegionName'] = self.region_names
     # Bound-method call with keyword sep replaces the unbound
     # pd.DataFrame.to_csv(template, saveTo, " ", ...) form.
     template.to_csv(saveTo, sep=" ", header=False, index=False)
     return c
Example #4
0
                                                weight='weight',
                                                **clust_kwargs)
 #elif(options.method == 'infomap'):
 #	communities = algorithms.infomap(g)
 elif (options.method == 'label_propagation'):
     communities = algorithms.label_propagation(g, **clust_kwargs)
 elif (options.method == 'markov_clustering'):
     communities = algorithms.markov_clustering(g, **clust_kwargs)
 elif (options.method == 'rber_pots'):
     communities = algorithms.rber_pots(g, weights='weight', **clust_kwargs)
 elif (options.method == 'rb_pots'):
     communities = algorithms.rb_pots(g, weights='weight', **clust_kwargs)
 elif (options.method == 'significance_communities'):
     communities = algorithms.significance_communities(g, **clust_kwargs)
 elif (options.method == 'spinglass'):
     communities = algorithms.spinglass(g, **clust_kwargs)
 elif (options.method == 'surprise_communities'):
     communities = algorithms.surprise_communities(g, **clust_kwargs)
 elif (options.method == 'walktrap'):
     communities = algorithms.walktrap(g, **clust_kwargs)
 #elif(options.method == 'sbm_dl'):
 #	communities = algorithms.sbm_dl(g)
 #elif(options.method == 'sbm_dl_nested'):
 #	communities = algorithms.sbm_dl_nested(g)
 elif (options.method == 'lais2'):
     communities = algorithms.lais2(g, **clust_kwargs)
 elif (options.method == 'big_clam'):
     communities = algorithms.big_clam(g, **clust_kwargs)
 elif (options.method == 'danmf'):
     communities = algorithms.danmf(g, **clust_kwargs)
 elif (options.method == 'ego_networks'):
Example #5
0
# Apply the community discovery algorithms and save the results to file.
#
# WARNING: takes a few minutes to run.
#
# TIP: skip to the next cell, which loads the results from file instead.

# In[30]:

# Accumulators for the per-algorithm accuracy, summed over the repeated
# runs below.
accuracy_spinglass = 0
accuracy_eigenvector = 0
accuracy_leiden = 0
accuracy_cpm = 0
accuracy_rber_pots = 0
for i in range(10):
    result_spinglass_tmp = algorithms.spinglass(g1)
    result_eigenvector_tmp = algorithms.eigenvector(g1)
    result_leiden_tmp = algorithms.leiden(g1)
    result_cpm_tmp = algorithms.cpm(g1, resolution_parameter=.00018)
    result_rber_pots_tmp = algorithms.rber_pots(g1, resolution_parameter=.32)

    #definizione colonne che servono per calcolare l'accuracy
    nodes1['community_spinglass'] = -1
    for i in range(len(result_spinglass_tmp.communities)):
        for j in result_spinglass_tmp.communities[i]:
            nodes1.loc[j, 'community_spinglass'] = i
    nodes1['community_eigenvector'] = -1
    for i in range(len(result_eigenvector_tmp.communities)):
        for j in result_eigenvector_tmp.communities[i]:
            nodes1.loc[j, 'community_eigenvector'] = i
    nodes1['community_leiden'] = -1