コード例 #1
0
    def test_markov_clustering(self):
        """Markov clustering returns a list-of-lists partition whose node
        labels match the node type of the input graph (str or int)."""

        def _assert_partition(result, node_type):
            # Communities are expected as a list of lists of node identifiers.
            # assertIsInstance is the idiomatic unittest check and gives a
            # clearer failure message than comparing type() objects.
            self.assertIsInstance(result.communities, list)
            if result.communities:
                self.assertIsInstance(result.communities[0], list)
                if result.communities[0]:
                    self.assertIsInstance(result.communities[0][0], node_type)

        # Graph with string node labels -> string identifiers in communities.
        _assert_partition(algorithms.markov_clustering(get_string_graph()), str)

        # Karate club graph uses integer node labels.
        _assert_partition(algorithms.markov_clustering(nx.karate_club_graph()),
                          int)
コード例 #2
0
 # Dispatch the command-line-selected community-detection method to the
 # matching cdlib routine; per-method options are forwarded via clust_kwargs.
 # (The opening `if` of this chain lies above this chunk.)
 elif (options.method == 'edmot'):
     communities = algorithms.edmot(g, **clust_kwargs)
 elif (options.method == 'eigenvector'):
     communities = algorithms.eigenvector(g, **clust_kwargs)
 elif (options.method == 'gdmp2'):
     communities = algorithms.gdmp2(g, **clust_kwargs)
 elif (options.method == 'greedy_modularity'):
     # NOTE(review): greedy_modularity takes the singular 'weight' keyword,
     # while rber_pots/rb_pots below take the plural 'weights' — presumably
     # both match the cdlib signatures; verify against the installed version.
     communities = algorithms.greedy_modularity(g,
                                                weight='weight',
                                                **clust_kwargs)
 #elif(options.method == 'infomap'):
 #	communities = algorithms.infomap(g)
 elif (options.method == 'label_propagation'):
     communities = algorithms.label_propagation(g, **clust_kwargs)
 elif (options.method == 'markov_clustering'):
     communities = algorithms.markov_clustering(g, **clust_kwargs)
 elif (options.method == 'rber_pots'):
     communities = algorithms.rber_pots(g, weights='weight', **clust_kwargs)
 elif (options.method == 'rb_pots'):
     communities = algorithms.rb_pots(g, weights='weight', **clust_kwargs)
 elif (options.method == 'significance_communities'):
     communities = algorithms.significance_communities(g, **clust_kwargs)
 elif (options.method == 'spinglass'):
     communities = algorithms.spinglass(g, **clust_kwargs)
 elif (options.method == 'surprise_communities'):
     communities = algorithms.surprise_communities(g, **clust_kwargs)
 elif (options.method == 'walktrap'):
     communities = algorithms.walktrap(g, **clust_kwargs)
 # The stochastic-block-model methods are disabled; branch below is truncated
 # in this chunk.
 #elif(options.method == 'sbm_dl'):
 #	communities = algorithms.sbm_dl(g)
 #elif(options.method == 'sbm_dl_nested'):
コード例 #3
0
from cdlib import algorithms
import networkx as nx
import pickle

# Build the graph from the tab-separated HPO/STRING edge list.
G = nx.read_edgelist('HPO_String_Analysis/HPO_String_edgelist.tsv',
                     delimiter='\t')
# G = nx.karate_club_graph()

# Run Markov clustering and persist the raw result for later reuse.
coms = algorithms.markov_clustering(G)
# Use a context manager so the pickle file is flushed and closed even on
# error (the original `pickle.dump(coms, open(...))` leaked the handle).
with open('CommunityDetection/markov_coms.pickle', 'wb') as pickle_file:
    pickle.dump(coms, pickle_file)

# node -> list of community ids the node belongs to.
cd = coms.to_node_community_map()

# Write one line per node: <node>\t<community id>[\t<community id>...]
with open('CommunityDetection/markov.txt', 'w') as outfile:
    for node, community_ids in cd.items():
        outfile.write(node + '\t' +
                      '\t'.join(str(c) for c in community_ids) + '\n')
コード例 #4
0
    ############################### Louvain ###############################
    louvian_partition = cd.louvain(LFR_G)
    louvian_labels = extract_communities_list(louvian_partition.communities)
    nmi_louvian.append(normalized_mutual_info_score(true_labels, louvian_labels))

    ############################### Leiden ###############################
    leiden_partition = cd.leiden(LFR_G)
    leiden_labels = extract_communities_list(leiden_partition.communities)
    # BUGFIX: the Leiden NMI was previously computed against the Louvain
    # labels (copy-paste error), silently inflating/deflating nmi_leiden.
    nmi_leiden.append(normalized_mutual_info_score(true_labels, leiden_labels))

    ############################### Walktrap ###############################
    walktrap_partition = cd.walktrap(LFR_G)
    walktrap_labels = extract_communities_list(walktrap_partition.communities)
    nmi_walktrap.append(normalized_mutual_info_score(true_labels, walktrap_labels))

    ############################### Markov Clustering ###############################
    markov_partition = cd.markov_clustering(LFR_G)
    markov_labels = extract_communities_list(markov_partition.communities)
    nmi_markov.append(normalized_mutual_info_score(true_labels, markov_labels))

    ############################### Greedy ###############################
    greedy_partition = cd.greedy_modularity(LFR_G)
    greedy_labels = extract_communities_list(greedy_partition.communities)
    nmi_greedy.append(normalized_mutual_info_score(true_labels, greedy_labels))

    ############################### Label Propagation ###############################
    propagation_partition = cd.label_propagation(LFR_G)
    propagation_labels = extract_communities_list(propagation_partition.communities)
    nmi_propagation.append(normalized_mutual_info_score(true_labels, propagation_labels))

#Plot NMI scores
nmi_graph = plt.gca()
############################### Walktrap ###############################
# For each algorithm: time the run, flatten the partition into per-node
# labels, and score it against ground truth with NMI, ARI, and VI.
start_time = time.time()
walktrap_partition = cd.walktrap(G)
walktrap_time = time.time() - start_time
walktrap_communities = extract_communities_list(walktrap_partition.communities,
                                                G)
walktrap_partitions = get_partitions(walktrap_communities)
nmi_walktrap = normalized_mutual_info_score(true_communities,
                                            walktrap_communities)
ari_walktrap = adjusted_rand_score(true_communities, walktrap_communities)
vi_walktrap = variation_of_information(true_partitions, walktrap_partitions)

############################### Markov Clustering ###############################
start_time = time.time()
markov_partition = cd.markov_clustering(G)
markov_time = time.time() - start_time
markov_communities = extract_communities_list(markov_partition.communities, G)
markov_partitions = get_partitions(markov_communities)
nmi_markov = normalized_mutual_info_score(true_communities, markov_communities)
ari_markov = adjusted_rand_score(true_communities, markov_communities)
vi_markov = variation_of_information(true_partitions, markov_partitions)

############################### Greedy ###############################
start_time = time.time()
greedy_partition = cd.greedy_modularity(G)
greedy_time = time.time() - start_time
greedy_communities = extract_communities_list(greedy_partition.communities, G)
greedy_partitions = get_partitions(greedy_communities)
nmi_greedy = normalized_mutual_info_score(true_communities, greedy_communities)
# NOTE(review): this section is truncated in the visible chunk — the greedy
# VI computation (vi_greedy) presumably follows below.
ari_greedy = adjusted_rand_score(true_communities, greedy_communities)
コード例 #6
0
# Column sums of the transposed transition matrix; a zero entry means the
# corresponding node is never reached (no incoming probability).
pIn = np.sum(psi.T, axis=0)
# print('ProbIn: {}'.format(pIn))
print('No access: {}'.format(list(np.where(pIn == 0)[0])))
# Row-normalize (L2) the transition matrix before building the graph.
psiN = normalize(psi, axis=1, norm='l2')
# (fig, ax) = pts.plotMatrix(psiN)
# fun.quickSave(fig, ax, PT_IMG, 'transitions.png', dpi=2000)
##############################################################################
# Transitions Matrix and Base Network
##############################################################################
# NOTE(review): nx.from_numpy_matrix was removed in networkx 3.0 in favor of
# nx.from_numpy_array — confirm the pinned networkx version.
G = nx.from_numpy_matrix(psiN)
G.remove_edges_from(nx.selfloop_edges(G))
G = fun.calcNetworkDistance(G)
##############################################################################
# Community Detection
##############################################################################
coms = cd.markov_clustering(G)
comsID = coms.communities
ptsNum = points.shape[0]
# Map every point index to the id of the community containing it.
labelsN = [fun.find_in_list_of_list(comsID, i)[0] for i in range(ptsNum)]
##############################################################################
# Geographic Clustering
##############################################################################
# NOTE(review): despite the name, this is agglomerative (not k-means)
# clustering of coordinates, with as many clusters as Markov communities.
kmeans = AgglomerativeClustering(n_clusters=len(comsID)).fit(longlats)
labels = list(kmeans.labels_)
# Reshape to export list of lists of clusters --------------------------------
comsKM = [[] for i in range(len(comsID))]
ids = list(range(ptsNum))
for i in range(ptsNum):
    comsKM[labels[i]].append(i)
##############################################################################
# Plot