Example #1
def consensus_modularity(adjacency, gamma=1, B='modularity',
                         repeats=250, null_func=np.mean, seed=None):
    """
    Finds community assignments from `adjacency` through consensus

    Performs `repeats` iterations of community detection on `adjacency` and
    then uses consensus clustering on the resulting community assignments.

    Parameters
    ----------
    adjacency : (N, N) array_like
        Adjacency matrix (weighted/non-weighted) on which to perform consensus
        community detection.
    gamma : float, optional
        Resolution parameter for modularity maximization. Default: 1
    B : str or (N, N) array_like, optional
        Null model to use for consensus clustering. If `str`, must be one of
        ['modularity', 'potts', 'negative_sym', 'negative_asym']. Default:
        'modularity'
    repeats : int, optional
        Number of times to repeat Louvain algorithm clustering. Default: 250
    null_func : callable, optional
        Function used to generate null model when performing consensus-based
        clustering. Must accept a 2D array as input and return a single value.
        Default: `np.mean`
    seed : {int, np.random.RandomState instance, None}, optional
        Seed for random number generation. Default: None

    Returns
    -------
    consensus : (N,) np.ndarray
        Consensus-derived community assignments
    Q_all : array_like
        Optimized modularity over all `repeats` community assignments
    zrand_all : array_like
        z-Rand score over all pairs of `repeats` community assignment vectors

    References
    ----------
    Bassett, D. S., Porter, M. A., Wymbs, N. F., Grafton, S. T., Carlson,
    J. M., & Mucha, P. J. (2013). Robust detection of dynamic community
    structure in networks. Chaos: An Interdisciplinary Journal of Nonlinear
    Science, 23(1), 013142.
    """

    # generate community partitions `repeats` times
    comms, Q_all = zip(*[bct.community_louvain(adjacency, gamma=gamma, B=B)
                         for i in range(repeats)])
    comms = np.column_stack(comms)

    # find consensus cluster assignments across all partitioning solutions
    consensus = cluster.find_consensus(comms, null_func=null_func, seed=seed)

    # get z-rand statistics for partition similarity (n.b. can take a while)
    zrand_all = _zrand_partitions(comms)

    return consensus, np.array(Q_all), zrand_all
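
# A minimal usage sketch, assuming numpy (np) and bct are imported and the
# netneurotools helpers (`cluster`, `_zrand_partitions`) used above are
# available; the random symmetric matrix is a stand-in for real data:
import numpy as np

rng = np.random.default_rng(1234)
W = rng.random((50, 50))
W = (W + W.T) / 2        # symmetrize
np.fill_diagonal(W, 0)   # remove self-loops
consensus, Q_all, zrand_all = consensus_modularity(W, gamma=1, repeats=50,
                                                   seed=1234)
print(consensus.shape, Q_all.mean())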
Example #2
def find_communities_louvain(g, gamma=1):
    """The louvain algorithm for community (module) structure.

    The input should be a graph in networkx graph format.  This
    function adds the "louvain" property to each of the nodes,
    describing which module (community) the node belongs to.  It
    returns a dictionary, where each index is a node and the value is
    the value is an integer representing the community index that node
    is a part of.
    """
    assert type(g) == networkx.classes.graph.Graph, "Not a graph"
    #assert networkx.is_connected(g), "Graph not connected"
    lmodule = bct.community_louvain(numpy.asarray(networkx.to_numpy_matrix(g)), gamma=gamma)
    c = dict(zip(g.nodes(), list(map(int, lmodule[0]))))
    networkx.set_node_attributes(g, 'louvain', c)
    return c
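
# A minimal usage sketch, assuming networkx and bct are imported and
# find_communities_louvain (above) is defined:
import networkx

g = networkx.karate_club_graph()
communities = find_communities_louvain(g, gamma=1)
print(communities)                                 # node -> community index
print(networkx.get_node_attributes(g, 'louvain'))  # same mapping, stored on g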
Example #3
def genlouvain_over_time(graphs, gamma=1., omega=1.):
    """Calculate genlouvain partition over time

    Parameters
    ----------
    graphs : array, shape (n_nodes, n_nodes, n_times)

    Returns
    -------
    partition_time : array, shape(n_nodes, n_times)
    q_time : float
    """
    n_nodes, _, n_time = graphs.shape
    conn_matrix = np.zeros([n_nodes * n_time, n_nodes * n_time])
    twomu = 0
    for t in range(n_time):
        this_mat = np.squeeze(graphs[..., t])

        k = np.sum(this_mat, axis=0)
        twom = np.sum(k)
        twomu = twomu + twom

        indx = np.arange(n_nodes) + t * n_nodes
        iinds, jinds = np.meshgrid(indx, indx)
        conn_matrix[iinds, jinds] = this_mat - gamma * np.outer(k, k) / twom

    twomu = twomu + 2 * omega * n_nodes * (n_time - 1)
    # inter-slice coupling: ones on the off-diagonals at offset +/- n_nodes
    n_rows = n_nodes * n_time
    diags = np.eye(n_rows, k=-n_nodes) + np.eye(n_rows, k=n_nodes)
    conn_matrix = conn_matrix + omega * diags

    # Generate the Louvain partition; W is a dummy all-ones matrix because the
    # multislice modularity matrix is passed in directly via B
    partition_time, q_time = bct.community_louvain(np.ones_like(conn_matrix),
                                                   gamma=gamma,
                                                   B=conn_matrix)
    q_time = q_time / twomu

    # Reshape so that nodes are rows and time points are columns
    partition_multi = partition_time.reshape(n_time, n_nodes).T
    return partition_multi, q_time
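
# A minimal usage sketch, assuming numpy (np) and bct are imported and
# genlouvain_over_time (above) is defined; random symmetric slices stand in
# for real time-resolved connectivity:
import numpy as np

rng = np.random.default_rng(0)
graphs = rng.random((20, 20, 5))
graphs = (graphs + graphs.transpose(1, 0, 2)) / 2  # symmetrize each slice
partition_multi, q_time = genlouvain_over_time(graphs, gamma=1., omega=1.)
print(partition_multi.shape)  # (n_nodes, n_times) == (20, 5)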
Example #4
def calculate_communitites(tissue_name):
    print(tissue_name + " calculation started...")

    # Creating a dict for all the results to be printed out
    results_dict = {tissue_name: {'q1s': []}}

    corr_mat = pd.read_pickle("data/corr_" + tissue_name + ".pkl")

    corr_mat = corr_mat.replace([np.inf], np.nan).fillna(0)

    corr_arr = corr_mat.values.copy()
    corr_mat = None  # trying to free memory
    corr_arr[(corr_arr > -0.8) & (corr_arr < 0.8)] = 0

    # About negative_asym: https://www.sciencedirect.com/science/article/pii/S105381191100348X
    # Iterative community fine-tuning: feed the previous partition back in as `ci`.
    m = None
    q0 = -1
    q1 = 0
    while q1 - q0 > 1e-5:
        q0 = q1
        (m, q1) = bct.community_louvain(corr_arr, ci=m, B='negative_asym')
        results_dict[tissue_name]['q1s'].append(q1)
        # print(tissue_name + " q1 = " + str(q1))

    results_dict[tissue_name]['no_com'] = len(np.unique(m))
    # print("Number of communitites: " + str(len(np.unique(m))))

    uniq_arrs = np.unique(np.unique(m, return_counts=True)[1],
                          return_counts=True)
    results_dict[tissue_name]['uniq_arrs'] = uniq_arrs
    # for i in range(len(uniq_arrs[0])):
    #    print("Communities of size " + str(uniq_arrs[0][i]) + ": " + str(uniq_arrs[1][i]))

    # print("len(np.unique(m)) = " + str(len(np.unique(m))))
    # print("np.unique(np.unique(m, return_counts=True)[1]) = " + str(np.unique(np.unique(m, return_counts=True)[1])))

    pickle.dump((m, q1),
                open("results/louvain_modules_" + tissue_name + ".pkl", "wb"))

    return results_dict
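
# A minimal usage sketch, assuming the pickled correlation matrix exists at
# data/corr_<tissue_name>.pkl and a results/ directory is writable; the
# tissue name here is hypothetical:
results = calculate_communitites("cortex")
print(results["cortex"]["no_com"], results["cortex"]["q1s"][-1])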
Example #5
def core_periphery_analysis(network0):
    network0 /= np.sum(network0)
    C, Q_core = bct.core_periphery_dir(network0)
    per_nodes = [i for i, ci in enumerate(C) if ci == 0]  # periphery nodes
    G = nx.from_numpy_array(network0)  # from_numpy_matrix was removed in networkx 3
    G_per = G.subgraph(per_nodes)
    per_network = nx.to_numpy_array(G_per)
    M_per, Q_comm_per = bct.community_louvain(per_network)
    print("Q =", Q_comm_per)
    # print(M_per, Q_comm_per)
    per_comm_assignments = {}
    for i in range(len(per_nodes)):
        per_comm_assignments[per_nodes[i]] = M_per[i]
    classifications = [
        [], [], []
    ]  # index 0 means periphery-periphery edge, 1 means periphery-core, 2 means core-core
    for i in range(len(network0) - 1):
        for j in range(i + 1, len(network0)):
            if network0[i][j] > 0:
                classifications[C[i] + C[j]].append((i, j))
    return classifications, per_comm_assignments, G_per, M_per
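
# A minimal usage sketch, assuming numpy (np), networkx (nx), and bct are
# imported and core_periphery_analysis (above) is defined:
import numpy as np

rng = np.random.default_rng(7)
W = rng.random((30, 30))
W = (W + W.T) / 2
np.fill_diagonal(W, 0)
classes, per_comm, G_per, M_per = core_periphery_analysis(W)
# classes[0]: periphery-periphery, classes[1]: periphery-core, classes[2]: core-core
print(len(classes[0]), len(classes[1]), len(classes[2]))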
Example #6
def consensus_iterative(C):
    '''
    CONSENSUS_ITERATIVE     Construct a consensus (representative) partition
    using the iterative thresholding procedure
    [S2 Q2 X_new3 qpc] = CONSENSUS_ITERATIVE(C) identifies a single
    representative partition from a set of C partitions, based on
    statistical testing in comparison to a null model. A thresholded nodal
    association matrix is obtained by subtracting a random nodal
    association matrix (null model) from the original matrix. The
    representative partition is then obtained by using a Generalized
    Louvain algorithm with the thresholded nodal association matrix.
    NOTE: ported from MATLAB code that required genlouvain.m on the MATLAB
    path; this Python version uses bct.community_louvain instead
    Inputs:     C,      pxn matrix of community assignments where p is the
                        number of optimizations and n the number of nodes

    Outputs:    S2,     pxn matrix of new community assignments
                Q2,     associated modularity value
                X_new3, thresholded nodal association matrix
                qpc,    quality of the consensus (lower == better)

    Bassett, D. S., Porter, M. A., Wymbs, N. F., Grafton, S. T., Carlson,
    J. M., & Mucha, P. J. (2013). Robust detection of dynamic community
    structure in networks. Chaos: An Interdisciplinary Journal of Nonlinear
    Science, 23(1), 013142.
    '''


    # Number of partitions
    npart = len(C[:, 0])

    # size of the network
    m = len(C[0, :])

    # Initialize a permuted version of C
    C_rand3 = np.zeros(shape=np.shape(C), dtype=int)

    # Nodal association matrix for C
    X = np.zeros(shape=(m, m), dtype=np.double)

    # Random nodal association matrix for C_rand3
    X_rand3 = np.zeros(shape=(m, m), dtype=np.double)


    # NODAL ASSOCIATION MATRIX

    # try a random permutation approach
    for i in range(npart):
        pr = np.random.permutation(range(m))
        # C_rand3 is the same as C, but with each row permuted
        C_rand3[i, :] = C[i, pr]

    # Calculate the nodal association matrices X and X_rand3
    for i in range(npart):
        print(i)
        for k in range(m):
            for p in range(m):
                # element (k,p) counts how many times nodes k and p have
                # been assigned to the same community
                if C[i, k] == C[i, p]:
                    X[k, p] = X[k, p] + 1
                # element (k,p) counts how many times nodes k and p are
                # expected to land in the same community by chance
                if C_rand3[i, k] == C_rand3[i, p]:
                    X_rand3[k, p] = X_rand3[k, p] + 1


    # THRESHOLDING
    # keep only associated assignments that occur more often than expected in
    # the random data

    X_new3 = np.zeros(shape=(m, m), dtype=np.double)
    keep = X > np.amax(np.triu(X_rand3, k=1))
    X_new3[keep] = X[keep]


    # GENERATE THE REPRESENTATIVE PARTITION


    # recompute optimal partition on this new matrix of kept community
    # association assignments

    S2 = np.zeros(shape=(npart, m), dtype=int)
    Q2 = np.zeros(shape=(npart, 1), dtype=np.double)
    for i in range(npart):
        print(i)

        # [S2(i,:) Q2(i)] = multislice_static_unsigned(X_new3,1);
        S, Q = bct.community_louvain(W=X_new3, gamma=0)
        S2[i, :] = S
        Q2[i] = Q

    # define the quality of the consensus

    qpc = np.sum(abs(np.diff(S2, axis=0)))
    return (S2, Q2, X_new3, qpc)
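
# A minimal usage sketch, assuming numpy (np) and bct are imported and
# consensus_iterative (above) is defined; C stacks 10 noisy copies of a
# planted 5-community partition into a p x n matrix:
import numpy as np

base = np.repeat(np.arange(1, 6), 10)            # 5 planted communities
noise = np.random.randint(1, 6, size=(10, 50))
C = np.where(np.random.rand(10, 50) < 0.9, base, noise)
S2, Q2, X_new3, qpc = consensus_iterative(C)
print(qpc)  # lower values mean the consensus partitions agree more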
Example #7
import numpy as np
import bct
from brainroisurf.nilearndrawmembership import draw_parcellation_multiview

if __name__ == '__main__':
    template = 'data/templates/atlas/template_638.nii.gz'
    surf_left = 'data/brainmeshes/BrainMesh_ICBM152Left_smoothed.nv.gz'
    surf_right = 'data/brainmeshes/BrainMesh_ICBM152Right_smoothed.nv.gz'
    A = np.loadtxt('data/matrices/Coactivation_matrix_weighted.adj')
    memb, q = bct.community_louvain(
        A, gamma=1.5)  # run community detection on matrix A
    memb = np.asarray(memb)  # convert to numpy array
    memb = (memb - memb.min()) + 1  # from 1 to C
    draw_parcellation_multiview(template, surf_left, surf_right, memb,
                                'coactivation_louvain_fullview.png')
Example #8
def test_community_louvain():
    x = load_sample(thres=0.4)
    seed = 39185
    ci, q = bct.community_louvain(x, seed=seed)
    print(q)
    assert np.allclose(q, 0.2583, atol=0.015)
Example #9
    # adjust (z-score each subject's matrix before averaging)
    adjust_cm = (cm - np.mean(cm)) / np.std(cm)
    group_cm.append(adjust_cm)

# Group mean
mean_cm = np.array(np.mean(group_cm, 0))

# Gordon sorting
cm_sort = conn_calc.gordon_sort(mean_cm)

# Gordon plotting
plt.rcParams['figure.figsize'] = [10, 10]
plotting.rdbu_heatmap(cm_sort[0], vmax=1, vmin=-1, lines=cm_sort[1], center=0)

# Glasser plotting
com = bct.community_louvain(mean_cm, gamma=1.2, B='negative_sym')
order = []
for network in np.unique(com[0]):
    order = order + np.where(com[0] == network)[0].tolist()
sns.heatmap(cm[order][:, order], cmap='RdBu_r', vmax=1, vmin=-1, center=0)

# Make zip
for subject in subject_list:
    src = f"{results_dir}/{subject}/{subject}_{atlas}_pearson.csv"
    dst = f"{results_dir}/collated/{subject}_{atlas}_pearson.csv"
    shutil.copy(src, dst)

# demo details
demo_df = pd.read_csv(
    "/home/k1201869/e_i_modelling/results/hcp_processed/collated/ndar_subject01.txt",
    sep="\t")
Example #11
def process(data):
    ci, q = bct.community_louvain(data, gamma=1, B='modularity', seed=SEED)
    return ci  # TODO: report q?
Example #12
c_a = bct.community_louvain

G_w, connectivity_adj, threshold_visual_style = Graph_Analysis(
    spearman_connectivity,
    community_alg=c_a,
    thresh_func=t_f,
    threshold=t,
    plot_threshold=plot_t,
    print_options={'lookup': {}},
    plot_options={'inline': False})

# distance graph
t = 1
plot_t = get_fully_connected_threshold(distance_connectivity, .1)
t_f = bct.threshold_proportional
c_a = lambda x: bct.community_louvain(x, gamma=1)

G_w, connectivity_adj, visual_style = Graph_Analysis(
    distance_connectivity,
    community_alg=c_a,
    thresh_func=t_f,
    threshold=t,
    plot_threshold=plot_t,
    print_options={'lookup': {}},
    plot_options={'inline': False})

# signed graph
t = 1
t_f = threshold_proportional_sign
c_a = bct.modularity_louvain_und_sign
Example #13
def modularity_maximisation(consensus_mx):
    partition, _ = bct.community_louvain(consensus_mx)

    return partition
Example #14
def consensus_modularity(adjacency,
                         gamma=1, B='modularity',
                         repeats=250,
                         null_func=np.mean):
    """
    Parameters
    ----------
    adjacency : (N x N) array_like
        Non-negative adjacency matrix
    gamma : float, optional
        Weighting parameters used in modularity maximization. Default: 1
    B : str or array_like, optional
        Null model for modularity maximization. Default: 'modularity'
    repeats : int, optional
        Number of times to repeat community detection (via modularity
        maximization). Generated community assignments will be combined into a
        consensus matrix. Default: 250
    null_func : function, optional
        Function that can accept an array and return a single number. This is
        used during the procedure that generates the consensus community
        assignment vector from the `repeats` individual community assignment
        vectors. Default: numpy.mean

    Returns
    -------
    consensus : np.ndarray
        Consensus community assignments
    Q_mean : float
        Average modularity of generated community assignment partitions
    zrand_avg : float
        Average z-Rand of generated community assignment partitions
    zrand_std : float
        Standard deviation z-Rand of generated community assignment partitions

    References
    ----------
    .. [1] Bassett, D. S., Porter, M. A., Wymbs, N. F., Grafton, S. T.,
       Carlson, J. M., & Mucha, P. J. (2013). Robust detection of dynamic
       community structure in networks. Chaos: An Interdisciplinary Journal of
       Nonlinear Science, 23(1), 013142.
    """

    # generate community partitions `repeats` times
    partitions = [bct.community_louvain(adjacency,
                                        gamma=gamma,
                                        B=B) for i in range(repeats)]

    # get community labels and Qs
    comms = np.column_stack([f[0] for f in partitions])
    Q_mean = np.mean([f[1] for f in partitions])

    ag = bct.clustering.agreement(comms) / repeats

    # generate null agreement matrix by permuting each partition
    comms_null = np.zeros_like(comms)
    for n, i in enumerate(comms.T):
        comms_null[:, n] = np.random.permutation(i)
    ag_null = bct.clustering.agreement(comms_null) / repeats

    # get `null_func` of null agreement matrix
    tau = null_func(ag_null)

    # consensus cluster the agreement matrix using `tau` as threshold
    consensus = bct.clustering.consensus_und(ag, tau, 10)

    # get zrand statistics for partition similarity
    zrand_avg, zrand_std = zrand_partitions(comms)

    return consensus, Q_mean, zrand_avg, zrand_std
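
# The consensus step above in isolation: a minimal sketch, assuming numpy
# (np) and bct are imported; comms is an (N, repeats) array of labels:
import numpy as np
import bct

comms = np.random.randint(1, 4, size=(30, 100))   # hypothetical assignments
ag = bct.clustering.agreement(comms) / 100        # agreement in [0, 1]
comms_null = np.apply_along_axis(np.random.permutation, 0, comms)
tau = np.mean(bct.clustering.agreement(comms_null) / 100)
consensus = bct.clustering.consensus_und(ag, tau, 10)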
Example #15
###############################################################################
# We can see some structure in the data, but we want to define communities or
# networks (groups of ROIs that are more correlated with one another than ROIs
# in other groups). To do that we'll use the Louvain algorithm from the `bctpy`
# toolbox.
#
# Unfortunately the defaults for the Louvain algorithm cannot handle negative
# data, so we will make a copy of our correlation matrix and zero out all the
# negative correlations:

import bct

nonegative = corr.copy()
nonegative[corr < 0] = 0

ci, Q = bct.community_louvain(nonegative, gamma=1.5)
num_ci = len(np.unique(ci))
print('{} clusters detected with a modularity of {:.2f}.'.format(num_ci, Q))

###############################################################################
# We'll take a peek at how the correlation matrix looks when sorted by these
# communities. We can use the :func:`~.plotting.plot_mod_heatmap` function,
# which is a wrapper around :func:`plt.imshow()`, to do this easily:

from netneurotools import plotting

plotting.plot_mod_heatmap(corr, ci, vmin=-1, vmax=1, cmap='viridis')

###############################################################################
# The Louvain algorithm is greedy, so different instantiations will return
# different community assignments. We can run the algorithm ~100 times to see
# how consistent the detected communities are across runs.
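
###############################################################################
# A minimal sketch of that consensus procedure, assuming the `cluster` module
# from `netneurotools` is available:

from netneurotools import cluster

ci_all = np.column_stack([bct.community_louvain(nonegative, gamma=1.5)[0]
                          for n in range(100)])
consensus = cluster.find_consensus(ci_all, seed=1234)
plotting.plot_mod_heatmap(corr, consensus, vmin=-1, vmax=1, cmap='viridis')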
Example #17
def advanced_network_analysis(graph, kstep=1, sstep=600., outdir=None):
    """ Map structural cores to delineate network modules, and to identify
    hub regions that link distinct clusters.

    Definition:

    The k-core is the largest subnetwork comprising nodes of degree at
    least k.

    The s-core is the largest subnetwork comprising nodes of strength at
    least s.

    The optimal community structure is a subdivision of the
    network into nonoverlapping groups of nodes which maximizes the number
    of within-group edges and minimizes the number of between-group edges.

    The rich club coefficient, R, at level k is the fraction of edges that
    connect nodes of degree k or higher out of the maximum number of edges
    that such nodes might share.

    Network features:

    kcores:
        each node's associated k-core number.
    klevels:
        size of the k-cores as the k-level increases.
    scores:
        each node's associated s-core number.
    slevels:
        size of the s-cores as the s-level increases.
    community:
        the computed community structure.
    qstat:
        the objective modularity function optimized q-statistic.
    rich_clubs:
        vector of rich-club coefficients for levels 1 to klevel, where
        klevel defaults to the maximum degree of the adjacency matrix.
    global_efficiency:
        the global efficiency is the average of inverse shortest path
        length, and is inversely related to the characteristic path length.
    local_efficiency:
        the local efficiency is the global efficiency computed on the
        neighborhood of the node, and is related to the clustering coefficient.

    Parameters
    ----------
    graph: Graph
        the graph representing the connectome network.
    kstep: int (optional, default 1)
        the k-core size increment.
    sstep: float (optional, default 600.)
        the s-core size increment.
    outdir: str (optional, default None)
        if specified save some snapshots.

    Returns
    -------
    outputs: dict
        the network features.
    snaps: list of files
        the generated snapshots.
    """
    # to_numpy_matrix was removed in networkx 3; use to_numpy_array instead
    adjacency_matrix = numpy.ascontiguousarray(nx.to_numpy_array(graph))

    # Efficiency
    global_efficiency = bct.efficiency_wei(adjacency_matrix)
    local_efficiency = bct.efficiency_wei(adjacency_matrix, local=True)

    # K-core decomposition
    k = 0
    kcores = numpy.zeros(adjacency_matrix.shape[0], dtype=int)
    klevels = []
    kxs = []
    processed_indices = set()
    while True:
        if not graph.is_directed():
            kcore, kn, peelorder, peellevel = bct.kcore_bu(adjacency_matrix,
                                                           k,
                                                           peel=True)
            klevels.append(kn)
            kxs.append(k)
        else:
            kcore, kn, peelorder, peellevel = bct.kcore_bd(adjacency_matrix,
                                                           k,
                                                           peel=True)
        for indices in peelorder:
            new_indices = set(indices) - processed_indices
            processed_indices = processed_indices.union(new_indices)
            kcores[list(new_indices)] = k
        if kn == 0:
            break
        k += 1

    # S-core decomposition
    scores = numpy.zeros(adjacency_matrix.shape[0], dtype=int)
    slevels = []
    sxs = []
    processed_indices = set()
    s = 0
    while True:
        if not graph.is_directed():
            score, sn = bct.score_wu(adjacency_matrix, s)
            slevels.append(sn)
            sxs.append(s)
        else:
            raise NotImplementedError
        ff = numpy.where(score == 0)
        for node_index in ff[0]:
            if node_index in processed_indices:
                continue
            if not (score[node_index, :] == 0).all():
                continue
            scores[node_index] = s
            processed_indices.add(node_index)
        if sn == 0:
            break
        s += sstep

    # Community detection
    community, qstat = bct.community_louvain(adjacency_matrix,
                                             gamma=1,
                                             ci=None,
                                             B="modularity",
                                             seed=None)

    # Hub detection
    if not graph.is_directed():
        rich_clubs = bct.rich_club_wu(adjacency_matrix, klevel=None)
    else:
        rich_clubs = bct.rich_club_wd(adjacency_matrix, klevel=None)

    # Summarize results in a dictionary
    params = locals()
    outputs = dict([
        (name, params[name])
        for name in ("kcores", "klevels", "kxs", "scores", "slevels", "sxs",
                     "community", "qstat", "rich_clubs", "global_efficiency",
                     "local_efficiency")
    ])

    # Snaps
    snaps = []
    if outdir is not None:
        import pylab as plt
        if not os.path.isdir(outdir):
            raise ValueError("'{0}' is not a valid directory.".format(outdir))
        for x, measures, label in [(kxs, klevels, "klevels"),
                                   (sxs, slevels, "slevels")]:
            outfile = os.path.join(outdir, label + ".png")
            snaps.append(outfile)
            plt.figure()
            plt.plot(x, measures, "-bo")
            plt.xlabel(label)
            plt.ylabel("Number of nodes")
            plt.savefig(outfile)
            plt.close()

    return outputs, snaps
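
# A minimal usage sketch, assuming networkx (nx), numpy, and bct are imported
# and advanced_network_analysis (above) is defined; sstep is shrunk because a
# small random graph's node strengths are far below the 600. default:
import networkx as nx

graph = nx.erdos_renyi_graph(30, 0.2, seed=42)
outputs, snaps = advanced_network_analysis(graph, kstep=1, sstep=1.)
print(outputs["qstat"], len(outputs["rich_clubs"]))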
Example #18
def consensus_clustering_louvain2(inputMatrix, numberPartitions,
                                  consensusMatrixThreshold, LouvainMethod,
                                  gamma, seeds):
    # Amended so that Q is also returned at the end.
    # Implements consensus clustering as per Lancichinetti & Fortunato (2012),
    # using the Louvain algorithm as implemented by BCT.
    #
    # Inputs:
    #   inputMatrix                 symmetric weighted undirected adjacency matrix to be partitioned
    #   numberPartitions            number of Louvain runs used to generate the consensus matrix on each iteration
    #   consensusMatrixThreshold    threshold below which consensus matrix entries are set to zero, (0 1]
    #   LouvainMethod               Louvain null-model string: 'modularity' (if the inputMatrix has no negative
    #                               weights) or 'negative_sym' / 'negative_asym' (if it has negative weights)
    #   gamma                       resolution parameter of Louvain
    #   seeds                       sequence of length numberPartitions; each entry is None or an integer seed
    #
    # Outputs:
    #   Q                           modularity of the final Louvain run
    #   finalPartition              final community allocation of each node
    #   iterations                  how many iterations were needed to reach consensus
    #   communityAssignment         final community assignment (community -> node indices)

    consensus = False
    iterations = 0

    while not consensus:
        # stack of co-classification matrices, one per Louvain run
        D = np.zeros((inputMatrix.shape[0], inputMatrix.shape[1],
                      numberPartitions))
        # generate the consensus matrix
        for partition in range(numberPartitions):
            [community_allocation,
             Q] = bct.community_louvain(inputMatrix,
                                        gamma=gamma,
                                        ci=None,
                                        B=LouvainMethod,
                                        seed=seeds[partition])

            # co-classification: 1 where two nodes share the same community
            D[:, :, partition] = (community_allocation[:, None] ==
                                  community_allocation[None, :])

        # average over runs: if every partition agreed, the mean is binary
        D = np.mean(D, 2)
        iterations = iterations + 1  # keep track

        # only true if all partition matrices are equal (mean is 0 or 1 everywhere)
        if np.unique(D).shape[0] < 3:
            consensus = True
            finalPartition = D
            communityAssignment = defaultdict(list)
            for community in range(1, np.unique(community_allocation).shape[0] + 1):
                communityAssignment[community].append(
                    np.where(community_allocation == community))

        else:
            D = np.where(D < consensusMatrixThreshold, 0, D)
            inputMatrix = D

    return (Q, finalPartition, iterations, communityAssignment)
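
# A minimal usage sketch, assuming numpy (np) and bct are imported, along
# with defaultdict from collections, and consensus_clustering_louvain2
# (above) is defined:
import numpy as np

rng = np.random.default_rng(0)
W = rng.random((40, 40))
W = (W + W.T) / 2
np.fill_diagonal(W, 0)
Q, finalPartition, iterations, communityAssignment = consensus_clustering_louvain2(
    W, numberPartitions=20, consensusMatrixThreshold=0.5,
    LouvainMethod='modularity', gamma=1, seeds=list(range(20)))
print(Q, iterations)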