Example #1
def get_fully_connected_threshold(connectivity_matrix, initial_value=.1):
    '''Find a threshold, at or above the initial value, at which the graph is fully connected.
    '''
    if isinstance(connectivity_matrix, pd.DataFrame):
        connectivity_matrix = connectivity_matrix.to_numpy()
    threshold = initial_value
    thresholded_mat = bct.threshold_proportional(connectivity_matrix,
                                                 threshold)
    while np.any(np.max(thresholded_mat, axis=1) == 0):
        threshold += .01
        thresholded_mat = bct.threshold_proportional(connectivity_matrix,
                                                     threshold)
    return threshold
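A minimal usage sketch with synthetic data (hypothetical, not from the original source); it assumes `numpy` and `bct` are imported as in the rest of these examples:

# usage sketch (hypothetical data): find the lowest tested threshold
# at which no node is isolated
rng = np.random.default_rng(0)
sym = rng.random((50, 50))
sym = (sym + sym.T) / 2          # symmetric "connectivity" matrix
np.fill_diagonal(sym, 0)
tau = get_fully_connected_threshold(sym, initial_value=.1)
print(tau)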
def corrmat_to_samples_by_features(subjects,
                                   session,
                                   task,
                                   condition,
                                   mask,
                                   tau,
                                   order="F",
                                   verbose=False):
    # read in every subject's connectivity matrix and flatten it into
    # features (edges) per sample (subject); one task & condition at a
    # time, otherwise it becomes a memory issue
    conn_df = pd.DataFrame(index=subjects, columns=np.arange(0, 268**2))

    for subject in subjects:
        corrmat_path = join(
            sink_dir,
            "corrmats",
            "{0}-session-{1}_{2}-{3}_{4}-corrmat.csv".format(
                subject, session, task, condition, mask),
        )
        if verbose:
            print("sub-{0}".format(subject))
            print("corrmat at {0}".format())
        try:
            corrmat = np.genfromtxt(corrmat_path, delimiter=" ")
            thresh_corrmat = bct.threshold_proportional(corrmat,
                                                        tau,
                                                        copy=True)
            conn_df.at[subject] = np.ravel(thresh_corrmat, order=order)
        except Exception as e:
            if verbose:
                print(subject, e)
    return conn_df
Example #3
def iterate_modularity_partition(subject, n_iter):
    '''Run iterations of Louvain modularity detection and return the partition with the highest modularity (Q).

    usage: ci, q = iterate_modularity_partition(subject, n_iter)

    Parameters
    ----------
    subject : subject number; its correlation matrix is loaded from a hard-coded path (should probably change that)
    n_iter : int
        Number of iterations to run.

    Returns
    -------
    ci : community partition
    q : modularity
    '''

    fn = '/home/despoB/kaihwang/Rest/AdjMatrices/t%s_Full_WashU333_corrmat' % subject
    AveMat = np.loadtxt(fn)
    graph = nx.from_numpy_array(
        bct.binarize(bct.threshold_proportional(AveMat, 0.05)))
    q = 0
    for i in range(n_iter):
        print(i)
        louvain = weighted_modularity.LouvainCommunityDetection(graph)
        weighted_partitions = louvain.run()
        if weighted_partitions[0].modularity() > q:
            q = weighted_partitions[0].modularity()
            weighted_partition = weighted_partitions[0]
            ci = convert_partition_dict_to_array(
                convert_partition_to_dict(weighted_partition.communities),
                len(AveMat))
    return ci, q
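The two `convert_partition_*` helpers are not shown in this example; a plausible minimal sketch of what they might do, assuming `communities` is a list of node sets:

def convert_partition_to_dict(communities):
    # map each node to the index of the community containing it
    partition = {}
    for ci, community in enumerate(communities):
        for node in community:
            partition[node] = ci
    return partition

def convert_partition_dict_to_array(partition, n_nodes):
    # turn the node -> community mapping into a length-n_nodes array
    ci = np.zeros(n_nodes)
    for node, community in partition.items():
        ci[node] = community
    return ci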
Example #4
def connected_tau(corrmat, proportional=True):
    '''
    Calculates the threshold at which the network becomes node-connected, using NetworkX's `is_connected` function.

    Parameters
    ----------
    corrmat : numpy.array
        Correlation or other connectivity matrix from which tau will be estimated.
        Should be values between 0 and 1.
    proportional : bool
        Determines whether the connectivity matrix is thresholded proportionally or absolutely.
        Default is proportional, as maintaining network density across participants is a priority.

    Returns
    -------
    tau : float
        Highest value of tau (threshold) at which the network is node-connected.
    '''
    tau = 1.0
    connected = False
    while not connected:
        if proportional:
            w = bct.threshold_proportional(corrmat, tau)
        else:
            w = bct.threshold_absolute(corrmat, tau)
        w_nx = nx.convert_matrix.from_numpy_array(w)
        connected = nx.algorithms.components.is_connected(w_nx)
        if not connected:
            tau -= 0.01
    return tau
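A quick usage sketch on synthetic data (hypothetical, not from the source):

# usage sketch (hypothetical data)
rng = np.random.default_rng(1)
m = rng.random((30, 30))
m = (m + m.T) / 2
np.fill_diagonal(m, 0)
print(connected_tau(m, proportional=True))  # highest tau with one component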
Example #5
def scale_free_tau(corrmat, skew_thresh, proportional=True):
    '''
    Calculates the threshold at which the network becomes scale-free, estimated from the skewness of the network's degree distribution.

    Parameters
    ----------
    corrmat : numpy.array
        Correlation or other connectivity matrix from which tau will be estimated.
        Should be values between 0 and 1.
    skew_thresh : float
        Maximum absolute skewness of the degree distribution for the network to be considered scale-free.
    proportional : bool
        Determines whether the connectivity matrix is thresholded proportionally or absolutely.
        Default is proportional, as maintaining network density across participants is a priority.

    Returns
    -------
    tau : float
        Lowest value of tau (threshold) at which the network is scale-free.
    '''
    tau = 0.01
    skewness = 1
    while abs(skewness) > skew_thresh:
        if proportional:
            w = bct.threshold_proportional(corrmat, tau)
        else:
            w = bct.threshold_absolute(corrmat, tau)
        skewness = skew(bct.degrees_und(w))
        tau += 0.01
    return tau
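`skew` here is presumably `scipy.stats.skew`. A usage sketch with synthetic data (hypothetical; depending on the matrix, the loop may step tau up a long way before the skew bound is met):

# usage sketch (hypothetical data); assumes skew = scipy.stats.skew
from scipy.stats import skew
rng = np.random.default_rng(2)
m = rng.random((40, 40))
m = (m + m.T) / 2
np.fill_diagonal(m, 0)
print(scale_free_tau(m, skew_thresh=0.3, proportional=True))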
Example #6
def cal_dynamic_graph(MTD, impose=False, threshold=False):
    '''calculate graph metrics across time(dynamic)'''
    #setup outputs
    time_points = MTD.shape[0]
    ci = np.zeros([MTD.shape[1], MTD.shape[0]])
    q = np.zeros([MTD.shape[0]])
    WMD = np.zeros([MTD.shape[1], MTD.shape[0]])
    PC = np.zeros([MTD.shape[1], MTD.shape[0]])
    WW = np.zeros([MTD.shape[1], MTD.shape[0]])
    BW = np.zeros([MTD.shape[1], MTD.shape[0]])

    #modularity
    if impose:
        ci = np.tile(
            np.loadtxt(
                '/home/despoB/kaihwang/Rest/ThaGate/ROIs/Morel_Striatum_Gordon_CI'
            ), [time_points, 1]).T

    for i in range(time_points):
        matrix = MTD[i, :, :]

        #need to deal with NANs because of coverage (no signal in some ROIs)
        matrix[np.isnan(matrix)] = 0

        #threshold here
        if threshold:
            matrix = bct.threshold_proportional(matrix, threshold)

        #modularity
        if not impose:
            ci[:, i], q[i] = bct.modularity_louvain_und_sign(matrix)

        #PC
        # for now, no negative weights
        matrix[matrix < 0] = 0
        PC[:, i] = bct.participation_coef(matrix, ci[:, i])

        #WMD
        WMD[:, i] = bct.module_degree_zscore(matrix, ci[:, i])

        ## within weight
        WW[:, i] = cal_within_weight(matrix, ci[:, i])

        ## between Weight
        BW[:, i] = cal_between_weight(matrix, ci[:, i])

        # calculate q using the imposed CI partition
        if impose:
            q[i] = cal_modularity_w_imposed_community(matrix, ci[:, i])

    return ci, q, PC, WMD, WW, BW
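`cal_within_weight` and `cal_between_weight` come from the same codebase and are not shown; a plausible sketch of the intended computation (each node's summed edge weight within and between modules), under the assumption that this matches the original helpers:

def cal_within_weight(matrix, ci):
    # sketch of the undefined helper: summed edge weight to nodes in the
    # same module, self-connection excluded
    ci = np.asarray(ci)
    WW = np.zeros(len(ci))
    for node in range(len(ci)):
        same = ci == ci[node]
        same[node] = False
        WW[node] = matrix[node, same].sum()
    return WW

def cal_between_weight(matrix, ci):
    # summed edge weight to nodes in other modules
    ci = np.asarray(ci)
    BW = np.zeros(len(ci))
    for node in range(len(ci)):
        BW[node] = matrix[node, ci != ci[node]].sum()
    return BW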
Example #7
def compute(matrix, thresholds, measure, args):
    '''
    matrix : array
    thresholds : (lower, upper) bounds of the threshold range
    measure : function from bctpy
    args : additional argument(s) passed through to `measure`
    '''
    from bct import threshold_proportional

    metrics = []
    for p in np.arange(thresholds[0], thresholds[1], 0.01):
        thresh = threshold_proportional(matrix, p, copy=True)
        metric = measure(thresh, args)
        metrics.append(metric)
    auc = np.trapz(metrics, dx=0.01)
    return auc
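A usage sketch (hypothetical data): integrating weighted global efficiency over thresholds from 0.05 to 0.35, with `args` passed straight through to the measure (here `efficiency_wei`'s `local` flag):

# usage sketch (hypothetical data)
import bct
rng = np.random.default_rng(3)
m = rng.random((30, 30))
m = (m + m.T) / 2
np.fill_diagonal(m, 0)
auc = compute(m, (0.05, 0.35), bct.efficiency_wei, False)
print(auc)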
Example #8
def test_modularity_finetune_und_sign_actually_finetune():
    x = load_signed_sample()
    seed = 34908314
    ci, oq = bct.modularity_louvain_und_sign(x, seed=seed)
    _, q = bct.modularity_finetune_und_sign(x, seed=seed, ci=ci)
    print(q)
    assert np.allclose(q, .47282924)
    assert q >= oq

    seed = 88215881
    np.random.seed(seed)
    randomized_sample = np.random.random(size=(len(x), len(x)))
    randomized_sample = randomized_sample + randomized_sample.T
    x[np.where(bct.threshold_proportional(randomized_sample, .2))] = 0

    ci, oq = bct.modularity_louvain_und_sign(x, seed=seed)
    print(oq)
    assert np.allclose(oq, .45254522)
    for i in range(100):
        _, q = bct.modularity_finetune_und_sign(x, ci=ci)
        assert q >= oq
Example #10
def generate_null(layout, task, session, mask):
    null_dist = pd.DataFrame(index=subjects, columns=["mean", "sdev"])
    avg_corr = avg_corrmat(layout, task, session, mask)
    eff_perm = []
    j = 1
    while j < 3:
        effs = []
        W = null_model_und_sign(avg_corr.values)
        for thresh in np.arange(0.21, 0.31, 0.03):
            thresh_corr = bct.threshold_proportional(W, thresh)
            leff = bct.efficiency_wei(thresh_corr)
            effs.append(leff)
        effs_arr = np.asarray(effs)
        leff_auc = np.trapz(effs_arr, dx=0.03, axis=0)
        eff_perm.append(leff_auc)
        j += 1
    null_dist.at[(sesh[session], task, conds[i], mask),
                 "mean"] = np.mean(eff_perm)
    null_dist.at[(sesh[session], task, conds[i], mask),
                 "sdev"] = np.std(eff_perm)
    return null_dist
Example #11
def cal_sFC_graph(subject, sequence, roi, impose=False, threshold=1.0):
    ''' load TS and run static FC'''
    ts_path = '/home/despoB/kaihwang/Rest/ThaGate/NotBackedUp/'
    fn = ts_path + str(subject) + '_%s_%s_000.netts' % (roi, sequence)
    ts = np.loadtxt(fn)

    matrix = np.corrcoef(ts)
    matrix[np.isnan(matrix)] = 0

    matrix = bct.threshold_proportional(matrix, threshold)

    num_iter = 200
    consensus = np.zeros((num_iter, matrix.shape[0], matrix.shape[1]))

    for i in np.arange(0, num_iter):
        ci, _ = bct.modularity_louvain_und_sign(matrix, qtype='sta')
        consensus[i, :, :] = community_matrix(ci)

    mean_matrix = np.nanmean(consensus, axis=0)
    mean_matrix[np.isnan(mean_matrix)] = 0
    CI, Q = bct.modularity_louvain_und_sign(mean_matrix, qtype='sta')

    #no negative weights
    matrix[matrix < 0] = 0

    PC = bct.participation_coef(matrix, CI)

    #WMD
    WMD = bct.module_degree_zscore(matrix, CI)

    ## within weight
    WW = cal_within_weight(matrix, CI)

    ## between Weight
    BW = cal_between_weight(matrix, CI)

    return CI, Q, PC, WMD, WW, BW
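`community_matrix` is another helper not shown here; the consensus step suggests it builds a node-by-node co-assignment matrix from a partition vector. A minimal sketch under that assumption:

def community_matrix(ci):
    # co-assignment (agreement) matrix: 1 where two nodes share a community
    # (sketch of the undefined helper, assuming this is its intent)
    ci = np.asarray(ci)
    return (ci[:, None] == ci[None, :]).astype(float)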
Example #12
def load_sample(thres=1):
    return bct.threshold_proportional(np.load(mat_path('sample_data.npy')),
                                      thres,
                                      copy=False)
def threshold_proportional_sign(W, threshold):
    sign = np.sign(W)
    thresh_W = bct.threshold_proportional(np.abs(W), threshold)
    W = thresh_W * sign
    return W
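A short check with hypothetical data that the signed wrapper keeps the signs of whatever weights survive thresholding of the absolute values:

# usage sketch (hypothetical data): signs survive proportional thresholding
rng = np.random.default_rng(4)
W = rng.uniform(-1, 1, size=(20, 20))
W = (W + W.T) / 2
np.fill_diagonal(W, 0)
W_thresh = threshold_proportional_sign(W, 0.2)
assert np.all(np.sign(W_thresh[W_thresh != 0]) == np.sign(W[W_thresh != 0]))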
Example #14
                        lab_notebook.at[(subject, session, task, conds[i],
                                         mask),
                                        'start'] = str(datetime.datetime.now())
                        corrmat = np.genfromtxt(join(
                            sink_dir, sesh[session], subject,
                            '{0}-session-{1}_{2}-{3}_{4}-corrmat.csv'.format(
                                subject, session, task, conditions[i], mask)),
                                                delimiter=' ')

                        ge_s = []
                        cp_s = []
                        md_s = []
                        for p in np.arange(kappa_upper, kappa_lower, 0.01):
                            ntwk = []
                            thresh = bct.threshold_proportional(corrmat,
                                                                p,
                                                                copy=True)

                            #network measures of interest here
                            #global efficiency
                            ge = bct.efficiency_wei(thresh)
                            ge_s.append(ge)

                            #characteristic path length
                            cp = bct.charpath(thresh)
                            cp_s.append(cp[0])

                            #modularity
                            md = bct.modularity_louvain_und(thresh)
                            md_s.append(md[1])
Example #15
def test_threshold_proportional():
    x = load_sample()
    x = bct.threshold_proportional(x, .5, copy=True)
    assert np.allclose(np.sum(x), 22548.51206965)
Example #16
def test_binarize():
    x = load_sample()
    s = bct.binarize(bct.threshold_proportional(x, .41))
    assert np.sum(s) == 7752
Example #17
def load_directed_sample(thres=1):
    return bct.threshold_proportional(np.load(mat_path('sample_directed.npy')),
                                      thres, copy=False)
Example #18
import numpy as np
import bct as BCT
import sys


fn = input()

print(fn)

M = np.loadtxt(fn)

Q_vec = np.zeros(len(np.arange(0.02, 0.21, 0.01)))

for i, th in enumerate(np.arange(0.02, 0.21, 0.01)):
    Q_vec[i] = BCT.modularity_und(BCT.threshold_proportional(M, th))[1]

Q = Q_vec.mean()

print(Q)
CI = np.random.randint(1, 10, size)

max_cost = .15
min_cost = .01

# import thresholded matrix to BCT, import partition, run WMD/PC
PC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))
WMD = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))
EC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))
GC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))
SC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))
ST = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))

for i, cost in enumerate(np.arange(min_cost, max_cost, 0.01)):

    tmp_matrix = bct.threshold_proportional(matrix, cost, copy=True)

    # # PC slow to compute, days per threshold
    # PC[i,:] = bct.participation_coef(tmp_matrix, CI)
    # fn = 'completed PC calculation for %s at:' %cost
    # print(fn)
    # print(datetime.now())
    #
    # WMD seems relatively fast, maybe 10min per threshold
    WMD[i, :] = bct.module_degree_zscore(tmp_matrix, CI)
    fn = 'completed WMD calculation for %s at:' % cost
    print(fn)
    print(datetime.now())
    #
    # EC[i,:] = bct.eigenvector_centrality_und(tmp_matrix)
    # fn = 'completed EC calculation for %s at:' %cost
Example #20
distance_array = np.load(main_directory + 'results/distance_array.npy')
i_lower = np.tril_indices(distance_array.shape[0], -1)
distance_array[i_lower] = distance_array.T[i_lower]
distance_mask = distance_array > 10

# Generate matrix for clustering: normalisation and distance threshold
W_norm = np.zeros(connectomes.shape)
n = connectomes.shape[0]
for i in range(connectomes.shape[2]):
    individual_graph = connectomes[:, :, i]
    std_dev = np.std(np.tril(individual_graph))
    W_norm[:, :, i] = (individual_graph - (np.sum(np.sum(individual_graph)) /
                                           (n**2 - n))) / std_dev
W_mn = np.mean(W_norm, 2)
np.fill_diagonal(W_mn, 0)
thresholded_matrix = bct.threshold_proportional(W_mn, 0.05)
thresholded_matrix = np.where(thresholded_matrix > 0, 1, 0)

LouvainMethod = 'modularity'
gamma = 1.4
consensusMatrixThreshold = 0.5
numberPartition = 50
distance_thresholded_matrix = thresholded_matrix * distance_mask
seeds = range(numberPartition)
ignore1, ignore2, community_allocation = cb.consensus_clustering_louvain(
    distance_thresholded_matrix, numberPartition, consensusMatrixThreshold,
    LouvainMethod, gamma, seeds)

# Look at how to label these networks
columns = list(apriori_communities.keys())
Example #22
def test_threshold_proportional_directed():
    x = load_directed_sample()
    bct.threshold_proportional(x, .28, copy=False)
    assert np.sum(x) == 3410
null_dist = pd.DataFrame(index=index, columns=["mean", "sdev"])
for session in sessions:
    print(session, datetime.datetime.now())
    for task in tasks.keys():
        print(task, datetime.datetime.now())
        for i in np.arange(0, len(tasks[task][0]["conditions"])):
            condition = tasks[task][0]["conditions"][i]
            print(condition, datetime.datetime.now())
            for mask in masks:
                print(mask, datetime.datetime.now())
                avg_corr = avg_corrmat(data_dir, subjects, task, condition,
                                       session, mask)
                eff_perm = []
                j = 1
                while j < 3:
                    effs = []
                    W = null_model_und_sign(avg_corr.values)
                    for thresh in np.arange(0.21, 0.31, 0.03):
                        thresh_corr = bct.threshold_proportional(W, thresh)
                        leff = bct.efficiency_wei(thresh_corr)
                        effs.append(leff)
                    effs_arr = np.asarray(effs)
                    leff_auc = np.trapz(effs_arr, dx=0.03, axis=0)
                    eff_perm.append(leff_auc)
                    j += 1
                null_dist.at[(sesh[session], task, conds[i], mask),
                             "mean"] = np.mean(eff_perm)
                null_dist.at[(sesh[session], task, conds[i], mask),
                             "sdev"] = np.std(eff_perm)
        null_dist.to_csv(join(sink_dir, "null_dist-local_efficiency.csv"))
Example #24
def test_threshold_proportional_directed():
    x = load_directed_sample()
    bct.threshold_proportional(x, .28, copy=False)
    assert np.allclose(np.sum(x), 32852.72485433)
Example #26
def load_directed_low_modularity_sample(thres=1):
    return bct.threshold_proportional(np.load('mats/sample_directed_gc.npy'),
                                      thres, copy=False)
Example #27
            #shen_corrmat = np.genfromtxt(join(sink_dir, session, 'resting-state', subject, '{0}_network_corrmat_shen2015.csv'.format(subject)), delimiter=",")

            craddock_ts = craddock_masker.fit_transform(epi_data, confounds)
            craddock_corrmat = correlation_measure.fit_transform([craddock_ts])[0]
            np.savetxt(join(sink_dir, sesh[session], subject, '{0}-session-{1}-rest_network_corrmat_craddock2012.csv'.format(subject, session)), craddock_corrmat, delimiter=",")
            #craddock_corrmat = np.genfromtxt(join(sink_dir, session, 'resting-state', subject, '{0}_network_corrmat_craddock2012.csv'.format(subject)), delimiter=",")

            ge_s = []
            ge_c = []
            cp_s = []
            cp_c = []
            md_s = []
            md_c = []
            for p in np.arange(0.1, 1, 0.1):
                ntwk = []
                shen_thresh = bct.threshold_proportional(shen_corrmat, p, copy=True)
                craddock_thresh = bct.threshold_proportional(craddock_corrmat, p, copy=True)
                #network measures of interest here
                #global efficiency
                ge = bct.efficiency_wei(shen_thresh)
                ge_s.append(ge)
                ge = bct.efficiency_wei(craddock_thresh)
                ge_c.append(ge)

                #characteristic path length
                cp = bct.charpath(shen_thresh)
                cp_s.append(cp[0])
                cp = bct.charpath(craddock_thresh)
                cp_c.append(cp[0])

                #modularity
Example #28
def load_sample(thres=1):
    return bct.threshold_proportional(np.load('mats/sample_data.npy'), thres,
                                      copy=False)
Example #29
def test_threshold_proportional_nocopy():
    x = load_sample()
    bct.threshold_proportional(x, .3, copy=False)
    assert np.allclose(np.sum(x), 15253.75425406)
Example #30
def load_signed_sample(thres=1):
    return bct.threshold_proportional(np.around(
        np.load('mats/sample_signed.npy'), 8), thres, copy=False)
Example #31
def test_degrees_und():
    x = load_sample()
    s = bct.degrees_und(bct.threshold_proportional(x, .26))
    assert np.sum(s) == 4916
Example #32
        network = {}
        network_wise = {}

        #talking with Kim:
        #start thresholding (least conservative) at the lowest threshold where you lose your negative connection weights
        #steps of 5 or 10 percent
        #citation for integrating over the range is likely in the Fundamentals of Brain Network Analysis book
        #(http://www.danisbassett.com/uploads/1/1/8/5/11852336/network_analysis_i__ii.pdf)
        #typically done: make sure your metric's value is stable across your range of thresholds
        #the more metrics you use, the more you have to correct for multiple comparisons
        #make sure this is hypothesis-driven and not fishing

        for p in thresh_range:
            ge = []
            cc = []
            ntwk_corrmat_thresh = bct.threshold_proportional(
                network_correlation_matrix, p, copy=True)
            #np.savetxt(join(sink_dir, sessions[i], s, '{0}_corrmat_Laird2011_thresh_{1}.csv'.format(s, p)), ntwk_corrmat_thresh, delimiter=',')
            #measures of interest here
            #global efficiency
            le = bct.efficiency_wei(ntwk_corrmat_thresh)
            ge.append(le)

            #clustering coefficient
            c = bct.clustering_coef_wu(ntwk_corrmat_thresh)
            cc.append(c)

            network[p] = ge
            network_wise[p] = cc

        ntwk_df = pd.Series(network).T
        #ntwk_df.columns = ['total positive', 'total negative', 'efficiency', 'path length', 'modularity']
Example #33
def test_normalize():
    x = load_sample()
    s = bct.normalize(bct.threshold_proportional(x, .79))
    assert np.allclose(np.sum(s), 3327.96285964)
    return (ordered_matrix, new_order, module_dict)


# numberPartitions = 500
# gamma = 1.0
# LouvainMethod = 'negative_sym'
# consensusMatrixThreshold = 0.5

# NB give very similar result if using more similar method to the brain networks :
numberPartitions = 500
gamma = 1
LouvainMethod = 'modularity'
consensusMatrixThreshold = 0.5
seed = 3
inputmatrix = bl_corr.loc['p1':'g16', 'p1':'g16']
inputmatrix2 = bct.threshold_proportional(np.asarray(inputmatrix), 0.5)
inputmatrix3 = pd.DataFrame(inputmatrix2,
                            index=inputmatrix.index,
                            columns=inputmatrix.columns)

# ch_ordered_matrix, ch_new_order, ch_module_dict = reorder_corr(ch_corr.loc['cp1':'cg16','cp1':'cg16'],
#                                                                 numberPartitions, consensusMatrixThreshold,LouvainMethod, gamma)

bl_ordered_matrix, bl_new_order, bl_module_dict = reorder_corr(
    inputmatrix3, numberPartitions, consensusMatrixThreshold, LouvainMethod,
    gamma, seed)

# Baseline
plt.rcParams['figure.figsize'] = [7, 7]
bl_tick_labels = (bl_corr.loc[:, 'p1':'g16'].columns)[bl_new_order]
bl_mod_change = np.where(bl_new_order[:-1] >= bl_new_order[1:])[0]
Example #35
def test_invert():
    x = load_sample()
    s = bct.invert(bct.threshold_proportional(x, .13))
    assert np.allclose(np.sum(s), 790.43107587)
Example #36
def load_signed_sample(thres=1):
    return bct.threshold_proportional(np.around(
        np.load(mat_path('sample_signed.npy')), 8),
                                      thres,
                                      copy=False)
Example #37
            subject, "fc default mode-left central executive {0}".format(condition)
        ] = corrmats[condition][12, 17]

        df.at[
            subject,
            "fc left central executive-right central executive {0}".format(condition),
        ] = corrmats[condition][14, 17]
        ge = []
        le = {}
        loceff = {}
        loceff["default mode"] = []
        loceff["left central executive"] = []
        loceff["right central executive"] = []
        for p in thresh_range:
            corrmat_thresh = bct.threshold_proportional(
                corrmats[condition], p, copy=True
            )
            # measures of interest here
            # global efficiency
            geff = bct.efficiency_wei(corrmat_thresh)
            ge.append(geff)

            # local efficiency
            leff = bct.efficiency_wei(corrmat_thresh, local=True)
            # print leff[2]
            for network in networks:
                # print network
                loceff[labels[network]].append(leff[network])
                # loceff['{0}, {1}'.format(labels[network], condition)].append(leff[network])
            # print loceff
            le["{0}, {1}".format(p, condition)] = loceff
Example #38
def load_directed_low_modularity_sample(thres=1):
    return bct.threshold_proportional(np.load(
        mat_path('sample_directed_gc.npy')),
                                      thres,
                                      copy=False)