Example #1
def do_opt(adj,mods,option):
    if option=='global efficiency':
        return bct.efficiency_wei(adj)
    elif option=='local efficiency':
        return bct.efficiency_wei(adj,local=True)
    elif option=='average strength':
        return bct.strengths_und(adj)
    elif option=='clustering coefficient':
        return bct.clustering_coef_wu(adj)
    elif option=='eigenvector centrality':
        return bct.eigenvector_centrality_und(adj)
    elif option=='binary kcore':
        return bct.kcoreness_centrality_bu(adj)[0]

    elif option=='modularity':
        return bct.modularity_und(adj,mods)[1]
    elif option=='participation coefficient':
        return bct.participation_coef(adj,mods)
    elif option=='within-module degree':
        return bct.module_degree_zscore(adj,mods)
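A minimal usage sketch for do_opt (not from the original source), assuming bctpy is importable; a small random symmetric matrix stands in for a real connectivity matrix, and the module assignment for the module-based options comes from bct.community_louvain:

import numpy as np
import bct

rng = np.random.default_rng(0)
W = rng.random((30, 30))
W = (W + W.T) / 2           # symmetrize
np.fill_diagonal(W, 0)      # no self-connections
mods, _ = bct.community_louvain(W)   # module assignment vector
print(do_opt(W, mods, 'global efficiency'))
print(do_opt(W, mods, 'participation coefficient'))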
Example #2
def do_opt(adj, mods, option):
    if option == 'global efficiency':
        return bct.efficiency_wei(adj)
    elif option == 'local efficiency':
        return bct.efficiency_wei(adj, local=True)
    elif option == 'average strength':
        return bct.strengths_und(adj)
    elif option == 'clustering coefficient':
        return bct.clustering_coef_wu(adj)
    elif option == 'eigenvector centrality':
        return bct.eigenvector_centrality_und(adj)
    elif option == 'binary kcore':
        return bct.kcoreness_centrality_bu(adj)[0]

    elif option == 'modularity':
        return bct.modularity_und(adj, mods)[1]
    elif option == 'participation coefficient':
        return bct.participation_coef(adj, mods)
    elif option == 'within-module degree':
        return bct.module_degree_zscore(adj, mods)
Example #3
def modularity_and_efficiency(data):
    mod_scores = []
    eff_scores = []
    for x in data:
        matrix = mp.preprocess_matrix(x)
        mod_score = bct.modularity_und(matrix)[1]
        eff_score = bct.efficiency_wei(matrix)

        mod_scores.append(mod_score)
        eff_scores.append(eff_score)

    return mod_scores, eff_scores
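mp.preprocess_matrix is an external helper that is not shown here. A hypothetical minimal stand-in, assuming the preprocessing only zeroes the diagonal and clips negative weights (the real helper may do more):

import numpy as np

class mp:  # hypothetical stand-in for the missing module
    @staticmethod
    def preprocess_matrix(x):
        m = np.asarray(x, dtype=float).copy()
        np.fill_diagonal(m, 0)   # drop self-connections
        m[m < 0] = 0             # bct's weighted measures expect nonnegative weights
        return m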
Example #4
def extract_epoch_graph_features(W):
    import bct
    import numpy as np

    L = bct.weight_conversion(W, "lengths")
    L[W == 0] = np.inf
    D, _ = bct.distance_wei(L)

    l, eff, ecc, radius, diameter = bct.charpath(D, include_infinite=False)

    return [
        bct.clustering_coef_wu(W),
        bct.efficiency_wei(W, local=True),
        bct.betweenness_wei(L),
        ecc,
        [l, eff, radius, diameter],
    ]
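A smoke test for extract_epoch_graph_features (illustrative, not from the original), assuming a random symmetric weight matrix; note that the function converts weights to lengths first, because bct.distance_wei and bct.betweenness_wei expect connection lengths rather than weights:

import numpy as np

rng = np.random.default_rng(1)
W = rng.random((20, 20))
W = (W + W.T) / 2
np.fill_diagonal(W, 0)
features = extract_epoch_graph_features(W)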
Example #5
def generate_null(layout, task, session, mask):
    null_dist = pd.DataFrame(index=subjects, columns=["mean", "sdev"])
    avg_corr = avg_corrmat(layout, task, session, mask)
    eff_perm = []
    j = 1
    while j < 3:
        effs = []
        W = null_model_und_sign(avg_corr.values)
        for thresh in np.arange(0.21, 0.31, 0.03):
            thresh_corr = bct.threshold_proportional(W, thresh)
            leff = bct.efficiency_wei(thresh_corr)
            effs.append(leff)
        effs_arr = np.asarray(effs)
        leff_auc = np.trapz(effs_arr, dx=0.03, axis=0)
        eff_perm.append(leff_auc)
        j += 1
    # note: `sesh`, `conds`, `i`, and `subjects` are globals defined elsewhere in the original script
    null_dist.at[(sesh[session], task, conds[i], mask),
                 "mean"] = np.mean(eff_perm)
    null_dist.at[(sesh[session], task, conds[i], mask),
                 "sdev"] = np.std(eff_perm)
    return null_dist
Example #6
nb_trials = 100
N = 200
k = 4

# Pre-allocate arrays
prob = 10**(-3 * np.random.random(nb_trials))
SWI = np.zeros(nb_trials)
Eglob = np.zeros(nb_trials)
Eloc = np.zeros(nb_trials)

# Calculate statistics
for i, p in enumerate(prob):
    CIJ = NetworkWattsStrogatz(N, k, p)

    SWI[i] = SmallWorldIndex(CIJ)
    Eglob[i] = bct.efficiency_wei(CIJ, local=False)
    Eloc[i] = np.mean(bct.efficiency_wei(CIJ, local=True))

# Plot figures
plt.figure(1)
plt.semilogx(prob, SWI, marker='.', linestyle='none')
plt.xlabel('Rewiring probability')
plt.ylabel('Small World Index')

plt.figure(2)
plt.subplot(211)
plt.semilogx(prob, Eglob, marker='.', linestyle='none')
plt.xlabel('Rewiring probability')
plt.ylabel('Global efficiency')
plt.subplot(212)
plt.semilogx(prob, Eloc, marker='.', linestyle='none')
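NetworkWattsStrogatz and SmallWorldIndex are user-defined helpers that are not shown. A minimal sketch of the generator, assuming a binary Watts-Strogatz ring lattice built with networkx:

import networkx as nx
import numpy as np

def NetworkWattsStrogatz(N, k, p):
    # ring lattice of N nodes, each joined to its k nearest neighbours,
    # with every edge rewired uniformly at random with probability p
    G = nx.watts_strogatz_graph(N, k, p)
    return nx.to_numpy_array(G)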
Example #7
    participants = ['c1','c2','c3','c5','c6','c7','c8']

    all_measures = np.empty(shape=[68,len(participants),5])
    adjmats =  np.empty(shape=[68,68,len(participants)])
    counter = 0

    for participant in participants:
        adjmat = sio.loadmat(participant + '_FA.mat')
        adjmat = adjmat['adjacency_matrix']
        labels = get_parcellation_labels(generate_ROI_file(FreeSurfer_ROI_file)).values
        labels,adjmat = remove_non_cortical_ROIs(labels,adjmat)
        all_measures[:,counter,0] = bct.degrees_und(adjmat)
        all_measures[:,counter,1] = bct.strengths_und(adjmat)
        all_measures[:,counter,2] = bct.clustering_coef_wu(adjmat)
        all_measures[:,counter,3] = bct.betweenness_wei(adjmat)
        all_measures[:,counter,4] = bct.efficiency_wei(adjmat,local=True)
        adjmats[:,:,counter] = adjmat
        counter += 1
        
        
    mean_measures = np.mean(all_measures,axis=1)
    if group == 'patient':
        patient = pd.DataFrame(mean_measures, index=labels,columns=['patient.NodeDegree','patient.Strength','patient.ClustCoeff','patient.BetweenCent','patient.LocEff'])
        patient_measures = all_measures
        patient_adjmats = adjmats
    elif group == 'control':
        control = pd.DataFrame(mean_measures, index=labels,columns=['control.NodeDegree','control.Strength','control.ClustCoeff','control.BetweenCent','control.LocEff'])
        control_measures = all_measures
        control_adjmats = adjmats
       
Example #8
# Getting the expression values
    all_measures = np.empty(shape=[68, len(participants), 5])
    adjmats = np.empty(shape=[68, 68, len(participants)])
    counter = 0

    for participant in participants:
        adjmat = sio.loadmat(participant + '_FA.mat')
        adjmat = adjmat['adjacency_matrix']
        labels = get_parcellation_labels(
            generate_ROI_file(FreeSurfer_ROI_file)).values
        labels, adjmat = remove_non_cortical_ROIs(labels, adjmat)
        all_measures[:, counter, 0] = bct.degrees_und(adjmat)
        all_measures[:, counter, 1] = bct.strengths_und(adjmat)
        all_measures[:, counter, 2] = bct.clustering_coef_wu(adjmat)
        all_measures[:, counter, 3] = bct.betweenness_wei(adjmat)
        all_measures[:, counter, 4] = bct.efficiency_wei(adjmat, local=True)
        adjmats[:, :, counter] = adjmat
        counter += 1

    mean_measures = np.mean(all_measures, axis=1)
    if group == 'patient':
        patient = pd.DataFrame(mean_measures,
                               index=labels,
                               columns=[
                                   'patient.NodeDegree', 'patient.Strength',
                                   'patient.ClustCoeff', 'patient.BetweenCent',
                                   'patient.LocEff'
                               ])
        patient_measures = all_measures
        patient_adjmats = adjmats
    elif group == 'control':
        control = pd.DataFrame(mean_measures,
                               index=labels,
                               columns=[
                                   'control.NodeDegree', 'control.Strength',
                                   'control.ClustCoeff', 'control.BetweenCent',
                                   'control.LocEff'
                               ])
        control_measures = all_measures
        control_adjmats = adjmats
Example #9
        #steps of 5 or 10 percent are typical
        #a citation for integrating over the threshold range is likely in the
        #Fundamentals of Brain Network Analysis book
        #(http://www.danisbassett.com/uploads/1/1/8/5/11852336/network_analysis_i__ii.pdf)
        #typical practice: check that your metric's value is stable across the range of thresholds
        #the more metrics you use, the more you have to correct for multiple comparisons
        #make sure this is hypothesis-driven and not fishing

        for p in thresh_range:
            ge = []
            cc = []
            ntwk_corrmat_thresh = bct.threshold_proportional(
                network_correlation_matrix, p, copy=True)
            #np.savetxt(join(sink_dir, sessions[i], s, '{0}_corrmat_Laird2011_thresh_{1}.csv'.format(s, p)), ntwk_corrmat_thresh, delimiter=',')
            #measures of interest here
            #global efficiency
            le = bct.efficiency_wei(ntwk_corrmat_thresh)
            ge.append(le)

            #clustering coefficient
            c = bct.clustering_coef_wu(ntwk_corrmat_thresh)
            cc.append(c)

            network[p] = ge
            network_wise[p] = cc

        ntwk_df = pd.Series(network).T
        #ntwk_df.columns = ['total positive', 'total negative', 'efficiency', 'path length', 'modularity']

        ntwk_wise_df = pd.Series(network_wise).T
        #ntwk_wise_df.columns = ['betweenness', 'degree', 'positive weights', 'negative weights',
        #                                                   'community index', 'clustering coefficient']
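The comments above describe integrating a metric over a range of proportional thresholds so that results do not hinge on one arbitrary cutoff. A compact sketch of that pattern (the helper name and threshold range are illustrative, not from the original):

import numpy as np
import bct

def efficiency_auc(corrmat, threshs=np.arange(0.05, 0.55, 0.05)):
    # global efficiency at each proportional threshold, then the area
    # under that curve across the whole range
    effs = [bct.efficiency_wei(bct.threshold_proportional(corrmat, p, copy=True))
            for p in threshs]
    return np.trapz(effs, dx=threshs[1] - threshs[0])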
Example #10
for subject in subjects:
    cls = np.load(source_folder + "graph_data/%s_classic_pow_post.npy" %
                  subject).item()

    pln = np.load(source_folder + "graph_data/%s_plan_pow_post.npy" %
                  subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(
            np.asarray([bct.efficiency_wei(g) for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(
            np.asarray([bct.efficiency_wei(g) for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedKFold(y, n_folds=6, shuffle=True)  # scikit-learn < 0.18 cross-validation API

    cv_params = {
Example #11
def threshold_omst_global_cost_efficiency(mtx, n_msts=None):
    """ Threshold a graph by optimizing the formula GE-C via orthogonal MSTs.

    .. [Dimitriadis2017a] Dimitriadis, S. I., Salis, C., Tarnanas, I., & Linden, D. E. (2017). Topological Filtering of Dynamic Functional Brain Networks Unfolds Informative Chronnectomics: A Novel Data-Driven Thresholding Scheme Based on Orthogonal Minimal Spanning Trees (OMSTs). Frontiers in neuroinformatics, 11.
    .. [Dimitriadis2017n] Dimitriadis, S. I., Antonakakis, M., Simos, P., Fletcher, J. M., & Papanicolaou, A. C. (2017). Data-driven Topological Filtering based on Orthogonal Minimal Spanning Trees: Application to Multi-Group MEG Resting-State Connectivity. Brain Connectivity, (ja).
    .. [Basset2009] Bassett, D. S., Bullmore, E. T., Meyer-Lindenberg, A., Apud, J. A., Weinberger, D. R., & Coppola, R. (2009). Cognitive fitness of cost-efficient brain functional networks. Proceedings of the National Academy of Sciences, 106(28), 11747-11752.



    Parameters
    ----------
    mtx : array-like, shape(N, N)
        Symmetric, weighted and undirected connectivity matrix.

    n_msts : int or None
        Maximum number of OMSTs to compute. Default `None`; an exhaustive
        computation will be performed.

    Returns
    -------
    nCIJtree : array-like, shape(n_msts, N, N)
        A matrix containing all the orthogonal MSTs.

    CIJtree : array-like, shape(N, N)
        Resulting graph.

    degree : float
        The mean degree of the resulting graph.

    global_eff : float
        Global efficiency of the resulting graph.

    global_cost_eff_max : float
        The value where global efficiency - cost is maximized.

    cost_max : float
        Cost of the network at the maximum global cost efficiency.
    """
    imtx = np.copy(mtx)
    imtx_uptril = np.copy(mtx)

    N, _ = np.shape(imtx)

    for k in range(N):
        for l in range(k + 1, N):
            imtx_uptril[l, k] = 0.0
    np.fill_diagonal(imtx_uptril, 0.0)

    # Find the number of orthogonal msts according to the desired mean degree
    num_edges = len(np.where(imtx > 0.0)[0])

    if n_msts is None:
        num_msts = np.round(num_edges / (N - 1)) + 1
    else:
        num_msts = n_msts
    pos_num_msts = np.round(num_edges / (N - 1))

    if num_msts > pos_num_msts:
        num_msts = pos_num_msts

    CIJnotintree = imtx

    # Keep the N-1 connections of the num_msts MSTs
    num_msts = np.int32(num_msts)
    mst_conn = np.zeros((num_msts * (N - 1), 2))

    nCIJtree = np.zeros((num_msts, N, N))  #, dtype=np.int32)
    omst = np.zeros((num_msts, N, N), dtype=np.float32)

    # Extract each of the num_msts orthogonal MSTs in turn
    count = 0
    CIJtree = np.zeros((N, N))

    for no in range(num_msts):
        tmp_mtx = 1.0 / CIJnotintree  # invert weights to lengths; zeros become inf (divide warning is expected)
        graph = nx.from_numpy_matrix(tmp_mtx)  # networkx < 3.0 API; newer releases use nx.from_numpy_array
        mst = nx.minimum_spanning_tree(graph)
        links = list(mst.edges())

        new_mst = np.zeros((N, N))
        mst_num_links = len(links)
        for k in range(mst_num_links):
            link1 = links[k][0]
            link2 = links[k][1]

            CIJtree[link1, link2] = imtx[link1, link2]
            CIJtree[link2, link1] = imtx[link1, link2]

            mst_conn[count, 0] = link1
            mst_conn[count, 1] = link2

            new_mst[link1, link2] = imtx[link1, link2]
            new_mst[link2, link1] = imtx[link1, link2]
            count += 1

        iCIJtree = np.ones((N, N))
        iCIJtree[np.where(CIJtree != 0.0)] = 0
        CIJnotintree = CIJnotintree * iCIJtree
        nCIJtree[no, :, :] = CIJtree
        omst[no, :, :] = new_mst

    global_eff_ini = bct.efficiency_wei(imtx_uptril) * 2.0
    cost_ini = np.sum(imtx_uptril[:])

    # Insert the 1st MST
    graph = np.zeros((N, N))
    global_cost_eff = np.zeros((num_msts, 1))
    degrees = np.zeros((num_msts, 1))
    cost = np.zeros((num_msts, 1))

    for k in range(num_msts):
        graph = nCIJtree[k, :, :]

        degree = bct.degrees_und(graph)
        mean_degree = np.mean(degree)
        degrees[k] = mean_degree

        cost[k] = np.sum(graph) / cost_ini

        global_eff = bct.efficiency_wei(graph)
        global_cost_eff[k] = global_eff / global_eff_ini - cost[k]

    # Get the OMST where the formula GE-C is maximized
    indx_max = np.argmax(global_cost_eff)

    # Final output
    degree = degrees[indx_max]
    CIJtree = nCIJtree[indx_max, :, :]
    cost_max = cost[indx_max]
    global_eff = bct.efficiency_wei(1.0 / CIJtree)
    global_cost_eff_max = global_cost_eff[indx_max]

    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.plot(cost, global_cost_eff)
    # plt.plot(cost_max, global_cost_eff_max, 'b*', label='Max Global Cost Efficiency')
    # plt.title('Economical small-world network at max Global Cost Efficiency')
    # plt.xlabel('Cost')
    # plt.ylabel('Global Cost Efficiency')
    # plt.legend()
    # plt.show()

    # return nCIJtree, CIJtree, degree, global_eff, global_cost_eff_max, cost_max
    return nCIJtree, CIJtree, degree, global_eff, global_cost_eff_max, cost_max, cost, global_cost_eff
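A minimal usage sketch (not from the original), assuming a dense random symmetric matrix and networkx < 3.0, since the function relies on nx.from_numpy_matrix:

import numpy as np

rng = np.random.default_rng(42)
mtx = rng.random((32, 32))
mtx = (mtx + mtx.T) / 2
np.fill_diagonal(mtx, 0)
(nCIJtree, CIJtree, degree, global_eff,
 global_cost_eff_max, cost_max, cost, global_cost_eff) = \
    threshold_omst_global_cost_efficiency(mtx, n_msts=5)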
Example #12
def cal_indiv_graph():
	'''loop through subjects and get PC/WMD/Q/eG/CI'''

	### loop through subjects, 1 to 156

	gordon_files = glob.glob("Data/*Gordon*.netcc")
	yeo_files = glob.glob("Data/*Yeo*.netcc")
	files = gordon_files + yeo_files

	for f in files:
		
		if f in gordon_files:		
			cmd = "cat %s | tail -n 352 > Data/test" %f 
			roi='gordon'
		
		if f in yeo_files:
			cmd = "cat %s | tail -n 422 > Data/test" %f #422 for Yeo
			roi='yeo'

		sub = f[5:8]
		os.system(cmd)


		# load matrix
		matrix = np.genfromtxt('Data/test',delimiter='\t',dtype=None)
		matrix[np.isnan(matrix)] = 0.0  
		matrix[matrix<0]=0.0


		# step through costs, do infomap, return final infomap across cost
		max_cost = .15
		min_cost = .01

		partition = ave_consensus_costs_parition(matrix, min_cost, max_cost)
		partition = np.array(partition) + 1

		# calculate modularity, efficiency?
		Q = cal_modularity_w_imposed_community(matrix,partition)
		Eg = bct.efficiency_wei(matrix)

		# import thresholded matrix to BCT, import partition, run WMD/PC
		PCs = np.zeros((len(np.arange(min_cost, max_cost+0.01, 0.01)), matrix.shape[0]))
		WMDs = np.zeros((len(np.arange(min_cost, max_cost+0.01, 0.01)), matrix.shape[0]))

		for i, cost in enumerate(np.arange(min_cost, max_cost+0.01, 0.01)):  # match the rows preallocated above
			
			tmp_matrix = threshold(matrix.copy(), cost)
			
			#PC
			PCs[i,:] = bct.participation_coef(tmp_matrix, partition)
			#WMD (use the thresholded matrix, matching the PC computation)
			WMDs[i,:] = bct.module_degree_zscore(tmp_matrix, partition)

		PC = np.mean(PCs, axis=0) # ave across thresholds
		WMD = np.mean(WMDs, axis=0)

		
		fn = "Graph_output/%s_%s_PC" %(sub, roi)
		np.savetxt(fn, PC)

		fn = "Graph_output/%s_%s_WMD" %(sub, roi)
		np.savetxt(fn, WMD)
		
		fn = "Graph_output/%s_%s_Q" %(sub, roi)
		np.savetxt(fn, np.array(Q, ndmin=1))

		fn = "Graph_output/%s_%s_Eg" %(sub, roi)
		np.savetxt(fn, np.array(Eg, ndmin=1))

		fn = "Graph_output/%s_%s_Partition" %(sub, roi)
		np.savetxt(fn, partition)
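threshold, ave_consensus_costs_parition, and cal_modularity_w_imposed_community are project helpers that are not shown. A hypothetical stand-in for threshold, assuming it keeps the strongest fraction `cost` of edges:

import bct

def threshold(matrix, cost):
    # hypothetical: proportional thresholding at the given edge density
    return bct.threshold_proportional(matrix, cost, copy=False)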
Example #13
null_dist = pd.DataFrame(index=index, columns=["mean", "sdev"])
for session in sessions:
    print(session, datetime.datetime.now())
    for task in tasks.keys():
        print(task, datetime.datetime.now())
        for i in np.arange(0, len(tasks[task][0]["conditions"])):
            condition = tasks[task][0]["conditions"][i]
            print(condition, datetime.datetime.now())
            for mask in masks:
                print(mask, datetime.datetime.now())
                avg_corr = avg_corrmat(data_dir, subjects, task, condition,
                                       session, mask)
                eff_perm = []
                j = 1
                while j < 3:
                    effs = []
                    W = null_model_und_sign(avg_corr.values)
                    for thresh in np.arange(0.21, 0.31, 0.03):
                        thresh_corr = bct.threshold_proportional(W, thresh)
                        leff = bct.efficiency_wei(thresh_corr)
                        effs.append(leff)
                    effs_arr = np.asarray(effs)
                    leff_auc = np.trapz(effs_arr, dx=0.03, axis=0)
                    eff_perm.append(leff_auc)
                    j += 1
                null_dist.at[(sesh[session], task, conds[i], mask),
                             "mean"] = np.mean(eff_perm)
                null_dist.at[(sesh[session], task, conds[i], mask),
                             "sdev"] = np.std(eff_perm)
        null_dist.to_csv(join(sink_dir, "null_dist-local_efficiency.csv"))
Example #14
def get_graph_metrics(connectivity_vector):
    
    # reshape into matrix
    connectivity_matrix = np.reshape(connectivity_vector, (90, 90))
    
    # convert to networkx graph (from_numpy_matrix is the networkx < 3.0 API)
    connectivity_graph = nwx.from_numpy_matrix(connectivity_matrix)
    
    # convert to a distance graph, as some metrics need distances instead;
    # copy first so the connectivity matrix is not modified in place
    distance_matrix = connectivity_matrix.copy()
    distance_matrix[distance_matrix == 0] = np.finfo(np.float32).eps
    distance_matrix = 1.0 / distance_matrix
    distance_graph = nwx.from_numpy_matrix(distance_matrix)
    
    # intialise vector of metrics
    metrics = np.zeros((21,))
    # fill the vector of metrics
    # 1 and 2: degree (strength) distribution
    degrees = np.sum(connectivity_matrix, axis=1)   # weighted degree = node strength
    metrics[0] = np.mean(degrees)
    metrics[1] = np.std(degrees)
    
    # 3 and 4: weight distribution (lower-triangle entries only, so the zeroed
    # upper triangle does not bias the statistics)
    weights = connectivity_matrix[np.tril_indices_from(connectivity_matrix, k=-1)]
    metrics[2] = np.mean(weights)
    metrics[3] = np.std(weights)

    # 5: average shortest path length
    # transform weights to distances so this makes sense    
    metrics[4] = nwx.average_shortest_path_length(distance_graph, weight='weight')

    # 6: assortativity (unweighted; pass weight=None rather than the string 'None')
    metrics[5] = nwx.degree_assortativity_coefficient(connectivity_graph, weight=None)
    
    # 7: clustering coefficient
    metrics[6] = nwx.average_clustering(connectivity_graph, weight='weight')
    
    # 8: transitivity
    metrics[7] = nwx.transitivity(connectivity_graph)
    
    # 9 & 10: local and global efficiency
    metrics[8] = np.mean(bct.efficiency_wei(connectivity_matrix, local=True))
    metrics[9] = bct.efficiency_wei(connectivity_matrix, local=False)
    
    # 11: Clustering coefficient
    metrics[10] = np.mean(list(nwx.clustering(connectivity_graph, weight='weight').values()))
    
    # 12 & 13: Betweenness centrality
    metrics[11] = np.mean(list(nwx.betweenness_centrality(distance_graph, weight='weight').values()))
    metrics[12] = np.mean(list(nwx.current_flow_betweenness_centrality(distance_graph, weight='weight').values()))
    
    # 14: Eigenvector centrality
    metrics[13] = np.mean(list(nwx.eigenvector_centrality(distance_graph, weight='weight').values()))
    
    # 15: Closeness centrality
    metrics[14] = np.mean(list(nwx.closeness_centrality(distance_graph, distance='weight').values()))
    
    # 16: PageRank
    metrics[15] = np.mean(list(nwx.pagerank(connectivity_graph, weight='weight').values()))
    
    # 17: Rich club coefficient
    metrics[16] = np.mean(list(nwx.rich_club_coefficient(connectivity_graph).values()))
    
    # 18: Density    
    metrics[17] = bct.density_und(connectivity_matrix)[0]
    
    # 19, 20, 21: Eccentricity, radius, diameter
    # (dict() handles the iterator returned by networkx >= 2)
    spl_all = dict(nwx.shortest_path_length(distance_graph, weight='weight'))
    eccs = np.zeros(90,)
    for i in range(90):
        eccs[i] = np.max(list(spl_all[i].values()))
    metrics[18] = np.mean(eccs)
    metrics[19] = np.min(eccs)
    metrics[20] = np.max(eccs)
    
    return metrics
Example #15
def advanced_network_analysis(graph, kstep=1, sstep=600., outdir=None):
    """ Map structural cores to delineate network modules, and to identify
    hub regions that link distinct clusters.

    Definition:

    The k-core is the largest subnetwork comprising nodes of degree at
    least k.

    The s-core is the largest subnetwork comprising nodes of strength at
    least s.

    The optimal community structure is a subdivision of the
    network into nonoverlapping groups of nodes which maximizes the number
    of within-group edges and minimizes the number of between-group edges.

    The rich club coefficient, R, at level k is the fraction of edges that
    connect nodes of degree k or higher out of the maximum number of edges
    that such nodes might share.

    Network features:

    kcores:
        each node's associated k-core number.
    klevels:
        size of the k-cores as the k-level increases.
    scores:
        each node's associated s-core number.
    slevels:
        size of the s-cores as the s-level increases.
    community:
        the computed community structure.
    qstat:
        the objective modularity function optimized q-statistic.
    rich_clubs:
        vector of rich-club coefficients for levels 1 to klevel=the maximum
        degree of the adjacency matrix.
    global_efficiency:
        the global efficiency is the average of inverse shortest path
        length, and is inversely related to the characteristic path length.
    local_efficiency:
        the local efficiency is the global efficiency computed on the
        neighborhood of the node, and is related to the clustering coefficient.

    Parameters
    ----------
    graph: Graph
        the graph representing the connectome network.
    kstep: int (optional, default 1)
        the k-core size increment.
    sstep: float (optional, default 600.)
        the s-core size increment.
    outdir: str (optional, default None)
        if specified save some snapshots.

    Returns
    -------
    outputs: dict
        the network features.
    snaps: list of file
        the generated snapshots.
    """
    adjacency_matrix = numpy.ascontiguousarray(nx.to_numpy_matrix(graph))

    # Efficiency
    global_efficiency = bct.efficiency_wei(adjacency_matrix)
    local_efficiency = bct.efficiency_wei(adjacency_matrix, local=True)

    # K-core decomposition
    k = 0
    kcores = numpy.zeros(adjacency_matrix.shape[0], dtype=int)
    klevels = []
    kxs = []
    processed_indices = set()
    while True:
        if not graph.is_directed():
            kcore, kn, peelorder, peellevel = bct.kcore_bu(adjacency_matrix,
                                                           k,
                                                           peel=True)
            klevels.append(kn)
            kxs.append(k)
        else:
            kcore, kn, peelorder, peellevel = bct.kcore_bd(adjacency_matrix,
                                                           k,
                                                           peel=True)
        for indices in peelorder:
            new_indices = set(indices) - processed_indices
            processed_indices = processed_indices.union(new_indices)
            kcores[list(new_indices)] = k
        if kn == 0:
            break
        k += 1

    # S-core decomposition
    scores = numpy.zeros(adjacency_matrix.shape[0], dtype=int)
    slevels = []
    sxs = []
    processed_indices = set()
    s = 0
    while True:
        if not graph.is_directed():
            score, sn = bct.score_wu(adjacency_matrix, s)
            slevels.append(sn)
            sxs.append(s)
        else:
            raise NotImplementedError
        ff = numpy.where(score == 0)
        for node_index in ff[0]:
            if node_index in processed_indices:
                continue
            if not (score[node_index, :] == 0).all():
                continue
            scores[node_index] = s
            processed_indices.add(node_index)
        if sn == 0:
            break
        s += sstep

    # Community detection
    community, qstat = bct.community_louvain(adjacency_matrix,
                                             gamma=1,
                                             ci=None,
                                             B="modularity",
                                             seed=None)

    # Hub detection
    if not graph.is_directed():
        rich_clubs = bct.rich_club_wu(adjacency_matrix, klevel=None)
    else:
        rich_clubs = bct.rich_club_wd(adjacency_matrix, klevel=None)

    # Summarize results in a dictionary
    params = locals()
    outputs = dict([
        (name, params[name])
        for name in ("kcores", "klevels", "kxs", "scores", "slevels", "sxs",
                     "community", "qstat", "rich_clubs", "global_efficiency",
                     "local_efficiency")
    ])

    # Snaps
    snaps = []
    if outdir is not None:
        import pylab as plt
        if not os.path.isdir(outdir):
            raise ValueError("'{0}' is not a valid directory.".format(outdir))
        for x, measures, label in [(kxs, klevels, "klevels"),
                                   (sxs, slevels, "slevels")]:
            outfile = os.path.join(outdir, label + ".png")
            snaps.append(outfile)
            plt.figure()
            plt.plot(x, measures, "-bo")
            plt.xlabel(label)
            plt.ylabel("Number of nodes")
            plt.savefig(outfile)
            plt.close()

    return outputs, snaps
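A minimal usage sketch (not from the original), assuming an undirected random graph with random edge weights, that numpy/nx/bct are already imported at module level as in the function body, and networkx < 3.0 (the function calls nx.to_numpy_matrix):

import networkx as nx
import numpy

G = nx.gnm_random_graph(30, 120, seed=0)
for u, v in G.edges():
    G[u][v]["weight"] = float(numpy.random.rand())
outputs, snaps = advanced_network_analysis(G, sstep=1.0)  # small s-step to match weights < 1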
Example #16
#####################################################################
########### testing graph theoretic measures (eff, leff) ############


# geffs = pd.DataFrame(index=df.index, columns=np.arange(0,1,0.1))
for i in df.index:
    corrmat = pd.read_csv(
        join(data_dir, "out", "{0}-phy-corrmat-regionwise.csv".format(i)),
        header=0,
        index_col=0,
    )
    gw = []
    for p in np.arange(0, 1, 0.1):
        corrmat_thresh = bct.threshold_proportional(corrmat.values, p)
        gw.append(bct.efficiency_wei(corrmat_thresh))
    # geffs.at[i] = gw
    df.at[i, "Global Efficiency Physics"] = np.trapz(gw[4:9], dx=0.1)
    corrmat = pd.read_csv(
        join(data_dir, "out", "{0}-gen-corrmat-regionwise.csv".format(i)),
        header=0,
        index_col=0,
    )
    gw = []
    for p in np.arange(0, 1, 0.1):
        corrmat_thresh = bct.threshold_proportional(corrmat.values, p)
        gw.append(bct.efficiency_wei(corrmat_thresh))
    # geffs.at[i] = gw
    df.at[i, "Global Efficiency General"] = np.trapz(gw[4:9], dx=0.1)

f, ax = plt.subplots()
Example #17
def process(data):
    return bct.efficiency_wei(data, local=False)
Example #18
def localEfficiency(network):
    return bct.efficiency_wei(network, local=True)
Example #19
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_length_matrix, args.in_conn_matrix])

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    if not args.append_json:
        assert_outputs_exist(parser, args, args.out_json)
    else:
        logging.debug('Using --append_json, make sure to delete {} '
                      'before re-launching a group analysis.'.format(
                          args.out_json))

    if args.append_json and args.overwrite:
        parser.error('Cannot use the append option at the same time as '
                     'overwrite.\nAmbiguous behavior, consider deleting the '
                     'output json file first instead.')

    conn_matrix = load_matrix_in_any_format(args.in_conn_matrix)
    len_matrix = load_matrix_in_any_format(args.in_length_matrix)

    if args.filtering_mask:
        mask_matrix = load_matrix_in_any_format(args.filtering_mask)
        conn_matrix *= mask_matrix
        len_matrix *= mask_matrix
    N = len_matrix.shape[0]

    if args.avg_node_wise:
        func_cast = avg_cast
    else:
        func_cast = list_cast

    gtm_dict = {}
    betweenness_centrality = bct.betweenness_wei(len_matrix) / ((N - 1) *
                                                                (N - 2))
    gtm_dict['betweenness_centrality'] = func_cast(betweenness_centrality)
    ci, gtm_dict['modularity'] = bct.modularity_louvain_und(conn_matrix,
                                                            seed=0)

    gtm_dict['assortativity'] = bct.assortativity_wei(conn_matrix, flag=0)
    gtm_dict['participation'] = func_cast(
        bct.participation_coef_sign(conn_matrix, ci)[0])
    gtm_dict['clustering'] = func_cast(bct.clustering_coef_wu(conn_matrix))

    gtm_dict['nodal_strength'] = func_cast(bct.strengths_und(conn_matrix))
    gtm_dict['local_efficiency'] = func_cast(
        bct.efficiency_wei(len_matrix, local=True))
    gtm_dict['global_efficiency'] = func_cast(bct.efficiency_wei(len_matrix))
    gtm_dict['density'] = func_cast(bct.density_und(conn_matrix)[0])

    # Rich club warns about the matrix rank and produces NaNs, so filter them out
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        tmp_rich_club = bct.rich_club_wu(conn_matrix)
    gtm_dict['rich_club'] = func_cast(tmp_rich_club[~np.isnan(tmp_rich_club)])

    # Path length gives an infinite distance for unconnected nodes
    # All of this is simply to fix that
    empty_connections = np.where(np.sum(len_matrix, axis=1) < 0.001)[0]
    if len(empty_connections):
        len_matrix = np.delete(len_matrix, empty_connections, axis=0)
        len_matrix = np.delete(len_matrix, empty_connections, axis=1)

    path_length_tuple = bct.distance_wei(len_matrix)
    gtm_dict['path_length'] = func_cast(path_length_tuple[0])
    gtm_dict['edge_count'] = func_cast(path_length_tuple[1])

    if not args.avg_node_wise:
        for i in empty_connections:
            gtm_dict['path_length'].insert(i, -1)
            gtm_dict['edge_count'].insert(i, -1)

    if args.small_world:
        gtm_dict['omega'], gtm_dict['sigma'] = omega_sigma(len_matrix)

    if os.path.isfile(args.out_json) and args.append_json:
        with open(args.out_json) as json_data:
            out_dict = json.load(json_data)
        for key in gtm_dict.keys():
            if isinstance(out_dict[key], list):
                out_dict[key].append(gtm_dict[key])
            else:
                out_dict[key] = [out_dict[key], gtm_dict[key]]
    else:
        out_dict = {}
        for key in gtm_dict.keys():
            out_dict[key] = [gtm_dict[key]]

    with open(args.out_json, 'w') as outfile:
        json.dump(out_dict,
                  outfile,
                  indent=args.indent,
                  sort_keys=args.sort_keys)
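avg_cast, list_cast, and omega_sigma come from the surrounding package and are not shown. Hypothetical minimal stand-ins for the two casting helpers, assuming they reduce a node-wise array to its average or to a plain JSON-serializable list:

import numpy as np

def avg_cast(arr):
    # hypothetical: collapse a node-wise metric to its average
    return float(np.average(arr))

def list_cast(arr):
    # hypothetical: make a metric JSON-serializable
    return np.asarray(arr, dtype=float).tolist()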
Example #20
                                format(subject, session, task, conditions[i],
                                       mask),
                            ),
                            delimiter=" ",
                        )

                        leff_s = []
                        coef_s = []
                        for p in np.arange(kappa_upper, kappa_lower, 0.02):
                            thresh = bct.threshold_proportional(corrmat,
                                                                p,
                                                                copy=True)

                            # network measures of interest here
                            # local efficiency (local=True returns node-wise values)
                            le = bct.efficiency_wei(thresh, local=True)
                            leff_s.append(le)

                        leff_s = np.asarray(leff_s)
                        leff = np.trapz(leff_s, dx=0.02, axis=0)  # dx matches the 0.02 threshold step

                        for j in np.arange(0, leff.shape[0]):
                            df.at[(subject, session, task, conds[i], mask),
                                  "lEff{0}".format(j), ] = leff[j]
                        lab_notebook.at[(subject, session, task, conds[i],
                                         mask),
                                        "end"] = str(datetime.datetime.now())
                    except Exception as e:
                        print(e, subject, session)
                        lab_notebook.at[(subject, session, task, conds[i],
                                         mask), "errors"] = [
Example #21
def process(data):
    # average first, then calculate, because the full calculation is slow
    return bct.efficiency_wei(data, local=True)
Example #22
            subject,
            "fc left central executive-right central executive {0}".format(condition),
        ] = corrmats[condition][14, 17]
        ge = []
        le = {}
        loceff = {}
        loceff["default mode"] = []
        loceff["left central executive"] = []
        loceff["right central executive"] = []
        for p in thresh_range:
            corrmat_thresh = bct.threshold_proportional(
                corrmats[condition], p, copy=True
            )
            # measures of interest here
            # global efficiency
            geff = bct.efficiency_wei(corrmat_thresh)
            ge.append(geff)

            # local efficiency
            leff = bct.efficiency_wei(corrmat_thresh, local=True)
            # print leff[2]
            for network in networks:
                # print network
                loceff[labels[network]].append(leff[network])
                # loceff['{0}, {1}'.format(labels[network], condition)].append(leff[network])
            # print loceff
            le["{0}, {1}".format(p, condition)] = loceff

        # print 'global efficiency is {0}'.format(ge)
        df.at[subject, "global efficiency {0}".format(condition)] = np.trapz(ge, dx=0.1)
Example #23
            np.savetxt(join(sink_dir, sesh[session], subject, '{0}-session-{1}-rest_network_corrmat_craddock2012.csv'.format(subject, session)), craddock_corrmat, delimiter=",")
            #craddock_corrmat = np.genfromtxt(join(sink_dir, session, 'resting-state', subject, '{0}_network_corrmat_craddock2012.csv'.format(subject)), delimiter=",")

            ge_s = []
            ge_c = []
            cp_s = []
            cp_c = []
            md_s = []
            md_c = []
            for p in np.arange(0.1, 1, 0.1):
                ntwk = []
                shen_thresh = bct.threshold_proportional(shen_corrmat, p, copy=True)
                craddock_thresh = bct.threshold_proportional(craddock_corrmat, p, copy=True)
                #network measures of interest here
                #global efficiency
                ge = bct.efficiency_wei(shen_thresh)
                ge_s.append(ge)
                ge = bct.efficiency_wei(craddock_thresh)
                ge_c.append(ge)

                #characteristic path length (note: charpath expects a distance matrix,
                #so results on a thresholded correlation matrix need careful interpretation)
                cp = bct.charpath(shen_thresh)
                cp_s.append(cp[0])
                cp = bct.charpath(craddock_thresh)
                cp_c.append(cp[0])

                #modularity
                md = bct.modularity_louvain_und(shen_thresh)
                md_s.append(md[1])
                md = bct.modularity_louvain_und(craddock_thresh)
                md_c.append(md[1])
Example #24
def test_glob_eff():
    x = load_sample(thres=.4)
    geff = bct.efficiency_wei(x)
    print(geff, 1.8784)
    assert np.allclose(geff, 1.8784, atol=1e-4)
Example #25
def get_true_network_metrics(A):

    #control centrality
    c_c = control_centrality(A)

    cc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        cc_fake[i] = np.mean(control_centrality(generate_fake_graph(A)))

    m_cc_fake = np.mean(cc_fake)
    cc_norm = c_c / m_cc_fake

    # Get identity of node with lowest control centrality
    min_cc_true = np.where(c_c == np.amin(c_c))[0]

    # get synchronizability
    sync = synchronizability(A)

    # normalized sync
    sync_fake = np.zeros((100, 1))
    for i in range(0, 100):
        sync_fake[i] = synchronizability(generate_fake_graph(A))

    m_sync_fake = np.mean(sync_fake)
    sync_norm = sync / m_sync_fake

    # get betweeness centrality
    bc = betweenness_centrality(A)
    bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        bc_fake[i] = np.mean(betweenness_centrality(generate_fake_graph(A)))

    m_bc_fake = np.mean(bc_fake)
    bc_norm = bc / m_bc_fake

    # Get identity of node with max bc
    max_bc_true = np.where(bc == np.amax(bc))[0]

    # get eigenvector centrality
    ec = bct.eigenvector_centrality_und(A)
    ec_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ec_fake[i] = np.mean(
            bct.eigenvector_centrality_und(generate_fake_graph(A)))

    m_ec_fake = np.mean(ec_fake)
    ec_norm = ec / m_ec_fake

    # Get identity of node with max ec
    max_ec_true = np.where(ec == np.amax(ec))[0]

    # get edge betweeness centrality
    edge_bc, ignore = bct.edge_betweenness_wei(A)
    edge_bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        edge_bc_fake[i] = np.mean(
            bct.edge_betweenness_wei(generate_fake_graph(A))[0])
    m_edge_bc_fake = np.mean(edge_bc_fake)
    edge_bc_norm = edge_bc / m_edge_bc_fake

    # get clustering coeff
    clust = bct.clustering_coef_wu(A)
    clust_fake = np.zeros((100, 1))
    for i in range(0, 100):
        clust_fake[i] = np.mean(bct.clustering_coef_wu(generate_fake_graph(A)))

    m_clust_fake = np.mean(clust_fake)
    clust_norm = clust / m_clust_fake

    # Get identity of node with max clust
    max_clust_true = np.where(clust == np.amax(clust))[0]

    # get node strength
    ns = node_strength(A)
    ns_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ns_fake[i] = np.mean(node_strength(generate_fake_graph(A)))

    m_ns_fake = np.mean(ns_fake)
    ns_norm = ns / m_ns_fake

    # Get identity of node with max clust
    max_ns_true = np.where(ns == np.amax(ns))[0]

    #Get true efficiency
    Ci, ignore = bct.modularity_und(A)
    par = bct.participation_coef(A, Ci)

    eff = bct.efficiency_wei(A, local=False)
    eff_fake = np.zeros((100, 1))
    for i in range(0, 100):
        eff_fake[i] = (bct.efficiency_wei(generate_fake_graph(A)))

    m_eff_fake = np.mean(eff_fake)
    eff_norm = eff / m_eff_fake

    # Get true transitivity
    trans = bct.transitivity_wu(A)
    trans_fake = np.zeros((100, 1))
    for i in range(0, 100):
        trans_fake[i] = (bct.transitivity_wu(generate_fake_graph(A)))

    m_trans_fake = np.mean(trans_fake)
    trans_norm = trans / m_trans_fake

    # store output results in a dictionary
    #nodal
    results = {}
    results['control_centrality'] = c_c
    results['control_centrality_norm'] = cc_norm
    results['min_cc_node'] = min_cc_true

    # global measure
    results['sync'] = sync
    results['sync_norm'] = sync_norm

    # nodal
    results['bc'] = bc
    results['bc_norm'] = bc_norm
    results['max_bc_node'] = max_bc_true

    # nodal
    results['ec'] = ec
    results['ec_norm'] = ec_norm
    results['max_ec_node'] = max_ec_true

    # nodal
    results['clust'] = clust
    results['clust_norm'] = clust_norm
    results['max_clust_node'] = max_clust_true

    # nodal
    results['ns'] = ns
    results['ns_norm'] = ns_norm
    results['max_ns_node'] = max_ns_true

    # global
    results['eff'] = eff
    results['eff_norm'] = eff_norm

    # global
    results['trans'] = trans
    results['trans_norm'] = trans_norm

    # nodal
    results['par'] = par

    # edge
    results['edge_bc'] = edge_bc
    results['edge_bc_norm'] = edge_bc_norm

    return (results)
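generate_fake_graph, control_centrality, synchronizability, betweenness_centrality, and node_strength are project helpers that are not shown. A hypothetical sketch of the null model, assuming degree-preserving rewiring via bct:

import bct

def generate_fake_graph(A, n_iter=5):
    # hypothetical: rewire edges ~n_iter times each while preserving the degree sequence
    W, _ = bct.randmio_und(A, n_iter)
    return W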
Example #26
nb_trials = 100
N = 200
k = 4

# Pre-allocate arrays
prob = 10**(-3*np.random.random(nb_trials))
SWI   = np.zeros(nb_trials)
Eglob = np.zeros(nb_trials)
Eloc  = np.zeros(nb_trials)

# Calculate statistics
for i, p in enumerate(prob):
  CIJ = NetworkWattsStrogatz(N, k, p)

  SWI[i]   = SmallWorldIndex(CIJ)
  Eglob[i] = bct.efficiency_wei(CIJ, local=False)
  Eloc[i]  = np.mean(bct.efficiency_wei(CIJ, local=True))

# Plot figures
plt.figure(1)
plt.semilogx(prob, SWI, marker='.', linestyle='none')
plt.xlabel('Rewiring probability')
plt.ylabel('Small World Index')

plt.figure(2)
plt.subplot(211)
plt.semilogx(prob, Eglob, marker='.', linestyle='none')
plt.xlabel('Rewiring probability')
plt.ylabel('Global efficiency')
plt.subplot(212)
plt.semilogx(prob, Eloc, marker='.', linestyle='none')
Example #27
                            '{0}-session-{1}_{2}-{3}_{4}-corrmat.csv'.format(
                                subject, session, task, conditions[i], mask)),
                                                delimiter=' ')

                        ge_s = []
                        cp_s = []
                        md_s = []
                        for p in np.arange(kappa_upper, kappa_lower, 0.01):
                            ntwk = []
                            thresh = bct.threshold_proportional(corrmat,
                                                                p,
                                                                copy=True)

                            #network measures of interest here
                            #global efficiency
                            ge = bct.efficiency_wei(thresh)
                            ge_s.append(ge)

                            #characteristic path length
                            cp = bct.charpath(thresh)
                            cp_s.append(cp[0])

                            #modularity
                            md = bct.modularity_louvain_und(thresh)
                            md_s.append(md[1])

                        df.at[(subject, session, task, conds[i], mask),
                              'efficiency'] = np.trapz(ge_s, dx=0.01)
                        df.at[(subject, session, task, conds[i], mask),
                              'charpath'] = np.trapz(cp_s, dx=0.01)
                        df.at[(subject, session, task, conds[i], mask),
Example #28
def graph_estimates(cm, th):

    #dictionary for storing our results
    d = OrderedDict()

    #thresholding moved here for other matrices than MatLab matrices
    #removes negative weights
    cm = bct.threshold_absolute(cm, 0.0)

    cm = threshold_connected(cm, th)

    
    #for binarizing the connectivity matrices, 
    #we work with weighted so this is turned off
    #bin_cm = bct.binarize(cm)
    
    #invert the connectivity for computing shortest paths
    cm_inv = bct.invert(cm)

    #modularity_und is found in modularity.py
    modularity_und = bct.modularity_und(cm)

    #the community_affiliation vector that gets input to some of the functions
    community_affiliation = modularity_und[0]
    
    #distance_wei and charpath is found in distance.py
    distance_wei = bct.distance_wei(cm_inv)
    charpath = bct.charpath(distance_wei[0], False, False)

    #clustering_coef_wu is found in clustering.py
    clustering_coef_wu = bct.clustering_coef_wu(cm)
    avg_clustering_coef_wu = np.mean(clustering_coef_wu)


    #assortativity_wei is found in core.py
    d['assortativity_wei-r'] = bct.assortativity_wei(cm, flag=0)

    #just taking the average of clustering_coef_wu
    d['avg_clustering_coef_wu:C'] = avg_clustering_coef_wu

    d['charpath-lambda'] = charpath[0]
    #d['charpath-efficiency'] = charpath[1]   
    #d['charpath-ecc'] = charpath[2]           
    #d['charpath-radius'] = charpath[3]
    #d['charpath-diameter'] = charpath[4]

    d['clustering_coef_wu-C'] = clustering_coef_wu


    d['efficiency_wei-Eglob'] = bct.efficiency_wei(cm)
    #d['efficiency_wei-Eloc'] = bct.efficiency_wei(cm, True)

    #d['modularity_und-ci'] = modularity_und[0]
    d['modularity_und-Q'] = modularity_und[1]

    d['small_worldness:S'] = compute_small_worldness(cm,
                                                     avg_clustering_coef_wu,
                                                     charpath[0])

   
   #transitivity_wu can be found in clustering.py
    d['transitivity_wu-T'] = bct.transitivity_wu(cm)


    #EXAMPLES for local measures and binary measures. Comment in to use. 

    #VECTOR MEASURES
    #d['betweenness_wei-BC'] = bct.betweenness_wei(cm_inv)
    # d['module_degree_zscore-Z'] = bct.module_degree_zscore(cm, community_affiliation)
    #d['degrees_und-deg'] = bct.degrees_und(cm)
    #d['charpath-ecc'] = charpath[2]


    #BINARIES
    # d['clustering_coef_bu-C'] = bct.clustering_coef_bu(bin_cm)
    # d['efficiency_bin-Eglob'] = bct.efficiency_bin(bin_cm)
    # d['efficiency_bin-Eloc'] = bct.efficiency_bin(bin_cm, True)
    #  d['modularity_und_bin-ci'] = modularity_und_bin[0]
    #  d['modularity_und_bin-Q'] = modularity_und_bin[1]
    # d['transitivity_bu-T'] = bct.transitivity_bu(bin_cm)
    #  d['betweenness_bin-BC'] = bct.betweenness_bin(bin_cm)
    #  modularity_und_bin = bct.modularity_und(bin_cm)
    #d['participation_coef'] = bct.participation_coef(cm, community_affiliation)


    ######## charpath giving problems with ecc, radius and diameter
    # np.seterr(invalid='ignore')


    return d
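threshold_connected and compute_small_worldness are user-defined helpers that are not shown. A hypothetical stand-in for threshold_connected, assuming a plain proportional threshold (the real helper presumably also guards against disconnecting the graph):

import bct

def threshold_connected(cm, th):
    # hypothetical: keep the strongest proportion `th` of weights
    return bct.threshold_proportional(cm, th, copy=True)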
Example #29
def test_loc_eff():
    x = load_sample(thres=.4)
    leff = bct.efficiency_wei(x, local=True)
    print(np.sum(leff), 315.6225)
    assert np.allclose(np.sum(leff), 315.6225, atol=0.1)
Example #30
for subject in subjects:
    cls = np.load(source_folder + "graph_data/%s_classic_pow_pln.npy" %
                  subject).item()

    pln = np.load(source_folder + "graph_data/%s_plan_pow_pln.npy" %
                  subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(np.asarray([bct.efficiency_wei(g)
                                    for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(np.asarray([bct.efficiency_wei(g)
                                    for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedShuffleSplit(y, test_size=0.1)
Example #31
for subject in subjects:
    cls = np.load(source_folder +
                  "graph_data/%s_classic_pow_pre.npy" % subject).item()

    pln = np.load(source_folder +
                  "graph_data/%s_plan_pow_pre.npy" % subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(
            np.asarray([bct.efficiency_wei(g) for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(
            np.asarray([bct.efficiency_wei(g) for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedShuffleSplit(y, test_size=0.1)

    cv_params = {
Example #32
def calc_graph_vector(filename, thresholds):
    '''
    This function calculates graph measures for a connectivity matrix loaded from a
    text file and saves the results under the same name with the suffix '_GV' (in the
    same directory the file is located).
    
    Input arguments:
        filename(str):     name of file containing connectivity matrix (txt extension)
        thresholds(list):  list containing thresholds of interest
    
    Kamil Bonna, 14.08.2018
    '''
    #--- check inputs
    import os
    if not os.path.exists(filename):
        raise Exception('{} does not exist'.format(filename))
    if not isinstance(thresholds, list):
        raise Exception('thresholds should be a list!')
        
    import numpy as np
    import bct

    #=== inner variables
    N_rep_louvain = 10   # number of Louvain algorithm repetitions
    N_measures = 10      # number of graph measures
    gamma = 1            # Louvain resolution parameter
    
    #--- load matrix 
    A_raw = np.loadtxt(filename)
    N = A_raw.shape[0]   # number of nodes
    M_sat = N*(N-1)/2    # max number of connections 

    #=== calculate output
    graph_measures = np.zeros([ len(thresholds), N_measures ])  # create empty output matrix
    for thr in range(len(thresholds)):
        #--- thresholding 
        A = bct.threshold_proportional( A_raw, p=thresholds[thr], copy=True )
        A[np.nonzero(A<0)] = 0                                  # ensure only positive weights
        M_act = A[np.nonzero(A>0)].shape[0] / 2                 # actual number of nonzero connections
        #--- calculate measures
        #-- mean connection strength (np.sum(A) counts each undirected edge twice)
        S = np.sum(A)/(2*M_act)
        #-- connection strength std
        Svar = np.std(A[np.nonzero(A)])
        #-- modularity
        [M,Q] = bct.modularity_louvain_und(A, gamma)
        for i in range(N_rep_louvain) :
            [Mt,Qt] = bct.modularity_louvain_und(A, gamma)
            if Qt > Q :
                Q = Qt
                M = Mt
        #-- participation coefficient (positive weights only; A is nonnegative after thresholding)
        P = np.mean(bct.participation_coef_sign(A, M)[0])
        #-- clustering 
        C = np.mean(bct.clustering_coef_wu(A))
        #-- transitivity 
        T = bct.transitivity_wu(A)
        #-- assortativity
        Asso = bct.assortativity_wei(A)
        #-- global & local efficiency 
        Eglo = bct.efficiency_wei(A)
        Eloc = np.mean(bct.efficiency_wei(A, local=True))
        #-- mean eigenvector centrality
        Eig = np.mean(bct.eigenvector_centrality_und(A))
        #--- write vector to matrix
        graph_measures[thr] = [ S, Svar, Q, P, C, T, Asso, Eglo, Eloc, Eig ]

    #=== save results to file
    np.savetxt( filename[:-4]+'_GV.txt', graph_measures )
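A minimal usage sketch (not from the original), writing a random symmetric matrix to a text file first:

import numpy as np

rng = np.random.default_rng(0)
A = rng.random((40, 40))
A = (A + A.T) / 2
np.fill_diagonal(A, 0)
np.savetxt('conn.txt', A)
calc_graph_vector('conn.txt', [0.2, 0.3, 0.4])   # writes conn_GV.txt next to the input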