def test_participation():
    W = np.eye(3)
    ci = np.array([1, 1, 2])

    assert np.allclose(bct.participation_coef(W, ci), [0, 0, 0])
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [0, 0, 0])

    W = np.ones((3, 3))
    assert np.allclose(bct.participation_coef(W, ci), [
                       0.44444444, 0.44444444, 0.44444444])
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [
                       0.44444444, 0.44444444, 0.44444444])

    W = np.eye(3)
    W[0, 1] = 1
    W[0, 2] = 1
    assert np.allclose(bct.participation_coef(W, ci), [0.44444444, 0, 0])
    assert np.allclose(bct.participation_coef_sign(W, ci)
                       [0], [0.44444444, 0, 0])

    W = np.eye(3)
    W[0, 1] = -1
    W[0, 2] = -1
    W[1, 2] = 1
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [0.,  0.5,  0.])
def participation_test():
    W = np.eye(3)
    ci = np.array([1, 1, 2])

    assert np.allclose(bct.participation_coef(W, ci), [0, 0, 0])
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [0, 0, 0])

    W = np.ones((3, 3))
    assert np.allclose(bct.participation_coef(W, ci), [
                       0.44444444, 0.44444444, 0.44444444])
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [
                       0.44444444, 0.44444444, 0.44444444])

    W = np.eye(3)
    W[0, 1] = 1
    W[0, 2] = 1
    assert np.allclose(bct.participation_coef(W, ci), [0.44444444, 0, 0])
    assert np.allclose(bct.participation_coef_sign(W, ci)
                       [0], [0.44444444, 0, 0])

    W = np.eye(3)
    W[0, 1] = -1
    W[0, 2] = -1
    W[1, 2] = 1
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [0.,  0.5,  0.])
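A minimal, hand-rolled sketch of the participation coefficient formula that the expected values in the tests above follow; participation_by_hand is not part of bctpy, it just spells out PC_i = 1 - sum_s (k_is / k_i)^2, where k_i is node i's total strength and k_is its strength toward community s. For np.ones((3, 3)) with ci = [1, 1, 2], every node has k_i = 3, k toward module 1 of 2 and toward module 2 of 1, giving 1 - (2/3)**2 - (1/3)**2 = 4/9 = 0.4444...

import numpy as np

def participation_by_hand(W, ci):
    # hand-rolled check of the participation coefficient, diagonal included,
    # matching the expected values asserted in the tests above
    W = np.asarray(W, dtype=float)
    ci = np.asarray(ci)
    k = W.sum(axis=1)                      # total strength of each node
    pc = np.zeros(len(ci))
    for s in np.unique(ci):
        k_s = W[:, ci == s].sum(axis=1)    # strength toward community s
        pc += np.divide(k_s, k, out=np.zeros_like(k), where=k != 0) ** 2
    return 1 - pc

print(participation_by_hand(np.ones((3, 3)), np.array([1, 1, 2])))
# [0.44444444 0.44444444 0.44444444]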
 def calculate_communities(self, reorder=False, **kwargs):
     assert self.community_alg is not None, \
         print("Community algorithm has not been set!")
     G = self.G
     graph_mat = self.graph_mat
     # calculate community structure
     comm, mod = self.community_alg(graph_mat, **kwargs)
     # if there is a reference, relabel communities based on their closest association
     if self.ref_community:
         comm = self._relabel_community(comm, self.ref_community)
     # label vertices of G
     G.vs['community'] = comm
     G.vs['within_module_degree'] = bct.module_degree_zscore(
         graph_mat, comm)
     if np.min(graph_mat) < 0:
         participation_pos, participation_neg = bct.participation_coef_sign(
             graph_mat, comm)
         G.vs['part_coef_pos'] = participation_pos
         G.vs['part_coef_neg'] = participation_neg
     else:
         G.vs['part_coef'] = bct.participation_coef(graph_mat, comm)
     if reorder:
         self.reorder()
     # calculate subgraph (within-community) characteristics
     self._subgraph_analysis()
     return mod
Example #4
def run_graph(sub):
    '''run graph analysis, get PC/WMD/Q'''
    for condition in Conditions:
        for atlas in Atlases:

            matrix = []
            for direction in Directions:
                fn = datapath + str(
                    sub) + '_' + condition + direction + atlas + '.corrmat'
                try:
                    matrix += [np.loadtxt(fn)]
                except:
                    break

            PCs = []
            WMDs = []
            Qs = []
            if np.shape(matrix)[0] > 1:
                matrix = np.sum(matrix, axis=0) / np.shape(matrix)[0]

                for c in np.arange(0.05, 0.16, 0.01):
                    mat_th = threshold(matrix.copy(), c)
                    graph = matrix_to_igraph(matrix.copy(), c)
                    i = graph.community_infomap(edge_weights='weight')
                    CI = np.asarray(i.membership) + 1
                    PCs += [bct.participation_coef(mat_th, CI)]
                    WMDs += [bct.module_degree_zscore(mat_th, CI)]
                    Qs += [i.modularity]

                fn = datapath + str(sub) + '_' + condition + atlas + '.PC'
                np.save(fn, PCs)
                fn = datapath + str(sub) + '_' + condition + atlas + '.WMD'
                np.save(fn, WMDs)
                fn = datapath + str(sub) + '_' + condition + atlas + '.Q'
                np.save(fn, Qs)
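The `threshold` and `matrix_to_igraph` helpers called above are not shown in this snippet. A minimal sketch of `threshold`, assuming it applies a proportional (density) threshold that keeps only the strongest fraction `cost` of edges, which bct.threshold_proportional already provides:

import numpy as np
import bct

def threshold(matrix, cost):
    # assumed behavior: zero the diagonal, then keep only the strongest
    # `cost` proportion of edge weights (density-based threshold)
    np.fill_diagonal(matrix, 0)
    return bct.threshold_proportional(matrix, cost, copy=True)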
def calc_particip(G, partition, A, C):
    # Calculate participation coefficient for each node
    P = participation_coef(A, C)
    # Create a dictionary keyed by RSN, values are lists of particip coef of every node in RSN
    particip = collections.defaultdict(list)
    for ind, p in enumerate(P):
        particip[partition[list(G.nodes())[ind]]].append(p)

    return particip
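A hypothetical usage sketch for calc_particip; the karate-club graph, its 'club' labels and the numeric community vector C below are illustrative stand-ins for an RSN partition. It assumes the surrounding module already has `import collections` and `from bct import participation_coef`, which the bare participation_coef call above implies.

import networkx as nx
import numpy as np

G = nx.karate_club_graph()
A = nx.to_numpy_array(G)                                   # same node order as G.nodes()
partition = {n: d['club'] for n, d in G.nodes(data=True)}  # node -> RSN-like label
labels = sorted(set(partition.values()))
C = np.array([labels.index(partition[n]) + 1 for n in G.nodes()])

particip = calc_particip(G, partition, A, C)   # dict: label -> list of node PCs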
Example #6
    def compute(self):
        try:
            name = "Results/Communities/" + self.name + "_" + self.communities_algorithm + "_communities.node"
            print('Trying', name)
            last_results = pd.read_csv(
                name,
                " ",
                names=['X', 'Y', 'Z', 'Community', 'Degree', 'RegionName'])
            print('Loading communities...')
            communities = last_results['Community']
            region_names = last_results['RegionName']

        except Exception:
            print('Dividing communities...')
            communities = NM(self.g, self.name, self.stats,
                             self.communities_algorithm).compute()
            region_names = pd.read_csv("data/lobes.node", " ",
                                       header='infer')['RegionName']

        participations = bct.participation_coef(self.g, communities,
                                                'undirected')
        within_degrees = bct.module_degree_zscore(self.g, communities, 0)

        connectors, provincials = [], []
        for node in zip(participations, within_degrees, region_names):
            if node[1] > 1:
                if node[0] > 0.6:
                    connectors.append((node[2], node[1], node[0]))
                if node[0] < 0.4:
                    provincials.append((node[2], node[1], node[0]))

        print("Connectors", connectors)
        print("Provincials", provincials)

        self.stats['RegionName'] = region_names
        self.stats['Community'] = communities
        self.stats['wMD'] = within_degrees
        self.stats['PC'] = participations

        plots.create_box_plot_nodes('PC', self.stats, self.name)

        plots.create_plot_communitycolored(
            "reports/plots/ParticipationCoeff/" + str(self.name) + '_' +
            str(self.communities_algorithm) + ".pdf",
            ('fMRI' if self.name == 'fmri' else 'EEG-' + self.name),
            'Within-module Degree Z-score',
            within_degrees,
            'Participation Coefficient',
            participations,
            communities=communities,
            xticks=[-3, -2, -1, 0, 1, 2, 3],
            yticks=[0, 0.2, 0.4, 0.6, 0.8, 1.0])

        return self.stats
def test_pc():
    x = load_sample(thres=.4)
    # ci,q = bct.modularity_und(x)
    ci = np.load(mat_path('sample_partition.npy'))

    pc = np.load(mat_path('sample_pc.npy'))

    pc_ = bct.participation_coef(x, ci)
    print(list(zip(pc, pc_)))

    assert np.allclose(pc, pc_, atol=0.02)
Example #9
def cal_dynamic_graph(MTD, impose=False, threshold=False):
    '''calculate graph metrics across time (dynamic)'''
    #setup outputs
    time_points = MTD.shape[0]
    ci = np.zeros([MTD.shape[1], MTD.shape[0]])
    q = np.zeros([MTD.shape[0]])
    WMD = np.zeros([MTD.shape[1], MTD.shape[0]])
    PC = np.zeros([MTD.shape[1], MTD.shape[0]])
    WW = np.zeros([MTD.shape[1], MTD.shape[0]])
    BW = np.zeros([MTD.shape[1], MTD.shape[0]])

    #modularity
    if impose:
        ci = np.tile(
            np.loadtxt(
                '/home/despoB/kaihwang/Rest/ThaGate/ROIs/Morel_Striatum_Gordon_CI'
            ), [time_points, 1]).T

    for i, t in enumerate(range(0, time_points)):
        matrix = MTD[i, :, :]

        #need to deal with NANs because of coverage (no signal in some ROIs)
        matrix[np.isnan(matrix)] = 0

        #threshold here
        if threshold:
            matrix = bct.threshold_proportional(matrix, threshold)

        #modularity
        if impose == False:
            ci[:, i], q[i] = bct.modularity_louvain_und_sign(matrix)

        #PC
        # for now, no negative weights
        matrix[matrix < 0] = 0
        PC[:, i] = bct.participation_coef(matrix, ci[:, i])

        #WMD
        WMD[:, i] = bct.module_degree_zscore(matrix, ci[:, i])

        ## within weight
        WW[:, i] = cal_within_weight(matrix, ci[:, i])

        ## between Weight
        BW[:, i] = cal_between_weight(matrix, ci[:, i])

        # calculate Q using the imposed CI partition
        if impose:
            q[i] = cal_modularity_w_imposed_community(matrix, ci[:, i])

    return ci, q, PC, WMD, WW, BW
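The cal_within_weight and cal_between_weight helpers used above are not shown here. A minimal sketch, assuming they return each node's summed edge weight to nodes inside (respectively outside) its own community:

import numpy as np

def cal_within_weight(matrix, ci):
    ci = np.asarray(ci)
    same = ci[:, None] == ci[None, :]    # True where two nodes share a community
    return np.sum(np.asarray(matrix) * same, axis=1)

def cal_between_weight(matrix, ci):
    ci = np.asarray(ci)
    diff = ci[:, None] != ci[None, :]    # True where two nodes differ in community
    return np.sum(np.asarray(matrix) * diff, axis=1)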
Example #10
def do_opt(adj,mods,option):
    if option=='global efficiency':
        return bct.efficiency_wei(adj)
    elif option=='local efficiency':
        return bct.efficiency_wei(adj,local=True)
    elif option=='average strength':
        return bct.strengths_und(adj)
    elif option=='clustering coefficient':
        return bct.clustering_coef_wu(adj)
    elif option=='eigenvector centrality':
        return bct.eigenvector_centrality_und(adj)
    elif option=='binary kcore':
        return bct.kcoreness_centrality_bu(adj)[0]

    elif option=='modularity':
        return bct.modularity_und(adj,mods)[1]
    elif option=='participation coefficient':
        return bct.participation_coef(adj,mods)
    elif option=='within-module degree':
        return bct.module_degree_zscore(adj,mods)
Example #11
def do_opt(adj, mods, option):
    if option == 'global efficiency':
        return bct.efficiency_wei(adj)
    elif option == 'local efficiency':
        return bct.efficiency_wei(adj, local=True)
    elif option == 'average strength':
        return bct.strengths_und(adj)
    elif option == 'clustering coefficient':
        return bct.clustering_coef_wu(adj)
    elif option == 'eigenvector centrality':
        return bct.eigenvector_centrality_und(adj)
    elif option == 'binary kcore':
        return bct.kcoreness_centrality_bu(adj)[0]

    elif option == 'modularity':
        return bct.modularity_und(adj, mods)[1]
    elif option == 'participation coefficient':
        return bct.participation_coef(adj, mods)
    elif option == 'within-module degree':
        return bct.module_degree_zscore(adj, mods)
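A hypothetical usage sketch for do_opt on a random symmetric matrix; `mods` is a community affiliation vector, obtained here with bct.modularity_und as in the other examples on this page.

import numpy as np
import bct

adj = np.random.rand(20, 20)
adj = (adj + adj.T) / 2       # make the matrix symmetric
np.fill_diagonal(adj, 0)      # no self-connections

mods, q = bct.modularity_und(adj)
pc = do_opt(adj, mods, 'participation coefficient')
wmd = do_opt(adj, mods, 'within-module degree')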
Example #12
def test_pipline():
    '''A run-through test using a csv input from Aaron'''
    # load matrix
    matrix = np.genfromtxt('HCP_MMP1_roi-pair_corr.csv',
                           delimiter=',',
                           dtype=None)
    matrix[np.isnan(matrix)] = 0.0

    # step through costs, do infomap, return final infomap across cost
    max_cost = .15
    min_cost = .01

    # ave consensus across costs
    partition = ave_consensus_costs_parition(matrix, min_cost, max_cost)
    partition = np.array(partition) + 1

    # import thresholded matrix to BCT, import partition, run WMD/PC
    PCs = np.zeros((len(np.arange(min_cost, max_cost + 0.01,
                                  0.01)), matrix.shape[0]))
    WMDs = np.zeros((len(np.arange(min_cost, max_cost + 0.01,
                                   0.01)), matrix.shape[0]))

    for i, cost in enumerate(np.arange(min_cost, max_cost, 0.01)):

        tmp_matrix = threshold(matrix.copy(), cost)

        #PC
        PCs[i, :] = bct.participation_coef(tmp_matrix, partition)
        #WMD
        WMDs[i, :] = bct.module_degree_zscore(matrix, partition)

    np.save("partition", partition)
    np.save("PCs", PCs)
    np.save("WMDs", WMDs)

    #alternatively, merge consensus using the power method
    recursive_partition = power_recursive_partition(matrix, min_cost, max_cost)
    recursive_partition = recursive_partition + 1

    np.save('rescursive_partition', recursive_partition)
Example #13
def cal_sFC_graph(subject, sequence, roi, impose=False, threshold=1.0):
    ''' load TS and run static FC'''
    ts_path = '/home/despoB/kaihwang/Rest/ThaGate/NotBackedUp/'
    fn = ts_path + str(subject) + '_%s_%s_000.netts' % (roi, sequence)
    ts = np.loadtxt(fn)

    matrix = np.corrcoef(ts)
    matrix[np.isnan(matrix)] = 0

    matrix = bct.threshold_proportional(matrix, threshold)

    num_iter = 200
    consensus = np.zeros((num_iter, matrix.shape[0], matrix.shape[1]))

    for i in np.arange(0, num_iter):
        ci, _ = bct.modularity_louvain_und_sign(matrix, qtype='sta')
        consensus[i, :, :] = community_matrix(ci)

    mean_matrix = np.nanmean(consensus, axis=0)
    mean_matrix[np.isnan(mean_matrix)] = 0
    CI, Q = bct.modularity_louvain_und_sign(mean_matrix, qtype='sta')

    #no negative weights
    matrix[matrix < 0] = 0

    PC = bct.participation_coef(matrix, CI)

    #WMD
    WMD = bct.module_degree_zscore(matrix, CI)

    ## within weight
    WW = cal_within_weight(matrix, CI)

    ## between Weight
    BW = cal_between_weight(matrix, CI)

    return CI, Q, PC, WMD, WW, BW
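The community_matrix helper used in the consensus loop above is not shown. A minimal sketch, assuming it turns one partition into an N x N co-assignment (agreement) matrix with 1 wherever two nodes share a community:

import numpy as np

def community_matrix(ci):
    ci = np.asarray(ci)
    agreement = (ci[:, None] == ci[None, :]).astype(float)
    np.fill_diagonal(agreement, 0)   # ignore trivial self-agreement
    return agreement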
 def calculate_communities(self, reorder=False, **kwargs):
     assert self.community_alg is not None, \
         print("Community algorithm has not been set!")
     G = self.G
     graph_mat = self.graph_mat
     # calculate community structure
     comm, mod = self.community_alg(graph_mat, **kwargs)
     # if there is a reference, relabel communities based on their closest association    
     if self.ref_community:
         comm = self._relabel_community(comm,self.ref_community)
     # label vertices of G
     G.vs['community'] = comm
     G.vs['within_module_degree'] = bct.module_degree_zscore(graph_mat,comm)
     if np.min(graph_mat) < 0:
         participation_pos, participation_neg = bct.participation_coef_sign(graph_mat, comm)
         G.vs['part_coef_pos'] = participation_pos
         G.vs['part_coef_neg'] = participation_neg
     else:
         G.vs['part_coef'] = bct.participation_coef(graph_mat, comm)
     if reorder:
         self.reorder()
     # calculate subgraph (within-community) characteristics
     self._subgraph_analysis()
     return mod
def load_network(kind,
                 parcel,
                 data="lau",
                 weights='log',
                 hemi="both",
                 version=1,
                 subset="all",
                 path=None):
    '''
    Function to load a dictionary containing information about the specified
    brain network.

    Parameters
    ----------
    kind : string
        Either 'SC' or 'FC'.
    hemi : string
        Either "both", "L" or "R".
    weights : string
        The weights of the edges. The options are "normal", "log" or "binary".
        The default is "log".
    data : string
        Either "HCP" or "lau".
    parcel : string
        Either "68", "114", ... [if 'lau'] / "s400", "s800" [if "HCP"]
    version : int
        Version of the network.
    subset : string
        Either 'discov', 'valid' or 'all'
    path : string
        Path to the "data" folder in which the data will be stored. If
        None, the current folder is assumed.

    Returns
    -------
    Network : dictionary
        Dictionary storing relevant attributes about the network
    '''

    # Initialize dictionary + store basic information about the network
    Network = {}
    Network["info"] = {}
    Network["info"]["kind"] = kind
    Network["info"]["parcel"] = parcel
    Network["info"]["data"] = data
    Network["info"]["hemi"] = hemi
    Network["info"]["weights"] = weights
    Network["info"]["version"] = version
    Network["info"]["subset"] = subset

    # Modify parameter names to what they are in file names
    version = '' if version == 1 else '_v' + str(version)
    subset = '' if subset == 'all' else subset
    hemi = '' if hemi == 'both' else hemi

    # Store important paths for loading the relevant data
    main_path = path + "/brainNetworks/" + data + "/"
    network_path = (main_path + "matrices/consensus/" + subset + kind +
                    parcel + hemi + version + "/")
    matrix_path = network_path + "/" + weights

    # Store general information about the network's parcellation
    parcel_info = get_general_parcellation_info(parcel)
    Network['order'] = parcel_info[0]
    Network['noplot'] = parcel_info[1]
    Network['lhannot'] = parcel_info[2]
    Network['rhannot'] = parcel_info[3]
    Network['atlas'] = parcel_info[4]

    # Load the cammoun_id of the parcellation, if Cammoun (i.e. 033, 060, etc.)
    if parcel[0] != 's':
        Network['cammoun_id'] = parcel_to_n(parcel)

    # masks
    masks = get_node_masks(Network, path=main_path)
    Network['node_mask'] = masks[0]
    Network['hemi_mask'] = masks[1]
    Network['subcortex_mask'] = masks[2]

    # hemisphere
    Network['hemi'] = get_hemi(Network, path=main_path)

    # coordinates
    Network['coords'] = get_coordinates(Network, path=main_path)

    # Adjacency matrix
    Network['adj'], last_modified = get_adjacency(Network,
                                                  matrix_path,
                                                  minimal_processing=True,
                                                  return_last_modified=True)

    # Test whether the network is connected. Raise a warning if not...
    if not np.all(bct.reachdist(Network['adj'])[0]):
        warnings.warn(("This brain network appears to be disconnected. This "
                       "might cause problems for the computation of the other "
                       "measures"))

    # node strength
    Network["str"] = np.sum(Network['adj'], axis=0)

    # Inverse of adjacency matrix
    inv = Network['adj'].copy()
    inv[Network['adj'] > 0] = 1 / inv[Network['adj'] > 0]
    Network["inv_adj"] = inv

    # distance
    Network["dist"] = cdist(Network["coords"], Network["coords"])

    # clustering coefficient
    Network["cc"] = bct.clustering_coef_wu(Network['adj'])

    # shortest path
    Network['sp'] = get_shortest_path(Network,
                                      matrix_path=matrix_path,
                                      last_modified=last_modified)

    # diffusion embedding
    de = compute_diffusion_map(Network['adj'],
                               n_components=10,
                               return_result=True,
                               skip_checks=True)
    Network["de"] = de[0]
    Network["de_extra"] = de[1]

    # Principal components
    Network['PCs'], Network['PCs_ev'] = getPCs(Network['adj'])

    # eigenvector centrality
    Network["ec"] = bct.eigenvector_centrality_und(Network['adj'])

    # mean first passage time
    Network["mfpt"] = bct.mean_first_passage_time(Network['adj'])

    # betweenness centrality
    Network['bc'] = get_betweenness(Network,
                                    matrix_path=matrix_path,
                                    last_modified=last_modified)

    # routing efficiency
    Network["r_eff"] = efficiency(Network)

    # diffusion efficiency
    Network["d_eff"] = efficiency_diffusion(Network)

    # subgraph centrality
    Network["subc"] = bct.subgraph_centrality(Network["adj"])

    # closeness centrality
    Network['clo'] = 1 / np.mean(Network['sp'], axis=0)

    # communities + participation coefficient
    path = matrix_path + "/communities/"
    if os.path.exists(path):
        files = []
        for i in os.listdir(path):
            if os.path.isfile(os.path.join(path, i)) and 'ci_' in i:
                files.append(i)
        if len(files) > 0:
            Network["ci"] = []
            for file in files:
                Network["ci"].append(np.load(os.path.join(path, file)))

            Network["ppc"] = []
            for i in range(len(files)):
                ppc = bct.participation_coef(Network['adj'], Network["ci"][i])
                Network["ppc"].append(ppc)

    # Edge lengths
    if (data == "HCP") and (kind == "SC"):
        path = main_path + "matrices/" + subset + kind + parcel + hemi + "_lengths.npy"
        if os.path.exists(path):
            Network["lengths"] = np.load(path)

    # streamline connection lengths
    path = network_path + "/len.npy"
    if os.path.exists(path):
        Network['len'] = np.load(path)

    # ROI names
    if parcel[0] != "s":
        Network['ROInames'] = get_ROInames(Network)

    # geodesic distances between nodes
    if parcel[0] == "s":
        n = parcel[1:]
        fname_l = n + "Parcels7Networks_lh_dist.csv"
        fname_r = n + "Parcels7Networks_rh_dist.csv"
    else:
        fname_l = "scale" + Network['cammoun_id'] + "_lh_dist.csv"
        fname_r = "scale" + Network['cammoun_id'] + "_rh_dist.csv"
    Network['geo_dist_L'] = pd.read_csv(main_path + "/geodesic/medial/" +
                                        fname_l,
                                        header=None).values
    Network['geo_dist_R'] = pd.read_csv(main_path + "/geodesic/medial/" +
                                        fname_r,
                                        header=None).values

    return Network
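A hypothetical call to load_network; the path, parcellation and data values below are placeholders and assume the folder layout described in the docstring, with the required matrices already on disk.

net = load_network(kind='SC', parcel='114', data='lau', weights='log',
                   hemi='both', path='/path/to/project/data')  # placeholder path
print(net['info'])
print(net['str'].shape)        # node strength, one value per node
if 'ppc' in net:
    print(len(net['ppc']))     # participation coefficients, one array per stored partition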
#
#    medianEL = np.median(np.array((el.values()), dtype=float))
#    medianELNorm = np.median(np.array((elNorm), dtype=float))
#    extras.writeResults(medianEL, "medianEL", ofb, propDict=propDict, append=appVal)
#    extras.writeResults(medianELNorm, "medianELNorm", ofb, propDict=propDict, append=appVal)
#
#    del(el, elNorm, meanEL, meanELNorm, medianEL, medianELNorm)
    
    # modularity metrics
    ci = bct.modularity_louvain_und(a.bctmat)
    Q = ci[1]
    ciN = a.assignbctResult(ci[0])
    extras.writeResults(Q, "Q", ofb, propDict=propDict, append=appVal)
    extras.writeResults(ciN, "ci", ofb , propDict=propDict, append=appVal)  
    
    pcCent = bct.participation_coef(a.bctmat,ci[0])
    pcCent = a.assignbctResult(pcCent)
    extras.writeResults(pcCent, "pcCent", ofb, propDict=propDict, append=appVal)
    del pcCent
    
    wmd = extras.withinModuleDegree(a.G, ciN)
    extras.writeResults(wmd, "wmd", ofb, append=appVal)
    del wmd    
    
    nM = len(np.unique(ci[0]))
    extras.writeResults(nM, "nM", ofb, propDict=propDict, append=appVal)
    del(nM)
    del(ci, ciN, Q)
    
#    # rich club measures
#    rc = mbt.nx.rich_club_coefficient(a.G, normalized=False)
Example #17
    def centrality(self,
                   sbj_number,
                   nodes_number,
                   atlas,
                   make_symmetric=True,
                   upper_threshold=None,
                   lower_threshold=None,
                   binarize=False):
        '''
        Computing centrality measures of the adjacency matrix


        Parameters
        ----------
        sbj_number: int |
                    number of subjects
        nodes_number: int|
                      number of nodes
        atlas: excel file |
                    please see the example available in the repo (e.g. new_atlas_coords.xlsx)
        make_symmetric: Boolean|
                        True indicates that the matrix is either upper
                        or lower triangular and needs to be symmetrized
                        False indicates that the matrix is already a full matrix
        upper_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values with respect to the maximum. Values
                        under that threshold will be set to 0 (Default is None)
        lower_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values with respect to the maximum. Values
                        above that threshold will be set to 0 (Default is None)
        binarize: Boolean |
                        True will make the connectivity matrix binary
                        Default is False


        Returns
        -------

        dict: dictionary with the following keys |

        edge_betweeness_bin: | np.ndarray
                            Edge betweenness centrality is the fraction of all
                            shortest paths in the network that contain a given
                            edge. Edges with high values of betweenness centrality
                            participate in a large number of shortest paths.
                            It will return node betweenness centrality vector.
        edge_betweeness_wei: | np.ndarray
                            Edge betweenness centrality is the fraction of all
                            shortest paths in the network that contain a given
                            edge. Edges with high values of betweenness centrality
                            participate in a large number of shortest paths.
                            It will return node betweenness centrality vector.
        eigenvector_centrality_und: | np.ndarray
                            Eigenvector centrality is a self-referential measure
                            of centrality: nodes have high eigenvector centrality
                            if they connect to other nodes that have high
                            eigenvector centrality. The eigenvector centrality of
                            node i is equivalent to the ith element in the eigenvector
                            corresponding to the largest eigenvalue of the adjacency matrix.
                            It will return the eigenvector associated with the
                            largest eigenvalue of the matrix
        coreness_kcoreness_centrality_bu: | np.ndarray
                            The k-core is the largest subgraph comprising nodes
                            of degree at least k. The coreness of a node is k if
                            the node belongs to the k-core but not to the (k+1)-core.
                            This function computes the coreness of all nodes for a
                            given binary undirected connection matrix.
                            It will return the node coreness.
        kn_kcoreness_centrality_bu: | np.ndarray
                            The k-core is the largest subgraph comprising nodes
                            of degree at least k. The coreness of a node is k if
                            the node belongs to the k-core but not to the (k+1)-core.
                            This function computes the coreness of all nodes for a
                            given binary undirected connection matrix.
                            It will return the size of k-core
        module_degree_zscore: | np.ndarray
                            The within-module degree z-score is a within-module
                            version of degree centrality. It will return
                            within-module degree Z-score
        participation_coef: | np.ndarray
                            Participation coefficient is a measure of diversity
                            of intermodular connections of individual nodes.
                            It will return the participation coefficient
        subgraph_centrality: | np.ndarray
                            The subgraph centrality of a node is a weighted sum
                            of closed walks of different lengths in the network
                            starting and ending at the node. This function returns
                            a vector of subgraph centralities for each node of the
                            network. It will return the subgraph centrality

        '''

        with open(self.net_label_txt) as f:
            net = f.read().splitlines()

        self.atlas = pd.read_excel(atlas, header=None)
        self.atlas = np.array(self.atlas)
        self.ci_original = self.atlas[:, 8]

        self.centrality = {
            "edge_betweeness_bin":
            np.zeros([sbj_number, nodes_number]),
            "edge_betweeness_wei":
            np.zeros([sbj_number, nodes_number]),
            "eigenvector_centrality_und":
            np.zeros([sbj_number, nodes_number]),
            "coreness_kcoreness_centrality_bu":
            np.zeros([sbj_number, nodes_number]),
            "kn_kcoreness_centrality_bu":
            np.zeros([sbj_number, nodes_number]),
            "module_degree_zscore":
            np.zeros([sbj_number, nodes_number]),
            "participation_coef":
            np.zeros([sbj_number, nodes_number]),
            "subgraph_centrality":
            np.zeros([sbj_number, nodes_number])
        }

        for subj in range(len(self.matrices_files)):
            self.matrix = pd.read_csv(self.matrices_files[subj],
                                      sep=' ',
                                      header=None)
            self.matrix = np.array(self.matrix)
            if make_symmetric == True:
                self.matrix = self.matrix + self.matrix.T - np.diag(
                    self.matrix.diagonal())
            else:
                self.matrix = self.matrix

            self.max = np.max(self.matrix.flatten())
            if upper_threshold == None:
                self.matrix = self.matrix
            else:
                self.matrix[self.matrix < upper_threshold * self.max / 100] = 0
            if lower_threshold == None:
                self.matrix = self.matrix
            else:
                self.matrix[self.matrix > lower_threshold * self.max / 100] = 0

            self.matrix_bin = bct.algorithms.binarize(self.matrix)
            self.matrix_weight = self.matrix

            if binarize == True:
                self.matrix = bct.algorithms.binarize(self.matrix)
            else:
                self.matrix = self.matrix

            np.fill_diagonal(self.matrix, 0)
            np.fill_diagonal(self.matrix_bin, 0)
            np.fill_diagonal(self.matrix_weight, 0)

            self.BC = bct.betweenness_bin(self.matrix_bin)
            self.centrality['edge_betweeness_bin'][subj] = self.BC

            self.BC_w = bct.betweenness_wei(self.matrix_weight)
            self.centrality['edge_betweeness_wei'][subj] = self.BC_w

            self.v = bct.eigenvector_centrality_und(self.matrix)
            self.centrality['eigenvector_centrality_und'][subj] = self.v

            self.coreness, self.kn = bct.kcoreness_centrality_bu(
                self.matrix_bin)
            self.centrality['coreness_kcoreness_centrality_bu'][
                subj] = self.coreness
            self.centrality['kn_kcoreness_centrality_bu'][subj] = self.kn

            self.Z = bct.module_degree_zscore(self.matrix, ci=self.ci_original)
            self.centrality['module_degree_zscore'][subj] = self.Z

            self.P = bct.participation_coef(self.matrix, ci=self.ci_original)
            self.centrality['participation_coef'][subj] = self.P

            self.Cs = bct.subgraph_centrality(self.matrix_bin)
            self.centrality['subgraph_centrality'][subj] = self.Cs

        return self.centrality
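A hypothetical call to the centrality method above, assuming `brain` is an instance of the enclosing class with matrices_files and net_label_txt already set; the atlas filename is the example file mentioned in the docstring.

cent = brain.centrality(sbj_number=10,
                        nodes_number=90,
                        atlas='new_atlas_coords.xlsx',
                        make_symmetric=True,
                        binarize=False)
print(cent['participation_coef'].shape)   # (10, 90): one PC vector per subject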
Example #18
def figure_4(data, specify, t_ids_b, method='laplacian', panels='all',
             show=True, save=False, save_path=None):
    '''
    Function to create Figure 4

    Parameters
    ----------
    data : dict
        Dictionary of the data to be used to generate the figures. If the
        required data is not found in this dictionary, the item of Figure
        4 that requires this data will not be created and a message
        will be printed.
    specify : dict
        Dictionary containing information about the trajectories that will be
        shown in panel a (see main_script.py for an example).
    t_ids_b: List
        List of time point indices indicating the time points at which
        the centrality slope distributions will be plotted on the surface
        of the brain (panel b).
    method : str
        Method used to compute the transition probabilities. The purpose of
        this parameter is to choose whether the x_scale of our figures should
        be linear or logarithmic.
    panels : str or list
        List of the panels of Figure 4 that we want to create. If we want
        to create all of them, use 'all'. Otherwise, individual panels can
        be specified. For example, we could have panels=['a'] or
        panels=['a', 'b'].
    show : Boolean
        If True, the figures will be displayed. If not, the figures will
        not be displayed.
    save : Boolean
        If True, the figures will be saved in the folder specified by the
        save_path parameter.
    save_path : str
        Path of the folder in which the figures will be saved.
    '''

    if show is False:
        plt.ioff()

    if panels == 'all':
        panels = ['a', 'b', 'c']

    n = len(data['sc'])
    k = len(data['t_points'])

    # Slopes of the closeness centrality trajectories
    slopes = np.gradient(data['cmulti'], axis=0)

    if 'a' in panels:

        required_entries = ['t_points', 'cmulti']
        requirements = check_requirements(data, required_entries)
        if requirements:

            node_ids = specify['ID']

            fig = plt.figure(figsize=(9, 3))
            ax = fig.add_subplot(111)
            for i in range(n):
                ax.plot(data['t_points'],
                        data['cmulti'][:, i],
                        c='lightgray')

            abs_max_color = max(-1 * np.amin(slopes), np.amax(slopes))

            for i, id in enumerate(node_ids):
                norm = plt.Normalize(-abs_max_color, abs_max_color)
                slope_colors = RdBu_11_r.mpl_colormap(norm(slopes[:, id]))
                for ii in range(k-1):
                    plt.plot([data['t_points'][ii],
                              data['t_points'][ii+1]],
                             [data['cmulti'][ii, id],
                              data['cmulti'][ii+1, id]],
                             c=slope_colors[ii, :],
                             linewidth=3)

            if method == 'laplacian':
                plt.xscale('log')

            plt.xlabel('t')
            plt.ylabel("c_multi")

            if save:
                figure_name = 'cmulti_with_gradient.png'
                fig.savefig(os.path.join(save_path, figure_name))

    if 'b' in panels:

        required_entries = ['t_points', 'lhannot', 'rhannot', 'noplot',
                            'order']
        requirements = check_requirements(data, required_entries)
        if requirements:

            fig = plt.figure(figsize=(9, 3))
            ax = fig.add_subplot(111)
            for i in range(n):
                ax.plot(data['t_points'],
                        slopes[:, i],
                        c='lightgray',
                        zorder=0)
            for t_id in t_ids_b:
                plt.scatter(np.zeros((n))+data['t_points'][t_id],
                            slopes[t_id, :],
                            marker='s', c=slopes[t_id, :],
                            cmap=RdBu_11_r.mpl_colormap, rasterized=True,
                            zorder=1)

            if method == 'laplacian':
                plt.xscale('log')

            plt.xlabel('t')
            plt.ylabel("slope")

            if save:
                figure_name = 'slopes.png'
                fig.savefig(os.path.join(save_path, figure_name))

            for t_id in t_ids_b:
                im = plot_fsaverage(slopes[t_id, :],
                                    lhannot=data['lhannot'],
                                    rhannot=data['rhannot'],
                                    noplot=data['noplot'],
                                    order=data['order'],
                                    views=['lateral', 'm'],
                                    vmin=np.amin(slopes[t_id, :]),
                                    vmax=np.amax(slopes[t_id, :]),
                                    colormap=RdBu_11_r.mpl_colormap)

                if save:
                    figure_name = ('slopes_brain_surface_' +
                                   str(int(round(data['t_points'][t_id]))) +
                                   '.png')
                    im.save_image(os.path.join(save_path, figure_name),
                                  mode='rgba')

    if 'c' in panels:

        required_entries = ['sc', 't_points', 'ci']
        requirements = check_requirements(data, required_entries)
        if requirements:

            measures = []
            labels = []

            measures.append(np.sum(data['sc'], axis=0))
            labels.append("strength")

            measures.append(-bct.clustering_coef_wu(data['sc']))
            labels.append("clustering(-)")

            for ci in data['ci']:
                measures.append(bct.participation_coef(data['sc'], ci))
                labels.append(("participation (" +
                               str(int(round(ci.max()))) +
                               ")"))

            k = len(data['t_points'])
            m = len(measures)

            corrs = np.zeros((m, k))
            for i in range(m):
                for j in range(k):
                    corrs[i, j] = pearsonr(slopes[j, :], measures[i])[0]

            corr_min = np.amin(corrs)
            corr_max = np.amax(corrs)

            for i in range(m):
                plt.figure()
                plt.imshow(corrs[i, :][np.newaxis, :],
                           cmap=Spectral_4_r.mpl_colormap,
                           vmin=corr_min,
                           vmax=corr_max,
                           aspect=0.1 * k)
                plt.axis('off')
                plt.title(labels[i])

                if save is True:
                    figure_name = "correlations_" + labels[i] + ".png"
                    plt.savefig(os.path.join(save_path, figure_name))

    if show is False:
        plt.close('all')
        plt.ion()
Example #19
def cal_thalamus_and_cortical_ROIs_nodal_properties(Thalamocortical_corrmat, Cortical_adj, \
 Cortical_plus_thalamus_CI, Thalamus_CIs, Cortical_CI, Cortical_ROIs_positions, Thalamus_voxel_positions, cost_thresholds):
    '''Function to calculate voxel-wise nodal properties of the thalamus, and nodal properties of cortical ROIs. 
	Metrics to be calculated include:
	
	Participation Coefficient (PC)
	Between network connectivity weight (BNWR)
		Ratio of connection weight devoted to between network interactions
	Number of network/modules/components connected (NNC)
	Within module degree zscore (WMD)
		For WMD, matrices will be binarized, and normalized to corticocortical connections' mean and SD

	usage: PCs, BNWRs, NNCs, WMDs, bPCs, mean_NNC, mean_BNWR, mean_PC, mean_bPC, mean_WMD = cal_thalamus_and_cortical_ROIs_nodal_properties(Thalamocor_adj,
                Cortical_adj,
                Cortical_plus_thalamus_CI,
                Thalamus_CIs,
                Cortical_CI,
                Cortical_ROIs_positions,
                Thalamus_voxel_positions,
                cost_thresholds)
    
    ----
    Parameters
    ----
    Thalamocor_adj: Thalamocortical adj matrix
    Cortical_adj: corticocortical adj matrix
    Cortical_plus_thalamus_CI: A vector of community/module/network assignment of all nodes, cortical ROIs + thalamic voxels
    Thalamus_CIs: A vector of network assignments for thalamic voxels
    Cortical_CI: A vector of network assignments for cortical ROIs 
    Cortical_ROIs_positions: a position vector indicating in the thalamocortical adj matrix which rows/columns are cortical ROIs
    Thalamus_voxel_positions: a position vector indicating in the thalamocortical adj matrix which rows/columns are thalamic voxels
    cost_thresholds: the thresholds that can threshold the thalamocortical edges at density .01 to .15.

	Return variables are graph metrics across thresholds (suffixed with "s") or averaged across thresholds (prefixed with "mean").

    '''

    ##Thalamus nodal roles
    Thalamocortical_corrmat[np.isnan(Thalamocortical_corrmat)] = 0

    #PC
    PCs = []  #np.zeros(Cortical_plus_thalamus_CI.size)
    bPCs = []  #np.zeros(Cortical_plus_thalamus_CI.size)
    #BNWR between network connectivity weight
    BNWRs = []  #np.zeros(Cortical_plus_thalamus_CI.size)
    #get number of networks/communities connected
    NNCs = []  #np.zeros(Cortical_plus_thalamus_CI.size)

    #loop through costs
    for c in cost_thresholds:
        #copy adj matrix and then threshold
        Par_adj = Thalamocortical_corrmat.copy()
        #remove weights connected to low SNR communities (CI==0, orbital frontal, inferior temporal)
        Par_adj[Cortical_ROIs_positions[Cortical_CI == 0], :] = 0
        Par_adj[:, Cortical_ROIs_positions[Cortical_CI == 0]] = 0
        Par_adj[Par_adj < c] = 0

        #binary
        bPar_adj = Par_adj.copy()
        bPar_adj = bPar_adj > c

        #PC
        PCs += [bct.participation_coef(Par_adj, Cortical_plus_thalamus_CI)]
        bPCs += [bct.participation_coef(bPar_adj, Cortical_plus_thalamus_CI)]
        #aPCs += [bct.participation_coef(Par_adj, Cortical_plus_thalamus_CI)]

        #BNWR and NNCs
        Tha_BNWR = np.zeros(Cortical_plus_thalamus_CI.size)
        Tha_NNCs = np.zeros(Cortical_plus_thalamus_CI.size)
        for ix, i in enumerate(Thalamus_voxel_positions):
            sum_between_weight = np.nansum(
                Par_adj[i, Cortical_plus_thalamus_CI != Thalamus_CIs[ix]])
            sum_total = np.nansum(Par_adj[i, :])
            Tha_BNWR[i] = sum_between_weight / sum_total
            Tha_BNWR[i] = np.nan_to_num(Tha_BNWR[i])

            Tha_NNCs[i] = len(
                np.unique(Cortical_plus_thalamus_CI[Par_adj[i, ] != 0]))
        BNWRs += [Tha_BNWR]
        NNCs += [Tha_NNCs]

    ##Cortical nodal roles
    Cortical_adj[np.isnan(Cortical_adj)] = 0

    Cortical_PCs = []  #np.zeros(Cortical_CI.size)
    Cortical_bPCs = []  #np.zeros(Cortical_CI.size)
    Cortical_BNWR = []  #np.zeros(Cortical_CI.size)
    Cortical_NNCs = []  #np.zeros(Cortical_plus_thalamus_CI.size)

    for ix, c in enumerate(np.arange(0.01, 0.16, 0.01)):
        M = bct.threshold_proportional(Cortical_adj, c, copy=True)
        bM = bct.weight_conversion(M, 'binarize', copy=True)

        #PC
        Cortical_PCs += [bct.participation_coef(M, Cortical_CI)]
        Cortical_bPCs += [bct.participation_coef(bM, Cortical_CI)]

        #BNWR and NNC
        BNWR = np.zeros(Cortical_CI.size)
        Cor_NNCs = np.zeros(Cortical_plus_thalamus_CI.size)
        for i in range(len(Cortical_CI)):
            sum_between_weight = np.nansum(M[i, Cortical_CI != Cortical_CI[i]])
            sum_total = np.nansum(M[i, :])
            BNWR[i] = sum_between_weight / sum_total
            BNWR[i] = np.nan_to_num(BNWR[i])

            Cor_NNCs[i] = len(np.unique(Cortical_CI[M[i, ] != 0]))
        Cortical_BNWR += [BNWR]
        Cortical_NNCs += [Cor_NNCs]

    #do WMD, first convert matrices to binary, then calculate z score using mean and std of "corticocortical degrees"
    Cortical_wm_mean = {}
    Cortical_wm_std = {}
    Cortical_WMDs = []  #np.zeros(Cortical_CI.size)
    WMDs = []  #np.zeros(Cortical_plus_thalamus_CI.size)
    for ix, c in enumerate(np.arange(0.01, 0.16, 0.01)):

        #threshold by density
        bM = bct.weight_conversion(
            bct.threshold_proportional(Cortical_adj, c, copy=True), 'binarize')
        Cortical_WMDs += [bct.module_degree_zscore(bM, Cortical_CI)]

        #store the mean and std of within-module degree for each network
        for CI in np.unique(Cortical_CI):
            Cortical_wm_mean[ix + 1, CI] = np.nanmean(
                np.sum(bM[Cortical_CI == CI, :][:, Cortical_CI == CI], 1))
            Cortical_wm_std[ix + 1, CI] = np.nanstd(
                np.sum(bM[Cortical_CI == CI, :][:, Cortical_CI == CI], 1))

        #thalamic WMD, threshold by density
        M = bct.weight_conversion(
            bct.threshold_absolute(Thalamocortical_corrmat,
                                   cost_thresholds[ix],
                                   copy=True), 'binarize')

        tha_wmd = np.zeros(Cortical_plus_thalamus_CI.size)
        for i in np.unique(Cortical_CI):
            tha_wmd[Cortical_plus_thalamus_CI==i] = (np.sum(M[Cortical_plus_thalamus_CI==i][:, Cortical_plus_thalamus_CI==i],1)\
            - Cortical_wm_mean[ix+1,i])/Cortical_wm_std[ix+1,i]
        tha_wmd = np.nan_to_num(tha_wmd)
        WMDs += [tha_wmd]

    # organize output
    NNCs = np.array(NNCs)
    BNWRs = np.array(BNWRs)
    PCs = np.array(PCs)
    bPCs = np.array(bPCs)
    WMDs = np.array(WMDs)

    NNCs[:, Cortical_ROIs_positions] = np.array(
        Cortical_NNCs)[:, Cortical_ROIs_positions]
    BNWRs[:, Cortical_ROIs_positions] = np.array(
        Cortical_BNWR)[:, Cortical_ROIs_positions]
    PCs[:, Cortical_ROIs_positions] = np.array(
        Cortical_PCs)[:, Cortical_ROIs_positions]
    bPCs[:, Cortical_ROIs_positions] = np.array(
        Cortical_bPCs)[:, Cortical_ROIs_positions]
    WMDs[:, Cortical_ROIs_positions] = np.array(
        Cortical_WMDs)[:, Cortical_ROIs_positions]

    # average across thresholds, convert into percentage
    mean_NNC = (np.sum(NNCs, axis=0) / 15.0) * 100
    mean_BNWR = (np.sum(BNWRs, axis=0) / 15.0) * 100
    mean_PC = (np.sum(PCs, axis=0) /
               13.5) * 100  #this is the theoretical upper bound
    mean_bPC = (np.sum(bPCs, axis=0) /
                13.5) * 100  #this is the theoretical upper bound
    mean_WMD = (np.sum(WMDs, axis=0) / 15.0) * 100

    return PCs, BNWRs, NNCs, WMDs, bPCs, mean_NNC, mean_BNWR, mean_PC, mean_bPC, mean_WMD
Example #20
    def __init__(self,
                 kind,
                 parcel,
                 data='lau',
                 hemi='both',
                 binary=False,
                 version=1,
                 subset='all',
                 path=None):

        mainPath = path + "/brainNetworks/" + data + "/"
        home = os.path.expanduser("~")

        self.info = {}
        self.info["kind"] = kind
        self.info["parcel"] = parcel
        self.info["data"] = data
        self.info["hemi"] = hemi
        self.info["binary"] = binary
        self.info["version"] = version
        self.info["subset"] = subset

        if version == 1:
            version = ''
        else:
            version = "_v2"

        if binary is True:
            binary = "b"
        else:
            binary = ''

        if subset == "all":
            subset = ''

        if hemi == "both":
            hemi = ''

        matrxPath = mainPath + "matrices/" + subset + kind + parcel + hemi + binary + version

        # hemisphere
        self.hemi = np.load(matrxPath + "/hemi.npy")

        # Adjacency matrix
        path = matrxPath + ".npy"
        A = np.load(path)

        # Look at time when file was last modified
        last_modified = os.path.getmtime(path)

        # set negative values to 0, fill diagonal, make symmetric
        A[A < 0] = 0
        np.fill_diagonal(A, 0)
        A = (A + A.T) / 2
        self.adj = A

        # Number of nodes in the network
        self.n = len(self.adj)

        # coordinates
        path = mainPath + "coords/coords" + parcel + hemi + ".npy"
        self.coords = np.load(path)

        # Inverse of adjacency matrix
        inv = A.copy()
        inv[A > 0] = 1 / inv[A > 0]
        self.inv_adj = inv

        # distance
        self.dist = cdist(self.coords, self.coords)

        # shortest path
        #
        # Loaded from saved file...
        # IF file not found OR Adjacency was modified after creation,
        # then recompute measure
        path = matrxPath + "/sp.npy"

        if os.path.exists(path) is False:
            print("shortest path not found")
            print("computing shortest path...")
            self.sp = bct.distance_wei(self.inv_adj)[0]
            np.save(matrxPath + "/sp.npy", self.sp)

        elif os.path.getmtime(path) < last_modified:
            print("new adjacency matrix was found")
            print("computing shortest paths...")
            self.sp = bct.distance_wei(self.inv_adj)[0]
            np.save(matrxPath + "/sp.npy", self.sp)

        else:
            self.sp = np.load(path)

        # diffusion embedding
        de = compute_diffusion_map(A, n_components=10, return_result=True)
        self.de = de[0]
        self.de_extra = de[1]

        # Principal components
        self.PCs, self.PCs_ev = load_data.getPCs(self.adj)

        # betweenness centrality
        #
        # Loaded from saved file...
        # IF file not found OR Adjacency was modified after creation,
        # then recompute measure
        path = matrxPath + "/bc.npy"
        if os.path.exists(path) is False:

            print("betweenness centrality not found")
            print("computing betweenness centrality...")
            self.bc = bct.betweenness_wei(self.inv_adj)
            np.save(matrxPath + "/bc.npy", self.bc)

        elif os.path.getmtime(path) < last_modified:
            print("new adjacency matrix was found")
            print("recomputing betweenness centrality...")
            self.bc = bct.betweenness_wei(self.inv_adj)
            np.save(matrxPath + "/bc.npy", self.bc)

        else:
            self.bc = np.load(path)

        # communities + participation coefficient
        path = matrxPath + "/communities/"
        if os.path.exists(path):
            files = []
            for i in os.listdir(path):
                if os.path.isfile(os.path.join(path, i)) and 'ci_' in i:
                    files.append(i)
            if len(files) > 0:
                self.ci = []
                for file in files:
                    self.ci.append(np.load(os.path.join(path, file)))

                self.ppc = []
                for i in range(len(files)):
                    ppc = bct.participation_coef(A, self.ci[i])
                    self.ppc.append(ppc)

        if (data == "HCP") and (kind == "SC"):
            path = mainPath + "matrices/" + subset + kind + parcel + hemi + "_lengths.npy"
            self.lengths = np.load(path)

        # streamline connection lengths
        path = matrxPath + "/len.npy"
        if os.path.exists(path) is True:
            self.len = np.load(path)

        # network information
        if parcel[0] == "s":
            nb = parcel[1:]
            self.order = "LR"
            self.noplot = [b'Background+FreeSurfer_Defined_Medial_Wall', b'']
            self.lhannot = (home + "/"
                            "nnt-data/"
                            "atl-schaefer2018/"
                            "fsaverage/"
                            "atl-Schaefer2018_space-fsaverage_"
                            "hemi-L_desc-" + nb + "Parcels7Networks_"
                            "deterministic.annot")
            self.rhannot = (home + "/"
                            "nnt-data/"
                            "atl-schaefer2018/"
                            "fsaverage/"
                            "atl-Schaefer2018_space-fsaverage_"
                            "hemi-R_desc-" + nb + "Parcels7Networks_"
                            "deterministic.annot")
        else:
            nb = _parcel_to_n(parcel)
            self.order = "RL"
            self.noplot = None
            self.lhannot = (home + "/"
                            "nnt-data/"
                            "atl-cammoun2012/"
                            "fsaverage/"
                            "atl-Cammoun2012_space-fsaverage_"
                            "res-" + nb + "_hemi-L_deterministic.annot")
            self.rhannot = (home + "/"
                            "nnt-data/"
                            "atl-cammoun2012/"
                            "fsaverage/"
                            "atl-Cammoun2012_space-fsaverage_"
                            "res-" + nb + "_hemi-R_deterministic.annot")
            self.cammoun_id = nb
Example #21
    dsets = ['MGH']

    # import thresholded matrix to BCT, import partition, run WMD/PC
    PC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), 18166))
    WMD = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), 18166))
    EC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), 18166))
    GC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), 18166))
    SC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), 18166))
    ST = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), 18166))

    for ix, matrix in enumerate(MATS):
        for i, cost in enumerate(np.arange(min_cost, max_cost, 0.01)):

            tmp_matrix = threshold(matrix.copy(), cost)

            PC[i, :] = bct.participation_coef(tmp_matrix, CI)
            #WMD[i,:] = bct.module_degree_zscore(tmp_matrix,CI)
            #EC[i,:] = bct.eigenvector_centrality_und(tmp_matrix)
            #GC[i,:], _ = bct.gateway_coef_sign(tmp_matrix, CI)
            #SC[i,:] = bct.subgraph_centrality(tmp_matrix)
            #ST[i,:] = bct.strengths_und(tmp_matrix)

            mes = 'finished running cost:%s' % cost
            print(mes)

        # fn = 'images/Voxelwise_4mm_%s_WMD.nii' %dsets[ix]
        # write_graph_to_vol_yeo_template_nifti(np.nanmean(WMD,axis=0), fn, 'voxelwise')
        #
        # fn = 'images/Voxelwise_4mm_%s_WeightedDegree.nii' %dsets[ix]
        # write_graph_to_vol_yeo_template_nifti(np.nanmean(ST,axis=0), fn, 'voxelwise')
        #
Example #22
def cal_indiv_graph():
	'''loop through subjects and get PC/WMD/Q/eG/CI'''

	### loop through subjects, 1 to 156

	gordon_files = glob.glob("Data/*Gordon*.netcc")
	yeo_files = glob.glob("Data/*Yeo*.netcc")
	files = gordon_files + yeo_files

	for f in files:
		
		if f in gordon_files:		
			cmd = "cat %s | tail -n 352 > Data/test" %f 
			roi='gordon'
		
		if f in yeo_files:
			cmd = "cat %s | tail -n 422 > Data/test" %f #422 for Yeo
			roi='yeo'

		sub = f[5:8]
		os.system(cmd)


		# load matrix
		matrix = np.genfromtxt('Data/test',delimiter='\t',dtype=None)
		matrix[np.isnan(matrix)] = 0.0  
		matrix[matrix<0]=0.0


		# step through costs, do infomap, return final infomap across cost
		max_cost = .15
		min_cost = .01

		partition = ave_consensus_costs_parition(matrix, min_cost, max_cost)
		partition = np.array(partition) + 1

		# calculate modularity, efficiency?
		Q = cal_modularity_w_imposed_community(matrix,partition)
		Eg = bct.efficiency_wei(matrix)

		# import thresholded matrix to BCT, import partition, run WMD/PC
		PCs = np.zeros((len(np.arange(min_cost, max_cost+0.01, 0.01)), matrix.shape[0]))
		WMDs = np.zeros((len(np.arange(min_cost, max_cost+0.01, 0.01)), matrix.shape[0]))

		for i, cost in enumerate(np.arange(min_cost, max_cost, 0.01)):
			
			tmp_matrix = threshold(matrix.copy(), cost)
			
			#PC
			PCs[i,:] = bct.participation_coef(tmp_matrix, partition)
			#WMD
			WMDs[i,:] = bct.module_degree_zscore(matrix, partition)

		PC = np.mean(PCs, axis=0) # ave across thresholds
		WMD = np.mean(WMDs, axis=0)

		
		fn = "Graph_output/%s_%s_PC" %(sub, roi)
		np.savetxt(fn, PC)

		fn = "Graph_output/%s_%s_WMD" %(sub, roi)
		np.savetxt(fn, WMD)
		
		fn = "Graph_output/%s_%s_Q" %(sub, roi)
		np.savetxt(fn, np.array(Q, ndmin=1))

		fn = "Graph_output/%s_%s_Eg" %(sub, roi)
		np.savetxt(fn, np.array(Eg, ndmin=1))

		fn = "Graph_output/%s_%s_Partition" %(sub, roi)
		np.savetxt(fn, partition)
        s, s)
    Yeo17ci = nib.load(fn)

    fn = '/data/backed_up/kahwang/ECoG_fMRI/%s/infomap/%s_Infomap_to_7Network_2mm.nii' % (
        s, s)
    Yeo7ci = nib.load(fn)

    Yeo7ci = np.round(masker.fit_transform(Yeo7ci))
    Yeo17ci = np.round(masker.fit_transform(Yeo17ci))
    ts = masker.fit_transform(ffiles)
    mat = np.nan_to_num(np.corrcoef(ts.T))
    min_cost = .02
    max_cost = .10
    PC_7 = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), 400))
    PC_17 = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), 400))
    for i, cost in enumerate(np.arange(min_cost, max_cost, 0.01)):
        tmp_matrix = threshold(mat.copy(), cost)
        PC_7[i, :] = bct.participation_coef(tmp_matrix, Yeo7ci)
        PC_17[i, :] = bct.participation_coef(tmp_matrix, Yeo17ci)

    fn = '/home/kahwang/bin/example_graph_pipeline/phub_images/%s_PC_7network.nii.gz' % s
    write_graph_to_vol_sch400_template_nifti(np.nanmean(PC_7, axis=0),
                                             fn,
                                             resolution=400)
    fn = '/home/kahwang/bin/example_graph_pipeline/phub_images/%s_PC_17network.nii.gz' % s
    write_graph_to_vol_sch400_template_nifti(np.nanmean(PC_17, axis=0),
                                             fn,
                                             resolution=400)

#end of line
def get_true_network_metrics(A):

    #control centrality
    c_c = control_centrality(A)

    cc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        cc_fake[i] = np.mean(control_centrality(generate_fake_graph(A)))

    m_cc_fake = np.mean(cc_fake)
    cc_norm = c_c / m_cc_fake

    # Get identity of node with lowest control centrality
    min_cc_true = np.where(c_c == np.amin(c_c))[0]

    # get synchronizability
    sync = synchronizability(A)

    # normalized sync
    sync_fake = np.zeros((100, 1))
    for i in range(0, 100):
        sync_fake[i] = synchronizability(generate_fake_graph(A))

    m_sync_fake = np.mean(sync_fake)
    sync_norm = sync / m_sync_fake

    # get betweeness centrality
    bc = betweenness_centrality(A)
    bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        bc_fake[i] = np.mean(betweenness_centrality(generate_fake_graph(A)))

    m_bc_fake = np.mean(bc_fake)
    bc_norm = bc / m_bc_fake

    # Get identity of node with max bc
    max_bc_true = np.where(bc == np.amax(bc))[0]

    # get eigenvector centrality
    ec = bct.eigenvector_centrality_und(A)
    ec_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ec_fake[i] = np.mean(
            bct.eigenvector_centrality_und(generate_fake_graph(A)))

    m_ec_fake = np.mean(ec_fake)
    ec_norm = ec / m_ec_fake

    # Get identity of node with max ec
    max_ec_true = np.where(ec == np.amax(ec))[0]

    # get edge betweeness centrality
    edge_bc, ignore = bct.edge_betweenness_wei(A)
    edge_bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        edge_bc_fake[i] = np.mean(
            bct.edge_betweenness_wei(generate_fake_graph(A))[0])
    m_edge_bc_fake = np.mean(edge_bc_fake)
    edge_bc_norm = edge_bc / m_edge_bc_fake

    # get clustering coeff
    clust = bct.clustering_coef_wu(A)
    clust_fake = np.zeros((100, 1))
    for i in range(0, 100):
        clust_fake[i] = np.mean(bct.clustering_coef_wu(generate_fake_graph(A)))

    m_clust_fake = np.mean(clust_fake)
    clust_norm = clust / m_clust_fake

    # Get identity of node with max clust
    max_clust_true = np.where(clust == np.amax(clust))[0]

    # get node strength
    ns = node_strength(A)
    ns_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ns_fake[i] = np.mean(node_strength(generate_fake_graph(A)))

    m_ns_fake = np.mean(ns_fake)
    ns_norm = ns / m_ns_fake

    # Get identity of node with max node strength
    max_ns_true = np.where(ns == np.amax(ns))[0]

    # get community structure and participation coefficient
    Ci, ignore = bct.modularity_und(A)
    par = bct.participation_coef(A, Ci)

    # get true efficiency
    eff = bct.efficiency_wei(A, 0)
    eff_fake = np.zeros((100, 1))
    for i in range(0, 100):
        eff_fake[i] = (bct.efficiency_wei(generate_fake_graph(A)))

    m_eff_fake = np.mean(eff_fake)
    eff_norm = eff / m_eff_fake

    # Get true transitivity
    trans = bct.transitivity_wu(A)
    trans_fake = np.zeros((100, 1))
    for i in range(0, 100):
        trans_fake[i] = (bct.transitivity_wu(generate_fake_graph(A)))

    m_trans_fake = np.mean(trans_fake)
    trans_norm = trans / m_trans_fake

    # store output results in a dictionary
    #nodal
    results = {}
    results['control_centrality'] = c_c
    results['control_centrality_norm'] = cc_norm
    results['min_cc_node'] = min_cc_true

    # global measure
    results['sync'] = sync
    results['sync_norm'] = sync_norm

    # nodal
    results['bc'] = bc
    results['bc_norm'] = bc_norm
    results['max_bc_node'] = max_bc_true

    # nodal
    results['ec'] = ec
    results['ec_norm'] = ec_norm
    results['max_ec_node'] = max_ec_true

    # nodal
    results['clust'] = clust
    results['clust_norm'] = clust_norm
    results['max_clust_node'] = max_clust_true

    # nodal
    results['ns'] = ns
    results['ns_norm'] = ns_norm
    results['max_ns_node'] = max_ns_true

    # global
    results['eff'] = eff
    results['eff_norm'] = eff_norm

    # global
    results['trans'] = trans
    results['trans_norm'] = trans_norm

    # nodal
    results['par'] = par

    # edge
    results['edge_bc'] = edge_bc
    results['edge_bc_norm'] = edge_bc_norm

    return (results)
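
As a rough illustration of how get_true_network_metrics might be exercised, the sketch below builds a small random symmetric weighted matrix and inspects a few of the returned entries. It assumes the project-level helpers called inside the function (control_centrality, synchronizability, betweenness_centrality, node_strength, generate_fake_graph) are importable; without them the call will fail.

import numpy as np

# hypothetical usage, assuming the helper functions used above are available
rng = np.random.default_rng(0)
A = rng.random((20, 20))
A = (A + A.T) / 2        # symmetric weighted adjacency
np.fill_diagonal(A, 0)   # no self-connections

metrics = get_true_network_metrics(A)
print(metrics['sync'], metrics['eff'])   # global measures (raw, not normalized)
print(metrics['par'].shape)              # one participation coefficient per node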
def resample_network(A, n_perm, e_f, type_to_remove, final_electrodes):

    # type to remove : 1 for white matter, 0 for grey matter

    # e_f : following Erin + John's convention, this is the fraction of nodes to
    # keep in the network (e.g., e_f = .2 means we remove 80% of the nodes)
    e_f = np.array(e_f)
    nch = A.shape[0]
    n_f = e_f.shape[0]

    # create sub dataframes for only the grey and white matter elec
    wm = final_electrodes[final_electrodes.iloc[:, 2] > 0]
    gm = final_electrodes[final_electrodes.iloc[:, 2] == 0]

    # numbers of each electrode type
    numWhite = wm.shape[0]
    numGrey = gm.shape[0]

    # fraction to remove
    if (type_to_remove == 1):
        e_n = numWhite - np.ceil(e_f * numWhite)
    else:
        e_n = numGrey - np.ceil(e_f * numGrey)
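    # worked example: with 20 white-matter electrodes and e_f = [0.2],
    # ceil(0.2 * 20) = 4 electrodes are kept, so e_n = 20 - 4 = 16 are removed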

    # control centrality
    all_c_c = np.zeros((nch, n_f, n_perm))
    all_c_c[:] = np.nan
    cc_reg = np.zeros((nch, n_f, n_perm))
    cc_reg[:] = np.nan

    all_cc_norm = np.zeros((nch, n_f, n_perm))
    all_cc_norm[:] = np.nan

    #init node strengths
    all_ns = np.zeros((nch, n_f, n_perm))
    all_ns[:] = np.nan

    all_ns_norm = np.zeros((nch, n_f, n_perm))
    all_ns_norm[:] = np.nan

    # init betweenness centrality
    all_bc = np.zeros((nch, n_f, n_perm))
    all_bc[:] = np.nan

    all_bc_norm = np.zeros((nch, n_f, n_perm))
    all_bc_norm[:] = np.nan

    # synch
    all_sync = np.zeros((n_f, n_perm))
    all_sync[:] = np.nan

    all_sync_norm = np.zeros((n_f, n_perm))
    all_sync_norm[:] = np.nan

    # efficiency
    all_eff = np.zeros((n_f, n_perm))
    all_eff[:] = np.nan

    all_eff_norm = np.zeros((n_f, n_perm))
    all_eff_norm[:] = np.nan

    # eigenvector centrality
    all_ec = np.zeros((nch, n_f, n_perm))
    all_ec[:] = np.nan

    all_ec_norm = np.zeros((nch, n_f, n_perm))
    all_ec_norm[:] = np.nan

    # clustering coeff
    all_clust = np.zeros((nch, n_f, n_perm))
    all_clust[:] = np.nan

    all_clust_norm = np.zeros((nch, n_f, n_perm))
    all_clust_norm[:] = np.nan

    # participation coeff
    all_par = np.zeros((nch, n_f, n_perm))
    all_par[:] = np.nan

    # transitivity
    all_trans = np.zeros((n_f, n_perm))
    all_trans[:] = np.nan

    all_trans_norm = np.zeros((n_f, n_perm))
    all_trans_norm[:] = np.nan

    # edge bc
    all_edge_bc = []
    all_edge_bc_norm = []

    # get true participation
    Ci, ignore = bct.modularity_und(A)
    true_par = bct.participation_coef(A, Ci)
    avg_par_removed = np.zeros((n_f, n_perm))
    avg_par_removed[:] = np.nan

    # get the true bc
    true_bc = betweenness_centrality(A)
    avg_bc_removed = np.zeros((n_f, n_perm))
    avg_bc_removed[:] = np.nan

    # loop over all removal fractions and permutations
    for f in range(0, n_f):
        all_edge_bc_cur_fraction = []
        all_edge_bc_norm_cur_fraction = []
        for i_p in range(0, n_perm):

            if (i_p % 100 == 0):
                print(
                    "Doing permutation {0} for removal of fraction {1}".format(
                        i_p, f))

            # make a copy of the adjacency matrix (we will edit this each time)
            A_tmp = A.copy()

            # picking the nodes to remove
            if (type_to_remove == 1):
                to_remove = wm.sample(int(e_n[f])).iloc[:, 0]
            else:
                to_remove = gm.sample(int(e_n[f])).iloc[:, 0]

            # take these electrodes out of the adjacency matrix
            A_tmp = np.delete(A_tmp, to_remove, axis=0)
            A_tmp = np.delete(A_tmp, to_remove, axis=1)

            # create a new array to hold the identity of the channels
            ch_ids = np.arange(0, nch)
            ch_ids = np.delete(ch_ids, to_remove)

            # get the new metrics from A_tmp
            r = get_true_network_metrics(A_tmp)

            # edge metric
            all_edge_bc_cur_fraction.append(r['edge_bc'])
            all_edge_bc_norm_cur_fraction.append(r['edge_bc_norm'])
            # populate the nodal measures
            for i in range(0, ch_ids.shape[0]):
                all_c_c[ch_ids[i], f, i_p] = r['control_centrality'][i]
                all_ns[ch_ids[i], f, i_p] = r['ns'][i]
                all_bc[ch_ids[i], f, i_p] = r['bc'][i]
                all_par[ch_ids[i], f, i_p] = r['par'][i]
                all_ec[ch_ids[i], f, i_p] = r['ec'][i]
                all_clust[ch_ids[i], f, i_p] = r['clust'][i]

                all_cc_norm[ch_ids[i], f,
                            i_p] = r['control_centrality_norm'][i]
                all_ns_norm[ch_ids[i], f, i_p] = r['ns_norm'][i]
                all_bc_norm[ch_ids[i], f, i_p] = r['bc_norm'][i]
                all_ec_norm[ch_ids[i], f, i_p] = r['ec_norm'][i]
                all_clust_norm[ch_ids[i], f, i_p] = r['clust_norm'][i]

            # populate the global measures
            all_sync[f, i_p] = r['sync']
            all_sync_norm[f, i_p] = r['sync_norm']

            all_eff[f, i_p] = r['eff']
            all_eff_norm[f, i_p] = r['eff_norm']

            all_trans[f, i_p] = r['trans']
            all_trans_norm[f, i_p] = r['trans_norm']

        all_edge_bc.append(all_edge_bc_cur_fraction)
        all_edge_bc_norm.append(all_edge_bc_norm_cur_fraction)

    # construct the output dictionary from a resampling

    #nodal
    results = {}
    results['control_centrality'] = all_c_c
    results['control_centrality_norm'] = all_cc_norm

    # global measure
    results['sync'] = all_sync
    results['sync_norm'] = all_sync_norm

    # nodal
    results['bc'] = all_bc
    results['bc_norm'] = all_bc_norm

    # nodal
    results['ec'] = all_ec
    results['ec_norm'] = all_ec_norm

    # nodal
    results['clust'] = all_clust
    results['clust_norm'] = all_clust_norm

    # nodal
    results['ns'] = all_ns
    results['ns_norm'] = all_ns_norm

    # global
    results['eff'] = all_eff
    results['eff_norm'] = all_eff_norm

    # global
    results['trans'] = all_trans
    results['trans_norm'] = all_trans_norm

    # nodal
    results['par'] = all_par

    #edge
    results['edge_bc'] = all_edge_bc
    results['edge_bc_norm'] = all_edge_bc_norm

    return (results)
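
A hypothetical invocation of resample_network is sketched below. The column layout of final_electrodes (column 0 = row index into A, column 2 = white/grey-matter flag) is inferred from the indexing inside the function, and the sketch again assumes the same project helpers that get_true_network_metrics depends on are available.

import numpy as np
import pandas as pd

# hypothetical data: 30 channels, the last 10 labelled as white matter
rng = np.random.default_rng(0)
A = rng.random((30, 30))
A = (A + A.T) / 2
np.fill_diagonal(A, 0)

final_electrodes = pd.DataFrame({
    'index': np.arange(30),                     # column 0: row index into A
    'name': ['ch%02d' % i for i in range(30)],  # column 1: channel label
    'wm': [0] * 20 + [1] * 10,                  # column 2: >0 means white matter
})

# keep 20% of white-matter electrodes (remove 80%), 5 permutations per fraction
res = resample_network(A, n_perm=5, e_f=[0.2], type_to_remove=1,
                       final_electrodes=final_electrodes)
print(res['sync'].shape)  # (n_fractions, n_perm)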
ylab = 'node score PC%s' % (ncomp+1)
xlab = 'evolutionary expansion'
hctsa_utils.scatterregplot(y, x, title, xlab, ylab, pointsize=50)

####################################
# Participation coefficient
####################################
# tsn and participation coefficient
uniqlabels, uniqidx = np.unique(rsnlabels, return_index=True)
uniqlabels = uniqlabels[np.argsort(uniqidx)]
rsnidx = np.zeros((400, 1))
for n, rsn in enumerate(uniqlabels):
    idx = np.where(np.array(rsnlabels) == rsn)[0]
    rsnidx[idx] = n
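# rsnidx now assigns each of the 400 parcels an integer network index, which
# bct.participation_coef below treats as the community affiliation vector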

participCoef = bct.participation_coef(fc_average_discov, rsnidx)

toplot = participCoef
brains = plotting.plot_conte69(toplot, lhlabels, rhlabels,
                               vmin=np.percentile(toplot, 2.5),
                               vmax=np.percentile(toplot, 97.5),
                               colormap='viridis',
                               colorbartitle=('fc participation coefficient'),
                               surf='inflated')

# plot and correlate
ncomp = 0
x = participCoef
y = node_score[:, ncomp]
corr = scipy.stats.spearmanr(x, y)
pvalspin = hctsa_utils.get_spinp(x, y, corrval=corr[0], nspin=10000,
Exemple #27
0
if __name__ == '__main__':

    dat_dir = os.environ['pnd']
    graph_dir = dat_dir+'/graphs/'
    modularity_dir = dat_dir+'/modularity/'
    role_dir = dat_dir+'/node_roles'
    if not os.path.exists(role_dir):
        os.makedirs(role_dir)
    nnodes = 148

    for subjid in ['pandit', 'ctrl']:
        thresh_dens = '0.1'
        qscores = '%s.dens_%s.Qval' % (subjid, thresh_dens)
        df = np.loadtxt(os.path.join(modularity_dir, qscores))
        maxiter = df.argmax()
        trees_name = '%s.dens_%s.trees' % (subjid, thresh_dens)
        trees_in = os.path.join(modularity_dir, trees_name)
        coms = np.loadtxt(trees_in)[:, maxiter]

        graph_name = '%s.dens_%s.edgelist.gz' % (subjid, thresh_dens)
        g = make_networkx_graph(nnodes, os.path.join(graph_dir, graph_name))
        ga = nx.adjacency_matrix(g).toarray()

        pc = bct.participation_coef(ga, coms)
        wz = bct.module_degree_zscore(ga, coms)

        pc_out_name = '%s.dens_%s_part_coef.txt' % (subjid, thresh_dens)
        np.savetxt(os.path.join(role_dir, pc_out_name), pc, fmt='%.4f')
        wz_out_name = '%s.dens_%s_within_mod_Z.txt' % (subjid, thresh_dens)
        np.savetxt(os.path.join(role_dir, wz_out_name), wz, fmt='%.4f')
Exemple #28
0
def Graph_Analysis(data,
                   weight=True,
                   thresh_func=bct.threshold_proportional,
                   threshold=.15,
                   plot_threshold=None,
                   community_alg=bct.community_louvain,
                   ref_community=None,
                   reorder=False,
                   display=True,
                   layout='kk',
                   print_options={},
                   plot_options={}):
    """
    Creates and displays graphs of a data matrix.
    
    Parameters
    ----------
    data: pandas DataFrame
        data to use to create the graph
    thresh_func: function that takes in a connectivity matrix and thresholds it
        any algorithm that returns a connectivity matrix of the same size as the original may be used.
        intended to be used with bct.threshold_proportional or bct.threshold_absolute
    community_alg: function that takes in a connectivity matrix and returns community assignment
        intended to use algorithms from the brain connectivity toolbox like community_louvain or 
        modularity_und. Must return a list of community assignments followed by Q, the modularity
        index
    edge_metric: str: 'pearson', 'spearman' or 'MI'
        relationship metric between nodes. MI stands for mutual information. "abs_"
        may be used in front of "pearson" or "spearman" to get the absolute value
        of the correlations, e.g. abs_pearson
    threshold: float 0 <= x <= 1, optional
        the proportion of weights to keep (to be passed to bct.threshold_proportional)
    weight: bool, optional
        if True, creates a weighted graph (vs. a binary)
    reorder: bool, optional
        if True, reorder vertices based on community assignment
    display: bool, optional
        if True, display the graph and print node membership
    layout: str: 'kk', 'circle', 'grid' or other igraph layouts, optional
        Determines how the graph is displayed
    avg_num_edges: int > 1
        thresholds the edges on the graph so each node has, on average, avg_num_edges
    print_options: dict, optional
        dictionary of arguments to be passed to print_community_members
    plot_options: dict, optional
        dictionary of arguments to be passed to plot_graph
        
    Returns
    ----------
    G: igraph Graph
        the graph object created by the function
    graph_mat: numpy matrix
        the matrix used to create the graph
    """

    # threshold and remove diagonal
    # (DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the current equivalent)
    graph_mat = thresh_func(data.to_numpy(), threshold)

    # make a binary version if not weighted
    if not weight:
        graph_mat = np.ceil(graph_mat)
        G = igraph.Graph.Adjacency(graph_mat.tolist(), mode='undirected')
    else:
        G = igraph.Graph.Weighted_Adjacency(graph_mat.tolist(),
                                            mode='undirected')
    column_names = data.columns
    # community detection
    # Louvain by default; alternatives include bct.modularity_und ("Newman's
    # spectral community detection") and bct.modularity_louvain_und_sign
    comm, mod = community_alg(graph_mat)
    # if there is a reference, relabel communities based on their closest association
    if ref_community:
        comm = relabel_community(comm, ref_community)
    G.vs['community'] = comm
    G.vs['id'] = range(len(G.vs))
    G.vs['name'] = column_names
    G.vs['within_module_degree'] = bct.module_degree_zscore(graph_mat, comm)
    G.vs['part_coef'] = bct.participation_coef(graph_mat, comm)

    if weight:
        G.vs['eigen_centrality'] = G.eigenvector_centrality(
            directed=False, weights=G.es['weight'])
    else:
        G.vs['eigen_centrality'] = G.eigenvector_centrality(directed=False)

    #if reorder, reorder vertices by community membership
    if reorder:
        G = community_reorder(G)
    # get connectivity matrix used to make the graph
    connectivity_matrix = graph_to_dataframe(G)
    # calculate subgraph (within-community) characteristics
    subgraph_analysis(G, community_alg=community_alg)

    # visualize
    layout_graph = None
    if plot_threshold:
        layout_mat = thresh_func(data.to_numpy(), plot_threshold)
        layout_graph = igraph.Graph.Weighted_Adjacency(layout_mat.tolist(),
                                                       mode='undirected')
    visual_style = {}
    visual_style = get_visual_style(G,
                                    layout_graph,
                                    layout=layout,
                                    vertex_size='eigen_centrality',
                                    labels=G.vs['id'],
                                    size=1000)
    if display:
        # plot community structure
        print_community_members(G, **print_options)
        plot_graph(G, visual_style=visual_style, **plot_options)
    return (G, connectivity_matrix, visual_style)
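
For reference, a minimal (hypothetical) call to Graph_Analysis on a small correlation matrix could look like the sketch below. It assumes the module-level helpers the function uses internally (get_visual_style, graph_to_dataframe, subgraph_analysis, and, when display=True, print_community_members and plot_graph) are importable; the data are made up.

import numpy as np
import pandas as pd
import bct

# hypothetical input: correlations among eight made-up variables
rng = np.random.default_rng(0)
data = pd.DataFrame(rng.standard_normal((200, 8)),
                    columns=['v%d' % i for i in range(8)]).corr()

G, connectivity, style = Graph_Analysis(data,
                                        weight=True,
                                        thresh_func=bct.threshold_proportional,
                                        threshold=.3,
                                        community_alg=bct.community_louvain,
                                        display=False)
print(G.vs['community'])   # community assignment per node
print(G.vs['part_coef'])   # participation coefficient per node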