def calculate_communities(self, reorder=False, **kwargs):
    # pass a plain string as the assertion message (the original passed
    # print(...), which evaluates to None)
    assert self.community_alg is not None, \
        "Community algorithm has not been set!"
    G = self.G
    graph_mat = self.graph_mat
    # calculate community structure
    comm, mod = self.community_alg(graph_mat, **kwargs)
    # if there is a reference, relabel communities based on their closest association
    if self.ref_community:
        comm = self._relabel_community(comm, self.ref_community)
    # label vertices of G
    G.vs['community'] = comm
    G.vs['within_module_degree'] = bct.module_degree_zscore(
        graph_mat, comm)
    # signed matrices get separate positive/negative participation coefficients
    if np.min(graph_mat) < 0:
        participation_pos, participation_neg = bct.participation_coef_sign(
            graph_mat, comm)
        G.vs['part_coef_pos'] = participation_pos
        G.vs['part_coef_neg'] = participation_neg
    else:
        G.vs['part_coef'] = bct.participation_coef(graph_mat, comm)
    if reorder:
        self.reorder()
    # calculate subgraph (within-community) characteristics
    self._subgraph_analysis()
    return mod
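Both nodal-role calls above share the same pattern: `bct.module_degree_zscore` and `bct.participation_coef` take a connectivity matrix plus a community-affiliation vector. A minimal standalone sketch (assuming `bctpy` is installed; the random matrix stands in for real data):

import numpy as np
import bct

rng = np.random.default_rng(0)
W = rng.random((20, 20))
W = (W + W.T) / 2          # symmetrize
np.fill_diagonal(W, 0)     # no self-connections

ci, q = bct.community_louvain(W)        # community affiliation vector and Q
wmd = bct.module_degree_zscore(W, ci)   # within-module degree z-score per node
pc = bct.participation_coef(W, ci)      # participation coefficient per node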
Example #2
def run_graph(sub):
    '''run graph analysis, get PC/WMD/Q'''
    for condition in Conditions:
        for atlas in Atlases:

            matrix = []
            for direction in Directions:
                fn = datapath + str(
                    sub) + '_' + condition + direction + atlas + '.corrmat'
                try:
                    matrix += [np.loadtxt(fn)]
                except OSError:
                    # skip this condition/atlas if any direction's file is missing
                    break

            PCs = []
            WMDs = []
            Qs = []
            if np.shape(matrix)[0] > 1:
                # average the correlation matrices across directions
                matrix = np.mean(matrix, axis=0)

                for c in np.arange(0.05, 0.16, 0.01):
                    mat_th = threshold(matrix.copy(), c)
                    graph = matrix_to_igraph(matrix.copy(), c)
                    i = graph.community_infomap(edge_weights='weight')
                    CI = np.asarray(i.membership) + 1
                    PCs += [bct.participation_coef(mat_th, CI)]
                    WMDs += [bct.module_degree_zscore(mat_th, CI)]
                    Qs += [i.modularity]

                fn = datapath + str(sub) + '_' + condition + atlas + '.PC'
                np.save(fn, PCs)
                fn = datapath + str(sub) + '_' + condition + atlas + '.WMD'
                np.save(fn, WMDs)
                fn = datapath + str(sub) + '_' + condition + atlas + '.Q'
                np.save(fn, Qs)
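`threshold` and `matrix_to_igraph` above are project-specific helpers. A rough equivalent of one per-cost step using only `bctpy` and `python-igraph` (a sketch, not the author's exact helpers):

import numpy as np
import bct
import igraph

def infomap_at_cost(matrix, cost):
    # keep the strongest `cost` proportion of edges, then run Infomap on them
    mat_th = bct.threshold_proportional(matrix, cost, copy=True)
    rows, cols = np.nonzero(np.triu(mat_th, k=1))
    g = igraph.Graph(n=mat_th.shape[0],
                     edges=list(zip(rows.tolist(), cols.tolist())))
    g.es['weight'] = mat_th[rows, cols]
    membership = np.asarray(g.community_infomap(edge_weights='weight').membership)
    return mat_th, membership + 1   # 1-indexed communities, as bct expects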
def test_zi():
    x = load_sample(thres=.4)
    ci = np.load(mat_path('sample_partition.npy'))

    zi = np.load(mat_path('sample_zi.npy'))

    zi_ = bct.module_degree_zscore(x, ci)
    print(list(zip(zi, zi_)))

    assert np.allclose(zi, zi_, atol=0.05)
Example #5
    def compute(self):
        try:
            name = "Results/Communities/" + self.name + "_" + self.communities_algorithm + "_communities.node"
            print('Trying', name)
            last_results = pd.read_csv(
                name,
                sep=" ",
                names=['X', 'Y', 'Z', 'Community', 'Degree', 'RegionName'])
            print('Loading communities...')
            communities = last_results['Community']
            region_names = last_results['RegionName']

        except Exception:
            print('Dividing communities...')
            communities = NM(self.g, self.name, self.stats,
                             self.communities_algorithm).compute()
            region_names = pd.read_csv("data/lobes.node", sep=" ",
                                       header='infer')['RegionName']

        participations = bct.participation_coef(self.g, communities,
                                                'undirected')
        within_degrees = bct.module_degree_zscore(self.g, communities, 0)

        connectors, provincials = [], []
        for pc, wmd, name in zip(participations, within_degrees, region_names):
            if wmd > 1:
                if pc > 0.6:
                    connectors.append((name, wmd, pc))
                if pc < 0.4:
                    provincials.append((name, wmd, pc))

        print("Connectors", connectors)
        print("Provincials", provincials)

        self.stats['RegionName'] = region_names
        self.stats['Community'] = communities
        self.stats['wMD'] = within_degrees
        self.stats['PC'] = participations

        plots.create_box_plot_nodes('PC', self.stats, self.name)

        plots.create_plot_communitycolored(
            "reports/plots/ParticipationCoeff/" + str(self.name) + '_' +
            str(self.communities_algorithm) + ".pdf",
            ('fMRI' if self.name == 'fmri' else 'EEG-' + self.name),
            'Within-module Degree Z-score',
            within_degrees,
            'Participation Coefficient',
            participations,
            communities=communities,
            xticks=[-3, -2, -1, 0, 1, 2, 3],
            yticks=[0, 0.2, 0.4, 0.6, 0.8, 1.0])

        return self.stats
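The connector/provincial split above follows the usual hub taxonomy: a within-module degree z-score above 1 marks a hub, and the participation coefficient then separates connector hubs (diverse intermodular connections) from provincial hubs. Restated as a small helper, with the same cutoffs:

def classify_hubs(participations, within_degrees, region_names,
                  z_cut=1.0, pc_hi=0.6, pc_lo=0.4):
    '''Split hub nodes (z-score > z_cut) into connectors and provincials.'''
    connectors, provincials = [], []
    for pc, wmd, name in zip(participations, within_degrees, region_names):
        if wmd > z_cut:
            if pc > pc_hi:
                connectors.append((name, wmd, pc))
            elif pc < pc_lo:
                provincials.append((name, wmd, pc))
    return connectors, provincials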
Example #6
def cal_dynamic_graph(MTD, impose=False, threshold=False):
    '''calculate graph metrics across time (dynamic)'''
    #setup outputs
    time_points = MTD.shape[0]
    ci = np.zeros([MTD.shape[1], MTD.shape[0]])
    q = np.zeros([MTD.shape[0]])
    WMD = np.zeros([MTD.shape[1], MTD.shape[0]])
    PC = np.zeros([MTD.shape[1], MTD.shape[0]])
    WW = np.zeros([MTD.shape[1], MTD.shape[0]])
    BW = np.zeros([MTD.shape[1], MTD.shape[0]])

    #modularity
    if impose:
        ci = np.tile(
            np.loadtxt(
                '/home/despoB/kaihwang/Rest/ThaGate/ROIs/Morel_Striatum_Gordon_CI'
            ), [time_points, 1]).T

    for i in range(time_points):
        # copy so the NaN cleanup below does not modify MTD in place
        matrix = MTD[i, :, :].copy()

        #need to deal with NANs because of coverage (no signal in some ROIs)
        matrix[np.isnan(matrix)] = 0

        #threshold here
        if threshold:
            matrix = bct.threshold_proportional(matrix, threshold)

        #modularity
        if not impose:
            ci[:, i], q[i] = bct.modularity_louvain_und_sign(matrix)

        #PC
        # for now, no negative weights
        matrix[matrix < 0] = 0
        PC[:, i] = bct.participation_coef(matrix, ci[:, i])

        #WMD
        WMD[:, i] = bct.module_degree_zscore(matrix, ci[:, i])

        ## within weight
        WW[:, i] = cal_within_weight(matrix, ci[:, i])

        ## between Weight
        BW[:, i] = cal_between_weight(matrix, ci[:, i])

        # calculate q using the imposed CI partition
        if impose:
            q[i] = cal_modularity_w_imposed_community(matrix, ci[:, i])

    return ci, q, PC, WMD, WW, BW
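Zeroing negative weights before `participation_coef`, as above, is one common convention for correlation matrices. `bctpy` also provides `participation_coef_sign`, which scores positive and negative connections separately (the approach the first example on this page uses); a sketch of that alternative:

import bct

def signed_participation(matrix, ci):
    # keeps negative weights; returns separate coefficients for
    # positive and negative connections
    p_pos, p_neg = bct.participation_coef_sign(matrix, ci)
    return p_pos, p_neg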
Example #8
def do_opt(adj, mods, option):
    if option == 'global efficiency':
        return bct.efficiency_wei(adj)
    elif option == 'local efficiency':
        return bct.efficiency_wei(adj, local=True)
    elif option == 'average strength':
        return bct.strengths_und(adj)
    elif option == 'clustering coefficient':
        return bct.clustering_coef_wu(adj)
    elif option == 'eigenvector centrality':
        return bct.eigenvector_centrality_und(adj)
    elif option == 'binary kcore':
        return bct.kcoreness_centrality_bu(adj)[0]

    elif option == 'modularity':
        # bct.modularity_und's second positional argument is gamma, so the
        # imposed partition has to go in as the kci keyword
        return bct.modularity_und(adj, kci=mods)[1]
    elif option == 'participation coefficient':
        return bct.participation_coef(adj, mods)
    elif option == 'within-module degree':
        return bct.module_degree_zscore(adj, mods)
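A hypothetical call, with the partition coming from Louvain (any integer affiliation vector works):

import numpy as np
import bct

rng = np.random.default_rng(1)
adj = rng.random((30, 30))
adj = (adj + adj.T) / 2
np.fill_diagonal(adj, 0)

mods, _ = bct.community_louvain(adj)
print(do_opt(adj, mods, 'participation coefficient'))
print(do_opt(adj, mods, 'within-module degree'))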
Example #9
def test_pipeline():
    ''' A run-through test using a csv input from Aaron'''
    # load matrix
    matrix = np.genfromtxt('HCP_MMP1_roi-pair_corr.csv',
                           delimiter=',',
                           dtype=None)
    matrix[np.isnan(matrix)] = 0.0

    # step through costs, do infomap, return final infomap across cost
    max_cost = .15
    min_cost = .01

    # ave consensus across costs
    partition = ave_consensus_costs_parition(matrix, min_cost, max_cost)
    partition = np.array(partition) + 1

    # import thresholded matrix to BCT, import partition, run WMD/PC
    PCs = np.zeros((len(np.arange(min_cost, max_cost + 0.01,
                                  0.01)), matrix.shape[0]))
    WMDs = np.zeros((len(np.arange(min_cost, max_cost + 0.01,
                                   0.01)), matrix.shape[0]))

    for i, cost in enumerate(np.arange(min_cost, max_cost + 0.01, 0.01)):

        tmp_matrix = threshold(matrix.copy(), cost)

        #PC
        PCs[i, :] = bct.participation_coef(tmp_matrix, partition)
        #WMD (on the thresholded matrix, matching the PC step above)
        WMDs[i, :] = bct.module_degree_zscore(tmp_matrix, partition)

    np.save("partition", partition)
    np.save("PCs", PCs)
    np.save("WMDs", WMDs)

    #alternatively, merge consensus using the power method
    recursive_partition = power_recursive_partition(matrix, min_cost, max_cost)
    recursive_partition = recursive_partition + 1

    np.save('recursive_partition', recursive_partition)
Example #10
def cal_sFC_graph(subject, sequence, roi, impose=False, threshold=1.0):
    ''' load TS and run static FC'''
    ts_path = '/home/despoB/kaihwang/Rest/ThaGate/NotBackedUp/'
    fn = ts_path + str(subject) + '_%s_%s_000.netts' % (roi, sequence)
    ts = np.loadtxt(fn)

    matrix = np.corrcoef(ts)
    matrix[np.isnan(matrix)] = 0

    matrix = bct.threshold_proportional(matrix, threshold)

    num_iter = 200
    consensus = np.zeros((num_iter, matrix.shape[0], matrix.shape[1]))

    for i in range(num_iter):
        ci, _ = bct.modularity_louvain_und_sign(matrix, qtype='sta')
        consensus[i, :, :] = community_matrix(ci)

    mean_matrix = np.nanmean(consensus, axis=0)
    mean_matrix[np.isnan(mean_matrix)] = 0
    CI, Q = bct.modularity_louvain_und_sign(mean_matrix, qtype='sta')

    #no negative weights
    matrix[matrix < 0] = 0

    PC = bct.participation_coef(matrix, CI)

    #WMD
    WMD = bct.module_degree_zscore(matrix, CI)

    ## within weight
    WW = cal_within_weight(matrix, CI)

    ## between Weight
    BW = cal_between_weight(matrix, CI)

    return CI, Q, PC, WMD, WW, BW
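`community_matrix` above is a project helper; the usual consensus-clustering construction it stands for is a co-assignment matrix that is 1 where two nodes share a module and 0 otherwise, averaged over iterations. A minimal sketch of that construction (an assumption about the helper, not its actual source):

import numpy as np

def coassignment_matrix(ci):
    '''1 where nodes i and j share a community label, else 0.'''
    ci = np.asarray(ci)
    return (ci[:, None] == ci[None, :]).astype(float)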
Example #12
    def centrality(self,
                   sbj_number,
                   nodes_number,
                   atlas,
                   make_symmetric=True,
                   upper_threshold=None,
                   lower_threshold=None,
                   binarize=False):
        '''
        Compute centrality measures of the adjacency matrix


        Parameters
        ----------
        sbj_number: int |
                    number of subjects
        nodes_number: int|
                      number of nodes
        atlas: excel file |
                    please see the example available in the repo (e.g. new_atlas_coords.xlsx)
        make_symmetric: Boolean|
                        True indicates that the matrix is either upper
                        or lower triangular and needs to be symmetrized;
                        False indicates that the matrix is already a full matrix
        upper_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values with respect to the maximum. Values
                        under that threshold will be set to 0 (Default is None)
        lower_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values with respect to the maximum. Values
                        above that threshold will be set to 0 (Default is None)
        binarize: Boolean |
                        True will make the connectivity matrix binary
                        (Default is False)


        Returns
        -------

        dict: dictionary with the following keys |

        edge_betweeness_bin: | np.ndarray
                            Edge betweenness centrality is the fraction of all
                            shortest paths in the network that contain a given
                            edge. Edges with high values of betweenness centrality
                            participate in a large number of shortest paths.
                            It will return node betweenness centrality vector.
        edge_betweeness_wei: | np.ndarray
                            Edge betweenness centrality is the fraction of all
                            shortest paths in the network that contain a given
                            edge. Edges with high values of betweenness centrality
                            participate in a large number of shortest paths.
                            It will return node betweenness centrality vector.
        eigenvector_centrality_und: | np.ndarray
                            Eigenvector centrality is a self-referential measure
                            of centrality: nodes have high eigenvector centrality
                            if they connect to other nodes that have high
                            eigenvector centrality. The eigenvector centrality of
                            node i is equivalent to the ith element in the eigenvector
                            corresponding to the largest eigenvalue of the adjacency matrix.
                            It will return the eigenvector associated with the
                            largest eigenvalue of the matrix
        coreness_kcoreness_centrality_bu: | np.ndarray
                            The k-core is the largest subgraph comprising nodes
                            of degree at least k. The coreness of a node is k if
                            the node belongs to the k-core but not to the (k+1)-core.
                            This function computes the coreness of all nodes for a
                            given binary undirected connection matrix.
                            It will return the node coreness.
        kn_kcoreness_centrality_bu: | np.ndarray
                            The k-core is the largest subgraph comprising nodes
                            of degree at least k. The coreness of a node is k if
                            the node belongs to the k-core but not to the (k+1)-core.
                            This function computes the coreness of all nodes for a
                            given binary undirected connection matrix.
                            It will return the size of k-core
        module_degree_zscore: | np.ndarray
                            The within-module degree z-score is a within-module
                            version of degree centrality. It will return
                            within-module degree Z-score
        participation_coef: | np.ndarray
                            Participation coefficient is a measure of diversity
                            of intermodular connections of individual nodes.
                            It will return the participation coefficient
        subgraph_centrality: | np.ndarray
                            The subgraph centrality of a node is a weighted sum
                            of closed walks of different lengths in the network
                            starting and ending at the node. This function returns
                            a vector of subgraph centralities for each node of the
                            network. It will return the subgraph centrality

        '''

        with open(self.net_label_txt) as f:
            net = f.read().splitlines()

        self.atlas = pd.read_excel(atlas, header=None)
        self.atlas = np.array(self.atlas)
        self.ci_original = self.atlas[:, 8]

        self.centrality = {
            "edge_betweeness_bin":
            np.zeros([sbj_number, nodes_number]),
            "edge_betweeness_wei":
            np.zeros([sbj_number, nodes_number]),
            "eigenvector_centrality_und":
            np.zeros([sbj_number, nodes_number]),
            "coreness_kcoreness_centrality_bu":
            np.zeros([sbj_number, nodes_number]),
            "kn_kcoreness_centrality_bu":
            np.zeros([sbj_number, nodes_number]),
            "module_degree_zscore":
            np.zeros([sbj_number, nodes_number]),
            "participation_coef":
            np.zeros([sbj_number, nodes_number]),
            "subgraph_centrality":
            np.zeros([sbj_number, nodes_number])
        }

        for subj in range(len(self.matrices_files)):
            self.matrix = pd.read_csv(self.matrices_files[subj],
                                      sep=' ',
                                      header=None)
            self.matrix = np.array(self.matrix)
            if make_symmetric:
                # symmetrize an upper/lower-triangular matrix
                self.matrix = self.matrix + self.matrix.T - np.diag(
                    self.matrix.diagonal())

            self.max = np.max(self.matrix)
            if upper_threshold is not None:
                self.matrix[self.matrix < upper_threshold * self.max / 100] = 0
            if lower_threshold is not None:
                self.matrix[self.matrix > lower_threshold * self.max / 100] = 0

            self.matrix_bin = bct.algorithms.binarize(self.matrix)
            self.matrix_weight = self.matrix

            if binarize:
                self.matrix = bct.algorithms.binarize(self.matrix)

            np.fill_diagonal(self.matrix, 0)
            np.fill_diagonal(self.matrix_bin, 0)
            np.fill_diagonal(self.matrix_weight, 0)

            self.BC = bct.betweenness_bin(self.matrix_bin)
            self.centrality['edge_betweeness_bin'][subj] = self.BC

            self.BC_w = bct.betweenness_wei(self.matrix_weight)
            self.centrality['edge_betweeness_wei'][subj] = self.BC_w

            self.v = bct.eigenvector_centrality_und(self.matrix)
            self.centrality['eigenvector_centrality_und'][subj] = self.v

            self.coreness, self.kn = bct.kcoreness_centrality_bu(
                self.matrix_bin)
            self.centrality['coreness_kcoreness_centrality_bu'][
                subj] = self.coreness
            self.centrality['kn_kcoreness_centrality_bu'][subj] = self.kn

            self.Z = bct.module_degree_zscore(self.matrix, ci=self.ci_original)
            self.centrality['module_degree_zscore'][subj] = self.Z

            self.P = bct.participation_coef(self.matrix, ci=self.ci_original)
            self.centrality['participation_coef'][subj] = self.P

            self.Cs = bct.subgraph_centrality(self.matrix_bin)
            self.centrality['subgraph_centrality'][subj] = self.Cs

        return self.centrality
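The percent-of-max thresholding embedded in the loop above can be read as a standalone helper (a sketch of the same logic):

import numpy as np

def percent_of_max_threshold(matrix, upper=None, lower=None):
    '''Zero entries below upper% or above lower% of the matrix maximum.'''
    out = matrix.copy()
    peak = out.max()
    if upper is not None:
        out[out < upper * peak / 100] = 0
    if lower is not None:
        out[out > lower * peak / 100] = 0
    return out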
Example #13
def cal_thalamus_and_cortical_ROIs_nodal_properties(
        Thalamocortical_corrmat, Cortical_adj, Cortical_plus_thalamus_CI,
        Thalamus_CIs, Cortical_CI, Cortical_ROIs_positions,
        Thalamus_voxel_positions, cost_thresholds):
    '''Calculate voxel-wise nodal properties of the thalamus, and nodal properties of cortical ROIs.
    Metrics to be calculated include:

    Participation Coefficient (PC)
    Between-network connectivity weight ratio (BNWR)
        Ratio of connection weight devoted to between-network interactions
    Number of networks/modules/components connected (NNC)
    Within-module degree z-score (WMD)
        For WMD, matrices will be binarized, then normalized to the corticocortical connections' mean and SD

    usage: PCs, BNWRs, NNCs, WMDs, bPCs, mean_NNC, mean_BNWR, mean_PC, mean_bPC, mean_WMD = cal_thalamus_and_cortical_ROIs_nodal_properties(Thalamocortical_corrmat,
                Cortical_adj,
                Cortical_plus_thalamus_CI,
                Thalamus_CIs,
                Cortical_CI,
                Cortical_ROIs_positions,
                Thalamus_voxel_positions,
                cost_thresholds)

    ----
    Parameters
    ----
    Thalamocortical_corrmat: thalamocortical adj matrix
    Cortical_adj: corticocortical adj matrix
    Cortical_plus_thalamus_CI: a vector of community/module/network assignments for all nodes (cortical ROIs + thalamic voxels)
    Thalamus_CIs: a vector of network assignments for thalamic voxels
    Cortical_CI: a vector of network assignments for cortical ROIs
    Cortical_ROIs_positions: a position vector indicating which rows/columns of the thalamocortical adj matrix are cortical ROIs
    Thalamus_voxel_positions: a position vector indicating which rows/columns of the thalamocortical adj matrix are thalamic voxels
    cost_thresholds: the thresholds that threshold the thalamocortical edges at densities .01 to .15

    Return variables are graph metrics across thresholds (suffixed with "s") or averaged across thresholds (prefixed with "mean").
    '''

    ##Thalamus nodal roles
    Thalamocortical_corrmat[np.isnan(Thalamocortical_corrmat)] = 0

    #PC
    PCs = []  #np.zeros(Cortical_plus_thalamus_CI.size)
    bPCs = []  #np.zeros(Cortical_plus_thalamus_CI.size)
    #BNWR between network connectivity weight
    BNWRs = []  #np.zeros(Cortical_plus_thalamus_CI.size)
    #get number of networks/communities connected
    NNCs = []  #np.zeros(Cortical_plus_thalamus_CI.size)

    #loop through costs
    for c in cost_thresholds:
        #copy adj matrix and then threshold
        Par_adj = Thalamocortical_corrmat.copy()
        #remove weights connected to low SNR communities (CI==0, orbital frontal, inferior temporal)
        Par_adj[Cortical_ROIs_positions[Cortical_CI == 0], :] = 0
        Par_adj[:, Cortical_ROIs_positions[Cortical_CI == 0]] = 0
        Par_adj[Par_adj < c] = 0

        #binary
        bPar_adj = Par_adj.copy()
        bPar_adj = bPar_adj > c

        #PC
        PCs += [bct.participation_coef(Par_adj, Cortical_plus_thalamus_CI)]
        bPCs += [bct.participation_coef(bPar_adj, Cortical_plus_thalamus_CI)]
        #aPCs += [bct.participation_coef(Par_adj, Cortical_plus_thalamus_CI)]

        #BNWR and NNCs
        Tha_BNWR = np.zeros(Cortical_plus_thalamus_CI.size)
        Tha_NNCs = np.zeros(Cortical_plus_thalamus_CI.size)
        for ix, i in enumerate(Thalamus_voxel_positions):
            sum_between_weight = np.nansum(
                Par_adj[i, Cortical_plus_thalamus_CI != Thalamus_CIs[ix]])
            sum_total = np.nansum(Par_adj[i, :])
            Tha_BNWR[i] = sum_between_weight / sum_total
            Tha_BNWR[i] = np.nan_to_num(Tha_BNWR[i])

            Tha_NNCs[i] = len(
                np.unique(Cortical_plus_thalamus_CI[Par_adj[i, ] != 0]))
        BNWRs += [Tha_BNWR]
        NNCs += [Tha_NNCs]

    ##Cortical nodal roles
    Cortical_adj[np.isnan(Cortical_adj)] = 0

    Cortical_PCs = []  #np.zeros(Cortical_CI.size)
    Cortical_bPCs = []  #np.zeros(Cortical_CI.size)
    Cortical_BNWR = []  #np.zeros(Cortical_CI.size)
    Cortical_NNCs = []  #np.zeros(Cortical_plus_thalamus_CI.size)

    for ix, c in enumerate(np.arange(0.01, 0.16, 0.01)):
        M = bct.threshold_proportional(Cortical_adj, c, copy=True)
        bM = bct.weight_conversion(M, 'binarize', copy=True)

        #PC
        Cortical_PCs += [bct.participation_coef(M, Cortical_CI)]
        Cortical_bPCs += [bct.participation_coef(bM, Cortical_CI)]

        #BNWR and NNC
        BNWR = np.zeros(Cortical_CI.size)
        Cor_NNCs = np.zeros(Cortical_plus_thalamus_CI.size)
        for i in range(len(Cortical_CI)):
            sum_between_weight = np.nansum(M[i, Cortical_CI != Cortical_CI[i]])
            sum_total = np.nansum(M[i, :])
            BNWR[i] = sum_between_weight / sum_total
            BNWR[i] = np.nan_to_num(BNWR[i])

            Cor_NNCs[i] = len(np.unique(Cortical_CI[M[i, ] != 0]))
        Cortical_BNWR += [BNWR]
        Cortical_NNCs += [Cor_NNCs]

    #do WMD: first convert matrices to binary, then calculate z-scores using the mean and std of corticocortical degrees
    Cortical_wm_mean = {}
    Cortical_wm_std = {}
    Cortical_WMDs = []  #np.zeros(Cortical_CI.size)
    WMDs = []  #np.zeros(Cortical_plus_thalamus_CI.size)
    for ix, c in enumerate(np.arange(0.01, 0.16, 0.01)):

        #threshold by density
        bM = bct.weight_conversion(
            bct.threshold_proportional(Cortical_adj, c, copy=True), 'binarize')
        Cortical_WMDs += [bct.module_degree_zscore(bM, Cortical_CI)]

        #store each network's within-module degree mean and std at this threshold
        for CI in np.unique(Cortical_CI):
            Cortical_wm_mean[ix + 1, CI] = np.nanmean(
                np.sum(bM[Cortical_CI == CI, :][:, Cortical_CI == CI], 1))
            Cortical_wm_std[ix + 1, CI] = np.nanstd(
                np.sum(bM[Cortical_CI == CI, :][:, Cortical_CI == CI], 1))

        #thalamic WMD, threshold by density
        M = bct.weight_conversion(
            bct.threshold_absolute(Thalamocortical_corrmat,
                                   cost_thresholds[ix],
                                   copy=True), 'binarize')

        tha_wmd = np.zeros(Cortical_plus_thalamus_CI.size)
        for i in np.unique(Cortical_CI):
            tha_wmd[Cortical_plus_thalamus_CI == i] = (
                np.sum(M[Cortical_plus_thalamus_CI == i][:, Cortical_plus_thalamus_CI == i], 1)
                - Cortical_wm_mean[ix + 1, i]) / Cortical_wm_std[ix + 1, i]
        tha_wmd = np.nan_to_num(tha_wmd)
        WMDs += [tha_wmd]

    # organize output
    NNCs = np.array(NNCs)
    BNWRs = np.array(BNWRs)
    PCs = np.array(PCs)
    bPCs = np.array(bPCs)
    WMDs = np.array(WMDs)

    NNCs[:, Cortical_ROIs_positions] = np.array(
        Cortical_NNCs)[:, Cortical_ROIs_positions]
    BNWRs[:, Cortical_ROIs_positions] = np.array(
        Cortical_BNWR)[:, Cortical_ROIs_positions]
    PCs[:, Cortical_ROIs_positions] = np.array(
        Cortical_PCs)[:, Cortical_ROIs_positions]
    bPCs[:, Cortical_ROIs_positions] = np.array(
        Cortical_bPCs)[:, Cortical_ROIs_positions]
    WMDs[:, Cortical_ROIs_positions] = np.array(
        Cortical_WMDs)[:, Cortical_ROIs_positions]

    # average across thresholds, convert into percentage
    mean_NNC = (np.sum(NNCs, axis=0) / 15.0) * 100
    mean_BNWR = (np.sum(BNWRs, axis=0) / 15.0) * 100
    mean_PC = (np.sum(PCs, axis=0) /
               13.5) * 100  #this is the theoretical upper bound
    mean_bPC = (np.sum(bPCs, axis=0) /
                13.5) * 100  #this is the theoretical upper bound
    mean_WMD = (np.sum(WMDs, axis=0) / 15.0) * 100

    return PCs, BNWRs, NNCs, WMDs, bPCs, mean_NNC, mean_BNWR, mean_PC, mean_bPC, mean_WMD
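The thalamic WMD step above is the standard within-module degree z-score, except that each module's mean and standard deviation come from the binarized corticocortical matrix rather than from the thalamocortical matrix itself. For reference, a sketch of the standard same-matrix version:

import numpy as np

def wmd_zscore(bM, ci):
    '''Within-module degree z-score of binary matrix bM under partition ci.'''
    ci = np.asarray(ci)
    z = np.zeros(ci.size)
    for m in np.unique(ci):
        idx = ci == m
        k = bM[idx][:, idx].sum(axis=1)   # within-module degree of each node in m
        sd = k.std()
        z[idx] = (k - k.mean()) / sd if sd > 0 else 0
    return z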
GC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))
SC = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))
ST = np.zeros((len(np.arange(min_cost, max_cost + 0.01, 0.01)), size))

for i, cost in enumerate(np.arange(min_cost, max_cost + 0.01, 0.01)):

    tmp_matrix = bct.threshold_proportional(matrix, cost, copy=True)

    # # PC slow to compute, days per threshold
    # PC[i,:] = bct.participation_coef(tmp_matrix, CI)
    # fn = 'completed PC calculation for %s at:' %cost
    # print(fn)
    # print(datetime.now())
    #
    # WMD seems relatively fast, maybe 10min per threshold
    WMD[i, :] = bct.module_degree_zscore(tmp_matrix, CI)
    fn = 'completed WMD calculation for %s at:' % cost
    print(fn)
    print(datetime.now())
    #
    # EC[i,:] = bct.eigenvector_centrality_und(tmp_matrix)
    # fn = 'completed EC calculation for %s at:' %cost
    # print(fn)
    # print(datetime.now())

    # GC[i,:], _ = bct.gateway_coef_sign(tmp_matrix, CI)
    # fn = 'completed GC calculation for %s at:' %cost
    # print(fn)
    # print(datetime.now())
    #
    # SC[i,:] = bct.subgraph_centrality(tmp_matrix)
Example #15
def cal_indiv_graph():
	'''loop through subjects and get PC/WMD/Q/eG/CI'''

	### loop through subjects, 1 to 156

	gordon_files = glob.glob("Data/*Gordon*.netcc")
	yeo_files = glob.glob("Data/*Yeo*.netcc")
	files = gordon_files + yeo_files

	for f in files:
		
		if f in gordon_files:		
			cmd = "cat %s | tail -n 352 > Data/test" %f 
			roi='gordon'
		
		if f in yeo_files:
			cmd = "cat %s | tail -n 422 > Data/test" %f #422 for Yeo
			roi='yeo'

		sub = f[5:8]
		os.system(cmd)


		# load matrix
		matrix = np.genfromtxt('Data/test',delimiter='\t',dtype=None)
		matrix[np.isnan(matrix)] = 0.0  
		matrix[matrix<0]=0.0


		# step through costs, do infomap, return final infomap across cost
		max_cost = .15
		min_cost = .01

		partition = ave_consensus_costs_parition(matrix, min_cost, max_cost)
		partition = np.array(partition) + 1

		# calculate modularity and global efficiency
		Q = cal_modularity_w_imposed_community(matrix,partition)
		Eg = bct.efficiency_wei(matrix)

		# import thresholded matrix to BCT, import partition, run WMD/PC
		PCs = np.zeros((len(np.arange(min_cost, max_cost+0.01, 0.01)), matrix.shape[0]))
		WMDs = np.zeros((len(np.arange(min_cost, max_cost+0.01, 0.01)), matrix.shape[0]))

		for i, cost in enumerate(np.arange(min_cost, max_cost + 0.01, 0.01)):
			
			tmp_matrix = threshold(matrix.copy(), cost)
			
			#PC
			PCs[i,:] = bct.participation_coef(tmp_matrix, partition)
			#WMD (on the thresholded matrix, matching the PC step above)
			WMDs[i,:] = bct.module_degree_zscore(tmp_matrix, partition)

		PC = np.mean(PCs, axis=0) # ave across thresholds
		WMD = np.mean(WMDs, axis=0)

		
		fn = "Graph_output/%s_%s_PC" %(sub, roi)
		np.savetxt(fn, PC)

		fn = "Graph_output/%s_%s_WMD" %(sub, roi)
		np.savetxt(fn, WMD)
		
		fn = "Graph_output/%s_%s_Q" %(sub, roi)
		np.savetxt(fn, np.array(Q, ndmin=1))

		fn = "Graph_output/%s_%s_Eg" %(sub, roi)
		np.savetxt(fn, np.array(Eg, ndmin=1))

		fn = "Graph_output/%s_%s_Partition" %(sub, roi)
		np.savetxt(fn, partition)
Example #16
if __name__ == '__main__':

    dat_dir = os.environ['pnd']
    graph_dir = dat_dir+'/graphs/'
    modularity_dir = dat_dir+'/modularity/'
    role_dir = dat_dir+'/node_roles'
    if not os.path.exists(role_dir):
        os.makedirs(role_dir)
    nnodes = 148

    for subjid in ['pandit', 'ctrl']:
        thresh_dens = '0.1'
        qscores = '%s.dens_%s.Qval' % (subjid, thresh_dens)
        df = np.loadtxt(os.path.join(modularity_dir, qscores))
        maxiter = df.argmax()
        trees_name = '%s.dens_%s.trees' % (subjid, thresh_dens)
        trees_in = os.path.join(modularity_dir, trees_name)
        coms = np.loadtxt(trees_in)[:, maxiter]

        graph_name = '%s.dens_%s.edgelist.gz' % (subjid, thresh_dens)
        g = make_networkx_graph(nnodes, os.path.join(graph_dir, graph_name))
        ga = nx.adjacency_matrix(g).toarray()

        pc = bct.participation_coef(ga, coms)
        wz = bct.module_degree_zscore(ga, coms)

        pc_out_name = '%s.dens_%s_part_coef.txt' % (subjid, thresh_dens)
        np.savetxt(os.path.join(role_dir, pc_out_name), pc, fmt='%.4f')
        wz_out_name = '%s.dens_%s_within_mod_Z.txt' % (subjid, thresh_dens)
        np.savetxt(os.path.join(role_dir, wz_out_name), wz, fmt='%.4f')
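`make_networkx_graph` above is a project helper; a rough equivalent that reads a (gzipped) edge list and keeps isolated nodes in the adjacency matrix (a sketch, assuming whitespace-separated integer node pairs):

import networkx as nx

def make_networkx_graph(nnodes, edgelist_path):
    g = nx.read_edgelist(edgelist_path, nodetype=int)   # handles .gz by extension
    g.add_nodes_from(range(nnodes))                     # keep isolated nodes
    return g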
Example #17
def Graph_Analysis(data,
                   weight=True,
                   thresh_func=bct.threshold_proportional,
                   threshold=.15,
                   plot_threshold=None,
                   community_alg=bct.community_louvain,
                   ref_community=None,
                   reorder=False,
                   display=True,
                   layout='kk',
                   print_options={},
                   plot_options={}):
    """
    Creates and displays graphs of a data matrix.
    
    Parameters
    ----------
    data: pandas DataFrame
        data to use to create the graph
    thresh_func: function that takes in a connectivity matrix and thresholds
        any algorithm that returns a connectivity matrix of the same size as the original may be used.
        intended to be used with bct.threshold_proportional or bct.threshold_absolute
    community_alg: function that takes in a connectivity matrix and returns community assignment
        intended to use algorithms from brain connectivity toolbox like community_louvain or 
        modularity_und. Must return a list of community assignments followed by Q, the modularity
        index
    edge_metric: str: 'pearson', 'spearman' or 'MI'
        relationship metric between nodes. MI stands for mutual information. "abs_"
        may be used in front of "pearson" or "spearman" to get the absolute value
        of the correlations, e.g. abs_pearson
    threshold: float 0 <= x <= 1, optional
        the proportion of weights to keep (to be passed to bct.threshold_proportional)
    weight: bool, optional
        if True, creates a weighted graph (vs. a binary)
    reorder: bool, optional
        if True, reorder vertices based on community assignment
    display: bool, optional
        if True, display the graph and print node membership
    layout: str: 'kk', 'circle', 'grid' or other igraph layouts, optional
        Determines how the graph is displayed
    avg_num_edges: int > 1
        thresholds the edges on the graph so each node has, on average, avg_num_edges
    print_options: dict, optional
        dictionary of arguments to be passed to print_community_members
    plot_options: dict, optional
        dictionary of arguments to be passed to plot_graph
        
    Returns
    ----------
    G: igraph Graph
        the graph object created by the function
    connectivity_matrix: pandas DataFrame
        the thresholded connectivity matrix used to create the graph
    visual_style: dict
        the igraph plotting options used to display the graph
    """

    #threshold and remove diagonal (data.values replaces the long-removed
    #pandas DataFrame.as_matrix)
    graph_mat = thresh_func(data.values, threshold)

    # make a binary version if not weighted
    if not weight:
        graph_mat = np.ceil(graph_mat)
        G = igraph.Graph.Adjacency(graph_mat.tolist(), mode='undirected')
    else:
        G = igraph.Graph.Weighted_Adjacency(graph_mat.tolist(),
                                            mode='undirected')
    column_names = data.columns
    # community detection
    # louvain by default; bct.modularity_und ("Newman's spectral community
    # detection") or bct.modularity_louvain_und_sign also fit here
    comm, mod = community_alg(graph_mat)
    # if there is a reference, relabel communities based on their closest association
    if ref_community:
        comm = relabel_community(comm, ref_community)
    G.vs['community'] = comm
    G.vs['id'] = range(len(G.vs))
    G.vs['name'] = column_names
    G.vs['within_module_degree'] = bct.module_degree_zscore(graph_mat, comm)
    G.vs['part_coef'] = bct.participation_coef(graph_mat, comm)

    if weight:
        G.vs['eigen_centrality'] = G.eigenvector_centrality(
            directed=False, weights=G.es['weight'])
    else:
        G.vs['eigen_centrality'] = G.eigenvector_centrality(directed=False)

    #if reorder, reorder vertices by community membership
    if reorder:
        G = community_reorder(G)
    # get connectivity matrix used to make the graph
    connectivity_matrix = graph_to_dataframe(G)
    # calculate subgraph (within-community) characteristics
    subgraph_analysis(G, community_alg=community_alg)

    # visualize
    layout_graph = None
    if plot_threshold:
        layout_mat = thresh_func(data.values, plot_threshold)
        layout_graph = igraph.Graph.Weighted_Adjacency(layout_mat.tolist(),
                                                       mode='undirected')
    visual_style = {}
    visual_style = get_visual_style(G,
                                    layout_graph,
                                    layout=layout,
                                    vertex_size='eigen_centrality',
                                    labels=G.vs['id'],
                                    size=1000)
    if display:
        # plot community structure
        print_community_members(G, **print_options)
        plot_graph(G, visual_style=visual_style, **plot_options)
    return (G, connectivity_matrix, visual_style)
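A hypothetical call (assuming `df` is a correlation-like pandas DataFrame; the function also relies on module-level helpers such as `community_reorder`, `graph_to_dataframe` and `plot_graph` defined elsewhere in this repo):

G, connectivity_matrix, visual_style = Graph_Analysis(
    df,
    threshold=.15,
    community_alg=bct.community_louvain,
    display=False)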