Code Example #1
from bct import (assortativity_wei, clustering_coef_wu,
                 eigenvector_centrality_und)


def extract_features(g, method=None):
    """
    Extract features from each node of graph g.
    :param g: adjacency matrix of g
    :param method: specifies which features to extract (currently unused)
    :return: features for each node
    """

    feat = list(clustering_coef_wu(g))                 # weighted clustering coefficient
    feat = feat + list(eigenvector_centrality_und(g))  # eigenvector centrality
    feat = feat + list(g.sum(axis=0))                  # node strength
    feat.append(assortativity_wei(g))                  # weighted assortativity (global)

    return feat
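
A minimal usage sketch (assumptions: bctpy is installed and imported as above;
the random symmetric matrix stands in for a real weighted connectome):

import numpy as np

rng = np.random.default_rng(0)
W = rng.random((10, 10))
W = (W + W.T) / 2        # symmetrize
np.fill_diagonal(W, 0)   # no self-connections

feat = extract_features(W)
# 10 clustering coefficients + 10 eigenvector centralities
# + 10 node strengths + 1 assortativity value = 31 features
print(len(feat))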
Code Example #2
File: graph.py Project: aestrivex/cvu
def do_opt(adj, mods, option):
    if option == 'global efficiency':
        return bct.efficiency_wei(adj)
    elif option == 'local efficiency':
        return bct.efficiency_wei(adj, local=True)
    elif option == 'average strength':
        return bct.strengths_und(adj)
    elif option == 'clustering coefficient':
        return bct.clustering_coef_wu(adj)
    elif option == 'eigenvector centrality':
        return bct.eigenvector_centrality_und(adj)
    elif option == 'binary kcore':
        return bct.kcoreness_centrality_bu(adj)[0]

    elif option == 'modularity':
        return bct.modularity_und(adj, mods)[1]
    elif option == 'participation coefficient':
        return bct.participation_coef(adj, mods)
    elif option == 'within-module degree':
        return bct.module_degree_zscore(adj, mods)
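
A hedged usage sketch (the random matrix and the bct-derived module assignment
are placeholder assumptions):

import numpy as np
import bct

rng = np.random.default_rng(0)
adj = rng.random((8, 8))
adj = (adj + adj.T) / 2
np.fill_diagonal(adj, 0)

mods, _ = bct.modularity_und(adj)   # community labels for the module-based options
print(do_opt(adj, mods, 'clustering coefficient'))
print(do_opt(adj, mods, 'participation coefficient'))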
Code Example #3
def get_true_network_metrics(A):
    """Compute network metrics of A; most are also normalized against the
    mean over 100 surrogate graphs produced by generate_fake_graph."""

    # control centrality
    c_c = control_centrality(A)

    cc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        cc_fake[i] = np.mean(control_centrality(generate_fake_graph(A)))

    m_cc_fake = np.mean(cc_fake)
    cc_norm = c_c / m_cc_fake

    # Get identity of node with lowest control centrality
    min_cc_true = np.where(c_c == np.amin(c_c))[0]

    # get synchronizability
    sync = synchronizability(A)

    # normalized sync
    sync_fake = np.zeros((100, 1))
    for i in range(0, 100):
        sync_fake[i] = synchronizability(generate_fake_graph(A))

    m_sync_fake = np.mean(sync_fake)
    sync_norm = sync / m_sync_fake

    # get betweenness centrality
    bc = betweenness_centrality(A)
    bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        bc_fake[i] = np.mean(betweenness_centrality(generate_fake_graph(A)))

    m_bc_fake = np.mean(bc_fake)
    bc_norm = bc / m_bc_fake

    # Get identity of node with max bc
    max_bc_true = np.where(bc == np.amax(bc))[0]

    # get eigenvector centrality
    ec = bct.eigenvector_centrality_und(A)
    ec_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ec_fake[i] = np.mean(
            bct.eigenvector_centrality_und(generate_fake_graph(A)))

    m_ec_fake = np.mean(ec_fake)
    ec_norm = ec / m_ec_fake

    # Get identity of node with max ec
    max_ec_true = np.where(ec == np.amax(ec))[0]

    # get edge betweenness centrality
    edge_bc, ignore = bct.edge_betweenness_wei(A)
    edge_bc_fake = np.zeros((100, 1))
    for i in range(0, 100):
        edge_bc_fake[i] = np.mean(
            bct.edge_betweenness_wei(generate_fake_graph(A))[0])
    m_edge_bc_fake = np.mean(edge_bc_fake)
    edge_bc_norm = edge_bc / m_edge_bc_fake

    # get clustering coeff
    clust = bct.clustering_coef_wu(A)
    clust_fake = np.zeros((100, 1))
    for i in range(0, 100):
        clust_fake[i] = np.mean(bct.clustering_coef_wu(generate_fake_graph(A)))

    m_clust_fake = np.mean(clust_fake)
    clust_norm = clust / m_clust_fake

    # Get identity of node with max clust
    max_clust_true = np.where(clust == np.amax(clust))[0]

    # get node strength
    ns = node_strength(A)
    ns_fake = np.zeros((100, 1))
    for i in range(0, 100):
        ns_fake[i] = np.mean(node_strength(generate_fake_graph(A)))

    m_ns_fake = np.mean(ns_fake)
    ns_norm = ns / m_ns_fake

    # Get identity of node with max ns
    max_ns_true = np.where(ns == np.amax(ns))[0]

    # Get modularity partition and participation coefficient
    Ci, ignore = bct.modularity_und(A)
    par = bct.participation_coef(A, Ci)

    # Get true efficiency
    eff = bct.efficiency_wei(A, 0)
    eff_fake = np.zeros((100, 1))
    for i in range(0, 100):
        eff_fake[i] = (bct.efficiency_wei(generate_fake_graph(A)))

    m_eff_fake = np.mean(eff_fake)
    eff_norm = eff / m_eff_fake

    # Get true transitivity
    trans = bct.transitivity_wu(A)
    trans_fake = np.zeros((100, 1))
    for i in range(0, 100):
        trans_fake[i] = (bct.transitivity_wu(generate_fake_graph(A)))

    m_trans_fake = np.mean(trans_fake)
    trans_norm = trans / m_trans_fake

    # store output results in a dictionary
    # nodal
    results = {}
    results['control_centrality'] = c_c
    results['control_centrality_norm'] = cc_norm
    results['min_cc_node'] = min_cc_true

    # global measure
    results['sync'] = sync
    results['sync_norm'] = sync_norm

    # nodal
    results['bc'] = bc
    results['bc_norm'] = bc_norm
    results['max_bc_node'] = max_bc_true

    # nodal
    results['ec'] = ec
    results['ec_norm'] = ec_norm
    results['max_ec_node'] = max_ec_true

    # nodal
    results['clust'] = clust
    results['clust_norm'] = clust_norm
    results['max_clust_node'] = max_clust_true

    # nodal
    results['ns'] = ns
    results['ns_norm'] = ns_norm
    results['max_ns_node'] = max_ns_true

    # global
    results['eff'] = eff
    results['eff_norm'] = eff_norm

    # global
    results['trans'] = trans
    results['trans_norm'] = trans_norm

    # nodal
    results['par'] = par

    # edge
    results['edge_bc'] = edge_bc
    results['edge_bc_norm'] = edge_bc_norm

    return results
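
Each block above repeats one pattern: compute a metric, recompute it on 100
surrogate graphs, and divide by the surrogate mean. A compact helper sketch of
that pattern, assuming the same generate_fake_graph null model used above:

import numpy as np

def normalize_by_null(metric_fn, A, n_null=100):
    # mean of the metric over n_null surrogate graphs
    null = np.array([np.mean(metric_fn(generate_fake_graph(A)))
                     for _ in range(n_null)])
    return metric_fn(A) / null.mean()

# e.g. clust_norm = normalize_by_null(bct.clustering_coef_wu, A)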
Code Example #4
scores_all = dict()

for toi in tois:
    cls_all = []
    pln_all = []
    for subject in subjects:
        cls = np.load(source_folder + "graph_data/%s_classic_corr_%s_orth.npy" %
                      (subject, toi))

        pln = np.load(source_folder + "graph_data/%s_plan_corr_%s_orth.npy" %
                      (subject, toi))

        cls_all.append(cls.mean(axis=0))
        pln_all.append(pln.mean(axis=0))

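        # note: data_cls / data_pln are recomputed on every pass through the
        # subject loop; only the values from the final pass are used below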
        data_cls = np.asarray([bct.eigenvector_centrality_und(g)
                               for g in cls_all])
        data_pln = np.asarray([bct.eigenvector_centrality_und(g)
                               for g in pln_all])

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedKFold(n_splits=6, shuffle=True)

    cv_params = {
        "learning_rate": np.arange(0.1, 1.1, 0.1),
        'n_estimators': np.arange(1, 80, 2)
    }

    grid = GridSearchCV(
Code Example #5
from jumeg import get_jumeg_path  # assumed source of get_jumeg_path, used below
from jumeg.connectivity import plot_degree_circle, plot_lines_and_blobs

import matplotlib.pyplot as plt
import mne
import numpy as np

orig_labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
yaml_fname = get_jumeg_path() + '/data/desikan_aparc_cortex_based_grouping.yaml'
con_fname = get_jumeg_path() + '/data/sample,aparc-con.npy'

# real connectivity
con = np.load(con_fname)
con = con[0, :, :, 2] + con[0, :, :, 2].T
degrees = mne.connectivity.degree(con, threshold=0.2)

import bct
eigenvec_centrality = bct.eigenvector_centrality_und(con)

fig, ax = plot_lines_and_blobs(con,
                               degrees,
                               yaml_fname,
                               orig_labels_fname,
                               figsize=(8, 8),
                               show_node_labels=False,
                               show_group_labels=True,
                               n_lines=100,
                               out_fname=None,
                               degsize=10)
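# note: the nodal values plotted above are `degrees`; eigenvec_centrality is
# computed but not used in the call, despite the title set below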
ax.set_title('Eigen vector centrality: Coh,alpha')
fig.tight_layout()

# test connections
Code Example #6
File: metrics.py Project: Davi1990/DissNet
    def centrality(self,
                   sbj_number,
                   nodes_number,
                   atlas,
                   make_symmetric=True,
                   upper_threshold=None,
                   lower_threshold=None,
                   binarize=False):
        '''
        Compute centrality measures of the adjacency matrix.


        Parameters
        ----------
        sbj_number: int |
                    number of subjects
        nodes_number: int |
                      number of nodes
        atlas: excel file |
                    please see the example available in the repo (e.g. new_atlas_coords.xlsx)
        make_symmetric: Boolean |
                        True indicates that the matrix is either upper
                        or lower triangular and needs to be symmetrized;
                        False indicates that the matrix is already a full matrix
        upper_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values with respect to the maximum. Values
                        below that threshold will be set to 0 (default is None)
        lower_threshold: int |
                        an integer value ranging from 0 to 100 representing the
                        percentage of values with respect to the maximum. Values
                        above that threshold will be set to 0 (default is None)
        binarize: Boolean |
                        True will make the connectivity matrix binary.
                        Default is False


        Returns
        -------

        dict: dictionary with the following keys |

        edge_betweeness_bin: | np.ndarray
                            Betweenness centrality is the fraction of all
                            shortest paths in the network that pass through a
                            given element. Despite the key name, this entry
                            stores the binary *node* betweenness centrality
                            vector (bct.betweenness_bin).
        edge_betweeness_wei: | np.ndarray
                            As above, computed on the weighted matrix with
                            bct.betweenness_wei; it stores the weighted node
                            betweenness centrality vector.
        eigenvector_centrality_und: | np.ndarray
                            Eigenvector centrality is a self-referential measure
                            of centrality: nodes have high eigenvector centrality
                            if they connect to other nodes that have high
                            eigenvector centrality. The eigenvector centrality of
                            node i is equivalent to the ith element of the
                            eigenvector corresponding to the largest eigenvalue
                            of the adjacency matrix. It will return the
                            eigenvector associated with the largest eigenvalue
                            of the matrix.
        coreness_kcoreness_centrality_bu: | np.ndarray
                            The k-core is the largest subgraph comprising nodes
                            of degree at least k. The coreness of a node is k if
                            the node belongs to the k-core but not to the
                            (k+1)-core. It will return the coreness of every
                            node, computed on the binarized matrix.
        kn_kcoreness_centrality_bu: | np.ndarray
                            As above; it will return the size of each k-core.
        module_degree_zscore: | np.ndarray
                            The within-module degree z-score is a within-module
                            version of degree centrality. It will return the
                            within-module degree z-score.
        participation_coef: | np.ndarray
                            Participation coefficient is a measure of the
                            diversity of intermodular connections of individual
                            nodes. It will return the participation coefficient.
        subgraph_centrality: | np.ndarray
                            The subgraph centrality of a node is a weighted sum
                            of closed walks of different lengths in the network
                            starting and ending at the node. It will return a
                            vector of subgraph centralities, one per node,
                            computed on the binarized matrix.

        '''

        with open(self.net_label_txt) as f:
            net = f.read().splitlines()

        self.atlas = pd.read_excel(atlas, header=None)
        self.atlas = np.array(self.atlas)
        self.ci_original = self.atlas[:, 8]

        self.centrality = {
            "edge_betweeness_bin":
            np.zeros([sbj_number, nodes_number]),
            "edge_betweeness_wei":
            np.zeros([sbj_number, nodes_number]),
            "eigenvector_centrality_und":
            np.zeros([sbj_number, nodes_number]),
            "coreness_kcoreness_centrality_bu":
            np.zeros([sbj_number, nodes_number]),
            "kn_kcoreness_centrality_bu":
            np.zeros([sbj_number, nodes_number]),
            "module_degree_zscore":
            np.zeros([sbj_number, nodes_number]),
            "participation_coef":
            np.zeros([sbj_number, nodes_number]),
            "subgraph_centrality":
            np.zeros([sbj_number, nodes_number])
        }

        for subj in range(len(self.matrices_files)):
            self.matrix = pd.read_csv(self.matrices_files[subj],
                                      sep=' ',
                                      header=None)
            self.matrix = np.array(self.matrix)
            if make_symmetric:
                self.matrix = self.matrix + self.matrix.T - np.diag(
                    self.matrix.diagonal())

            self.max = np.max(self.matrix.flatten())
            if upper_threshold is not None:
                self.matrix[self.matrix < upper_threshold * self.max / 100] = 0
            if lower_threshold is not None:
                self.matrix[self.matrix > lower_threshold * self.max / 100] = 0

            self.matrix_bin = bct.algorithms.binarize(self.matrix)
            self.matrix_weight = self.matrix

            if binarize:
                self.matrix = bct.algorithms.binarize(self.matrix)

            np.fill_diagonal(self.matrix, 0)
            np.fill_diagonal(self.matrix_bin, 0)
            np.fill_diagonal(self.matrix_weight, 0)

            self.BC = bct.betweenness_bin(self.matrix_bin)
            self.centrality['edge_betweeness_bin'][subj] = self.BC

            self.BC_w = bct.betweenness_wei(self.matrix_weight)
            self.centrality['edge_betweeness_wei'][subj] = self.BC_w

            self.v = bct.eigenvector_centrality_und(self.matrix)
            self.centrality['eigenvector_centrality_und'][subj] = self.v

            self.coreness, self.kn = bct.kcoreness_centrality_bu(
                self.matrix_bin)
            self.centrality['coreness_kcoreness_centrality_bu'][
                subj] = self.coreness
            self.centrality['kn_kcoreness_centrality_bu'][subj] = self.kn

            self.Z = bct.module_degree_zscore(self.matrix, ci=self.ci_original)
            self.centrality['module_degree_zscore'][subj] = self.Z

            self.P = bct.participation_coef(self.matrix, ci=self.ci_original)
            self.centrality['participation_coef'][subj] = self.P

            self.Cs = bct.subgraph_centrality(self.matrix_bin)
            self.centrality['subgraph_centrality'][subj] = self.Cs

        return self.centrality
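
The upper/lower thresholding inside the loop above keeps or zeroes entries
relative to the matrix maximum; a standalone sketch of that logic, assuming the
same percentage-of-max semantics:

import numpy as np

def threshold_by_percent_of_max(matrix, upper=None, lower=None):
    # zero entries below upper% of the maximum, then entries above lower% of it
    m = matrix.copy()
    peak = m.max()
    if upper is not None:
        m[m < upper * peak / 100] = 0
    if lower is not None:
        m[m > lower * peak / 100] = 0
    return m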
Code Example #7
for toi in tois:
    cls_all = []
    pln_all = []
    for subject in subjects:
        cls = np.load(source_folder +
                      "graph_data/%s_classic_corr_%s_orth.npy" %
                      (subject, toi))

        pln = np.load(source_folder + "graph_data/%s_plan_corr_%s_orth.npy" %
                      (subject, toi))

        cls_all.append(cls.mean(axis=0))
        pln_all.append(pln.mean(axis=0))

        data_cls = np.asarray(
            [bct.eigenvector_centrality_und(g) for g in cls_all])
        data_pln = np.asarray(
            [bct.eigenvector_centrality_und(g) for g in pln_all])

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedKFold(n_splits=6, shuffle=True)

    cv_params = {
        "learning_rate": np.arange(0.1, 1.1, 0.1),
        'n_estimators': np.arange(1, 80, 2)
    }

    grid = GridSearchCV(AdaBoostClassifier(),
                        cv_params,
Code Example #8
for subject in subjects:
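    # note: these .npy files hold pickled dicts, so NumPy >= 1.16.3 requires
    # np.load(..., allow_pickle=True) here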
    cls = np.load(source_folder + "graph_data/%s_classic_pow_pln.npy" %
                  subject).item()

    pln = np.load(source_folder + "graph_data/%s_plan_pow_pln.npy" %
                  subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(np.asarray([bct.eigenvector_centrality_und(g)
                                    for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(np.asarray([bct.eigenvector_centrality_und(g)
                                    for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedShuffleSplit(y, test_size=0.1)
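    # note: pre-0.18 scikit-learn API; current versions are used as
    # StratifiedShuffleSplit(test_size=0.1).split(X, y)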
Code Example #9
for subject in subjects:
    cls = np.load(source_folder + "graph_data/%s_classic_pow_post.npy" %
                  subject).item()

    pln = np.load(source_folder + "graph_data/%s_plan_pow_post.npy" %
                  subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(
            np.asarray([bct.eigenvector_centrality_und(g)
                        for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(
            np.asarray([bct.eigenvector_centrality_und(g)
                        for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedKFold(y, n_folds=6, shuffle=True)
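    # note: pre-0.18 scikit-learn API; current versions:
    # StratifiedKFold(n_splits=6, shuffle=True).split(X, y)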
Code Example #10
File: resilience.py Project: Davi1990/DissNet
    def nodal_degree_vulnerability(self,
                                   sbj_number,
                                   nodes_number,
                                   make_symmetric=True,
                                   binarize=False,
                                   threshold=None,
                                   recalculate=False,
                                   attack_type='target',
                                   metric2use='degree'):
        '''
        Performs robustness analysis based on a nodal metric.


        Parameters
        ----------
        sbj_number: int |
                    number of subjects
        nodes_number: int |
                      number of nodes
        make_symmetric: Boolean |
                        True indicates that the matrix is either upper
                        or lower triangular and needs to be symmetrized;
                        False indicates that the matrix is already a full matrix
        binarize: Boolean |
                        True will make the connectivity matrix binary.
                        Default is False
        threshold: int |
                        an integer value ranging from 0 to 100; entries below
                        that percentage of the matrix maximum are set to 0
                        (default is None)
        recalculate: Boolean |
                        It will use a sequential (recalculate=True) or
                        simultaneous (recalculate=False) attack approach.
                        Default is False
        attack_type: str |
                        It can be either 'target' or 'random'
        metric2use: str |
                        nodal metric used to rank nodes for a targeted attack;
                        one of 'degree', 'eigenvector_centrality',
                        'betweenness_bin' or 'betweenness_wei'.
                        Default is 'degree'

        Returns
        -------

        vulnerability: np.ndarray |
                    the overall vulnerability of the network per subject,
                    followed by the fraction of nodes removed (x), the relative
                    size of the largest connected component (y), and the
                    absolute size of the largest component at each step

        '''

        self.all_vulnerability = np.zeros([sbj_number])
        self.all_x = np.zeros([sbj_number, nodes_number])
        self.all_y = np.zeros([sbj_number, nodes_number])
        self.all_largest_comp = np.zeros([sbj_number, nodes_number])

        for subj in range(len(self.matrices_files)):
            self.matrix = pd.read_csv(self.matrices_files[subj],
                                      sep=' ',
                                      header=None)
            self.matrix = np.array(self.matrix)
            if make_symmetric:
                self.matrix = self.matrix + self.matrix.T - np.diag(
                    self.matrix.diagonal())

            if binarize:
                self.matrix = bct.algorithms.binarize(self.matrix)

            if threshold is not None:
                self.matrix[self.matrix < threshold *
                            np.max(self.matrix.flatten()) / 100] = 0

            np.fill_diagonal(self.matrix, 0)
            if attack_type == 'target':
                if metric2use == 'degree':
                    self.deg = bct.algorithms.degrees_und(self.matrix)
                elif metric2use == 'eigenvector_centrality':
                    self.deg = bct.eigenvector_centrality_und(self.matrix)
                elif metric2use == 'betweenness_bin':
                    self.deg = bct.betweenness_bin(self.matrix)
                elif metric2use == 'betweenness_wei':
                    self.deg = bct.betweenness_wei(self.matrix)
                self.g = networkx.convert_matrix.from_numpy_array(self.matrix)
                self.m = dict(enumerate(self.deg.flatten(), 0))
                self.l = sorted(self.m.items(),
                                key=operator.itemgetter(1),
                                reverse=True)
                self.x = []
                self.y = []
                self.lcomp = []
                self.largest_component = max(
                    networkx.connected_components(self.g), key=len)
                self.n = len(self.g.nodes())
                self.x.append(0)
                self.y.append(len(self.largest_component) * 1. / self.n)
                self.lcomp.append(len(self.largest_component))
                self.R = 0.0
                for i in range(1, self.n):
                    self.g.remove_node(self.l.pop(0)[0])
                    if recalculate:
                        self.matrix = networkx.convert_matrix.to_numpy_array(
                            self.g)
                        self.g = networkx.convert_matrix.from_numpy_array(
                            self.matrix)
                        if metric2use == 'degree':
                            self.deg = bct.algorithms.degrees_und(self.matrix)
                        elif metric2use == 'eigenvector_centrality':
                            self.deg = bct.eigenvector_centrality_und(
                                self.matrix)
                        elif metric2use == 'betweenness_bin':
                            self.deg = bct.betweenness_bin(self.matrix)
                        elif metric2use == 'betweenness_wei':
                            self.deg = bct.betweenness_wei(self.matrix)
                        self.m = dict(enumerate(self.deg.flatten(), 0))
                        self.l = sorted(self.m.items(),
                                        key=operator.itemgetter(1),
                                        reverse=True)
                    self.largest_component = max(
                        networkx.connected_components(self.g), key=len)
                    self.x.append(i * 1. / self.n)
                    self.R += len(self.largest_component) * 1. / self.n
                    self.y.append(len(self.largest_component) * 1. / self.n)
                    self.lcomp.append(len(self.largest_component))

            elif attack_type == 'random':
                self.g = networkx.convert_matrix.from_numpy_array(self.matrix)
                self.l = [(node, 0) for node in self.g.nodes()]
                random.shuffle(self.l)
                self.x = []
                self.y = []
                self.lcomp = []
                self.largest_component = max(
                    networkx.connected_components(self.g), key=len)
                self.n = len(self.g.nodes())
                self.x.append(0)
                self.y.append(len(self.largest_component) * 1. / self.n)
                self.lcomp.append(len(self.largest_component))
                self.R = 0.0
                for i in range(1, self.n):
                    self.g.remove_node(self.l.pop(0)[0])
                    self.largest_component = max(
                        networkx.connected_components(self.g), key=len)
                    self.x.append(i * 1. / self.n)
                    self.R += len(self.largest_component) * 1. / self.n
                    self.y.append(len(self.largest_component) * 1. / self.n)
                    self.lcomp.append(len(self.largest_component))

            self.all_x[subj] = np.array(self.x)
            self.all_y[subj] = np.array(self.y)
            self.all_vulnerability[subj] = np.array(0.5 - self.R / self.n)
            self.all_largest_comp[subj] = np.array(self.lcomp)

        return self.all_vulnerability, self.all_x, self.all_y, self.all_largest_comp
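
A minimal standalone sketch of the simultaneous targeted-attack loop above
(networkx only; the toy random graph and plain degree ranking are placeholder
assumptions):

import networkx as nx

g = nx.erdos_renyi_graph(30, 0.2, seed=0)
n = g.number_of_nodes()
order = sorted(g.degree, key=lambda kv: kv[1], reverse=True)  # highest degree first
R = 0.0
for node, _ in order[:-1]:
    g.remove_node(node)
    R += len(max(nx.connected_components(g), key=len)) / n
print(0.5 - R / n)   # same vulnerability statistic as returned above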
Code Example #11
def load_network(kind,
                 parcel,
                 data="lau",
                 weights='log',
                 hemi="both",
                 version=1,
                 subset="all",
                 path=None):
    '''
    Function to load a dictionary containing information about the specified
    brain network.

    Parameters
    ----------
    kind : string
        Either 'SC' or 'FC'.
    hemi : string
        Either "both", "L" or "R".
    weights : string
        The weights of the edges. The options are "normal", "log" or "binary".
        The default is "log".
    data : string
        Either "HCP" or "lau".
    parcel : string
        Either "68", "114", ... [if 'lau'] / "s400", "s800" [if "HCP"]
    version : int
        Version of the network.
    subset : string
        Either 'discov', 'valid' or 'all'
    path : string
        path to the "data" folder in which the data is stored. If
        None, assumes that the path is the current folder.

    Returns
    -------
    Network : dictionary
        Dictionary storing relevant attributes about the network
    '''

    # Initialize dictionary + store basic information about the network
    Network = {}
    Network["info"] = {}
    Network["info"]["kind"] = kind
    Network["info"]["parcel"] = parcel
    Network["info"]["data"] = data
    Network["info"]["hemi"] = hemi
    Network["info"]["weights"] = weights
    Network["info"]["version"] = version
    Network["info"]["subset"] = subset

    # Modify parameter names to what they are in file names
    version = '' if version == 1 else '_v' + str(version)
    subset = '' if subset == 'all' else subset
    hemi = '' if hemi == 'both' else hemi

    # Store important paths for loading the relevant data
    main_path = path + "/brainNetworks/" + data + "/"
    network_path = (main_path + "matrices/consensus/" + subset + kind +
                    parcel + hemi + version + "/")
    matrix_path = network_path + "/" + weights

    # Store general information about the network's parcellation
    parcel_info = get_general_parcellation_info(parcel)
    Network['order'] = parcel_info[0]
    Network['noplot'] = parcel_info[1]
    Network['lhannot'] = parcel_info[2]
    Network['rhannot'] = parcel_info[3]
    Network['atlas'] = parcel_info[4]

    # Load the cammoun_id of the parcellation, if Cammoun (i.e. 033, 060, etc.)
    if parcel[0] != 's':
        Network['cammoun_id'] = parcel_to_n(parcel)

    # masks
    masks = get_node_masks(Network, path=main_path)
    Network['node_mask'] = masks[0]
    Network['hemi_mask'] = masks[1]
    Network['subcortex_mask'] = masks[2]

    # hemisphere
    Network['hemi'] = get_hemi(Network, path=main_path)

    # coordinates
    Network['coords'] = get_coordinates(Network, path=main_path)

    # Adjacency matrix
    Network['adj'], last_modified = get_adjacency(Network,
                                                  matrix_path,
                                                  minimal_processing=True,
                                                  return_last_modified=True)

    # Test whether the network is connected. Raise a warning if not...
    if not np.all(bct.reachdist(Network['adj'])[0]):
        warnings.warn(("This brain network appears to be disconnected. This "
                       "might cause problems for the computation of the other "
                       "measures"))

    # node strength
    Network["str"] = np.sum(Network['adj'], axis=0)

    # Inverse of adjacency matrix
    inv = Network['adj'].copy()
    inv[Network['adj'] > 0] = 1 / inv[Network['adj'] > 0]
    Network["inv_adj"] = inv

    # distance
    Network["dist"] = cdist(Network["coords"], Network["coords"])

    # clustering coefficient
    Network["cc"] = bct.clustering_coef_wu(Network['adj'])

    # shortest path
    Network['sp'] = get_shortest_path(Network,
                                      matrix_path=matrix_path,
                                      last_modified=last_modified)

    # diffusion embedding
    de = compute_diffusion_map(Network['adj'],
                               n_components=10,
                               return_result=True,
                               skip_checks=True)
    Network["de"] = de[0]
    Network["de_extra"] = de[1]

    # Principal components
    Network['PCs'], Network['PCs_ev'] = getPCs(Network['adj'])

    # eigenvector centrality
    Network["ec"] = bct.eigenvector_centrality_und(Network['adj'])

    # mean first passage time
    Network["mfpt"] = bct.mean_first_passage_time(Network['adj'])

    # betweenness centrality
    Network['bc'] = get_betweenness(Network,
                                    matrix_path=matrix_path,
                                    last_modified=last_modified)

    # routing efficiency
    Network["r_eff"] = efficiency(Network)

    # diffusion efficiency
    Network["d_eff"] = efficiency_diffusion(Network)

    # subgraph centrality
    Network["subc"] = bct.subgraph_centrality(Network["adj"])

    # closeness centrality
    Network['clo'] = 1 / np.mean(Network['sp'], axis=0)

    # communities + participation coefficient
    path = matrix_path + "/communities/"
    if os.path.exists(path):
        files = []
        for i in os.listdir(path):
            if os.path.isfile(os.path.join(path, i)) and 'ci_' in i:
                files.append(i)
        if len(files) > 0:
            Network["ci"] = []
            for file in files:
                Network["ci"].append(np.load(os.path.join(path, file)))

            Network["ppc"] = []
            for i in range(len(files)):
                ppc = bct.participation_coef(Network['adj'], Network["ci"][i])
                Network["ppc"].append(ppc)

    # Edge lengths
    if (data == "HCP") and (kind == "SC"):
        path = main_path + "matrices/" + subset + kind + parcel + hemi + "_lengths.npy"
        if os.path.exists(path):
            Network["lengths"] = np.load(path)

    # streamline connection lengths
    path = network_path + "/len.npy"
    if os.path.exists(path):
        Network['len'] = np.load(path)

    # ROI names
    if parcel[0] != "s":
        Network['ROInames'] = get_ROInames(Network)

    # geodesic distances between nodes
    if parcel[0] == "s":
        n = parcel[1:]
        fname_l = n + "Parcels7Networks_lh_dist.csv"
        fname_r = n + "Parcels7Networks_rh_dist.csv"
    else:
        fname_l = "scale" + Network['cammoun_id'] + "_lh_dist.csv"
        fname_r = "scale" + Network['cammoun_id'] + "_rh_dist.csv"
    Network['geo_dist_L'] = pd.read_csv(main_path + "/geodesic/medial/" +
                                        fname_l,
                                        header=None).values
    Network['geo_dist_R'] = pd.read_csv(main_path + "/geodesic/medial/" +
                                        fname_r,
                                        header=None).values

    return Network
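
A hedged usage sketch (the path is hypothetical; argument values follow the
docstring above):

net = load_network(kind='SC', parcel='114', data='lau', weights='log',
                   hemi='both', version=1, subset='all', path='/path/to/project')
print(net['adj'].shape)   # adjacency matrix
print(net['ec'][:5])      # eigenvector centrality of the first five nodes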
Code Example #12
for subject in subjects:
    cls = np.load(source_folder + "graph_data/%s_classic_pow_pre.npy" %
                  subject).item()

    pln = np.load(source_folder + "graph_data/%s_plan_pow_pre.npy" %
                  subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(
            np.asarray([bct.eigenvector_centrality_und(g)
                        for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(
            np.asarray([bct.eigenvector_centrality_und(g)
                        for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedKFold(y, n_folds=6, shuffle=True)
Code Example #13
    def ec(self):
        """eigenvector centrality"""
        return bct.eigenvector_centrality_und(self.adj)
Code Example #14
def process(data):
    return bct.eigenvector_centrality_und(data)
Code Example #15
File: identipy.py Project: kbonna/opti-measures
def calc_graph_vector(filename, thresholds):
    '''
    This function calculates graph measures for a connectivity matrix loaded from a
    text file and saves the results under the same name with the added suffix '_GV'
    (in the same directory where filename is located).

    Input arguments:
        filename(str):     name of file containing the connectivity matrix (txt extension)
        thresholds(list):  list containing thresholds of interest

    Kamil Bonna, 14.08.2018
    '''
    #--- check inputs
    import os
    if not os.path.exists(filename):
        raise Exception('{} does not exist'.format(filename))
    if not isinstance(thresholds, list):
        raise Exception('thresholds should be a list!')
        
    import numpy as np
    import bct

    #=== inner variables
    N_rep_louvain = 10   # number of Louvain algorithm repetitions
    N_measures = 10      # number of graph measures
    gamma = 1            # Louvain resolution parameter
    
    #--- load matrix 
    A_raw = np.loadtxt(filename)
    N = A_raw.shape[0]   # number of nodes
    M_sat = N*(N-1)/2    # max number of connections 

    #=== calculate output
    graph_measures = np.zeros([ len(thresholds), N_measures ])  # create empty output matrix
    for thr in range(len(thresholds)):
        #--- thresholding
        A = bct.threshold_proportional(A_raw, p=thresholds[thr], copy=True)
        A[np.nonzero(A<0)] = 0                                  # ensure only positive weights
        M_act = A[np.nonzero(A>0)].shape[0] / 2                 # actual number of nonzero connections
        #--- calculate measures
        #-- mean connection strength
        S = np.sum(A)/M_act
        #-- connection strength std
        Svar = np.std(A[np.nonzero(A)])
        #-- modularity
        [M,Q] = bct.modularity_louvain_und(A, gamma)
        for i in range(N_rep_louvain) :
            [Mt,Qt] = bct.modularity_louvain_und(A, gamma)
            if Qt > Q :
                Q = Qt
                M = Mt
        #-- participation coefficient
        # note: participation_coef_sign returns (Ppos, Pneg); the mean averages both
        P = np.mean(bct.participation_coef_sign(A, M))
        #-- clustering 
        C = np.mean(bct.clustering_coef_wu(A))
        #-- transitivity 
        T = bct.transitivity_wu(A)
        #-- assortativity
        Asso = bct.assortativity_wei(A)
        #-- global & local efficiency 
        Eglo = bct.efficiency_wei(A)
        Eloc = np.mean(bct.efficiency_wei(A, local=True))
        #-- mean eigenvector centrality
        Eig = np.mean(bct.eigenvector_centrality_und(A))
        #--- write vector to matrix
        graph_measures[thr] = [ S, Svar, Q, P, C, T, Asso, Eglo, Eloc, Eig ]

    #=== save results to file
    np.savetxt(filename[:-4] + '_GV.txt', graph_measures)
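
A hedged usage sketch (the filename is hypothetical; the thresholds are the
proportions passed to bct.threshold_proportional):

calc_graph_vector('subject01_matrix.txt', [0.5, 0.4, 0.3])
# writes subject01_matrix_GV.txt: one row per threshold with
# [S, Svar, Q, P, C, T, Asso, Eglo, Eloc, Eig]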