Example #1
    def create_feature_matrix(self):
       # Feature matrix with each element containing an NxN array
       feature_matrix = []

       # EDGE WEIGHT (Depth 0)
       structural_connectivity_array = self.get_structure_and_function()
       feature_matrix.append(structural_connectivity_array)

       # DEGREE (Depth 1 & 2)
       deg = bct.degrees_und(structural_connectivity_array)
       self.fill_array_2D(feature_matrix, deg)

       # Conversion of connection weights to connection lengths
       connection_length_matrix = bct.weight_conversion(structural_connectivity_array, 'lengths')

       # SHORTEST PATH LENGTH (Depth 3 & 4)
       shortest_path = bct.distance_wei(connection_length_matrix)
       feature_matrix.append(shortest_path[0])  # distance (shortest weighted path) matrix
       feature_matrix.append(shortest_path[1])  # matrix of number of edges in shortest weighted path

       # BETWEENNESS CENTRALITY (Depth 5 & 6)
       bc = bct.betweenness_wei(connection_length_matrix)
       self.fill_array_2D(feature_matrix, bc)

       # CLUSTERING COEFFICIENTS (Depth 7 & 8)
       cl = bct.clustering_coef_wu(connection_length_matrix)
       self.fill_array_2D(feature_matrix, cl)

       return feature_matrix
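The helper `fill_array_2D` is not shown in these examples. Judging from the "Depth 1 & 2" comments, it appends two NxN arrays built from a length-N node vector; a minimal sketch under that assumption (the row/column orientation is a guess, not the source's implementation):

import numpy as np

def fill_array_2D(feature_matrix, vector):
    # Hypothetical helper (not in the source): broadcast a length-N node
    # vector into two NxN feature planes, one constant along rows and one
    # constant along columns, matching the "Depth k & k+1" comments.
    n = len(vector)
    feature_matrix.append(np.tile(vector, (n, 1)))    # each row = vector
    feature_matrix.append(np.tile(vector, (n, 1)).T)  # each column = vector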
Example #2
def get_hubs(connectome, hub_count=6):
    node_betwn = bct.betweenness_wei(connectome)
    idx = node_betwn.argsort()
    n = len(node_betwn)
    node_betwn[idx[0:n - hub_count]] = 0
    node_betwn[idx[n - hub_count:]] = 1

    return node_betwn
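A quick way to exercise `get_hubs` is on a small random undirected network. `bct.betweenness_wei` interprets matrix entries as connection lengths, so a weight matrix should be converted first; the random network below is purely illustrative:

import numpy as np
import bct

# Small symmetric weighted network with a zero diagonal (illustrative)
rng = np.random.default_rng(0)
W = rng.random((20, 20))
W = (W + W.T) / 2
np.fill_diagonal(W, 0)

# Convert weights to lengths so that strong connections are "short"
L = bct.weight_conversion(W, 'lengths')

hub_mask = get_hubs(L, hub_count=6)  # binary vector: 1 marks a hub node
print(np.flatnonzero(hub_mask))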
Example #3
    def compute(self):
        centrality_unweighted = bct.betweenness_bin(self.binarized)
        centrality_weighted = bct.betweenness_wei(self.g)
        self.stats['Betweenness Unweighted'] = list(centrality_unweighted)
        self.stats['Betweenness Weighted'] = list(centrality_weighted)

        return self.stats
Example #4
def get_betweenness(Network, matrix_path, last_modified=0):
    '''
    Function to get the betweenness centrality of a network's nodes. If a
    matrix_path is given, this function will try to load cached centrality
    scores from that path. If the cached scores were generated before the
    adjacency matrix was last modified, the scores are recomputed and saved
    again.
    '''

    if matrix_path is not None:

        path = matrix_path + "/bc.npy"

        if not os.path.exists(path):

            print("betweenness centrality not found")
            print("computing betweenness centrality...")

            bc = bct.betweenness_wei(Network["inv_adj"])
            np.save(path, bc)

        elif os.path.getmtime(path) < last_modified:

            print("new adjacency matrix was found")
            print("computing betweenness centrality...")

            bc = bct.betweenness_wei(Network["inv_adj"])
            np.save(path, bc)

        else:

            bc = np.load(path)

    else:
        bc = bct.betweenness_wei(Network["inv_adj"])

    return bc
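A minimal call pattern, assuming `Network` is a dict that stores the inverted adjacency matrix (connection lengths) under "inv_adj", as the class in Example #13 below does; the random matrix here is illustrative only:

import numpy as np
import bct

rng = np.random.default_rng(0)
W = rng.random((10, 10))
W = (W + W.T) / 2
np.fill_diagonal(W, 0)

# Invert nonzero weights so entries become connection lengths
inv = W.copy()
inv[W > 0] = 1 / inv[W > 0]

network = {"inv_adj": inv}
bc = get_betweenness(network, matrix_path=None)  # compute without caching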
Example #5
def extract_epoch_graph_features(W):
    import bct

    L = bct.weight_conversion(W, "lengths")
    L[W == 0] = np.inf
    D, _ = bct.distance_wei(L)

    l, eff, ecc, radius, diameter = bct.charpath(D, include_infinite=False)

    return [
        bct.clustering_coef_wu(W),
        bct.efficiency_wei(W, local=True),
        bct.betweenness_wei(L),
        ecc,
        [l, eff, radius, diameter],
    ]
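`bct.weight_conversion(W, "lengths")` takes the element-wise inverse of the nonzero weights, so strong connections become short lengths; zero entries stay zero, which is why the code above then sets them to infinity before computing distances. A small illustrative check:

import numpy as np
import bct

W = np.array([[0., 2., 0.],
              [2., 0., 4.],
              [0., 4., 0.]])
L = bct.weight_conversion(W, 'lengths')
assert np.allclose(L, [[0., 0.5, 0.],
                       [0.5, 0., 0.25],
                       [0., 0.25, 0.]])  # lengths are 1/weight where W > 0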
Example #6
def get_outlier_nodes(data, feat="deg", threshold=1e-3):
    T = len(data)
    n = len(data[0])
    feature = np.empty((T, n))
    for t in range(0, T):
        if feat == "deg":
            feature[t] = data[t].sum(axis=0)
        elif feat == "cent":
            feature[t] = bct.betweenness_wei(data[t])
        elif feat == "cc":
            feature[t] = bct.clustering_coef_wu(data[t])
        elif feat == "assort":
            feature[t] = bct.assortativity_wei(data[t])

    diff = forward_diff(forward_diff(feature))
    ind_vec = diff > threshold
    print(ind_vec.sum(axis=None))
    return ind_vec
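`forward_diff` is not defined in this snippet. A minimal stand-in consistent with its use here would be a first-order difference along the time axis; this is an assumption, not the source's helper:

import numpy as np

def forward_diff(x):
    # Hypothetical helper: x[t+1] - x[t] for each node, along the time axis.
    # Applying it twice, as above, approximates a second derivative.
    return np.diff(x, axis=0)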
Example #7
def make_structural_connectivity_array(structure_file_path_array):
    # Get Structural Connectivity data in mat file format. Output from DSI studio
    structural_connectivity_array = []
    counter = 0
    for file_path in structure_file_path_array:
        structural_connectivity_array.append(
            np.array(pd.DataFrame(loadmat(file_path)['connectivity'])))
        counter += 1
        print("I\'m trying...{0}".format(counter))

    # *** Conversion of connection weights to connection lengths ***
    connection_length_matrix = []
    for s in structural_connectivity_array:
        connection_length_matrix.append(bct.weight_conversion(s, 'lengths'))

    # Betweenness Centrality
    btwn_cent_arr = []
    for structural_matrix in connection_length_matrix:
        btwn_cent_arr.append(bct.betweenness_wei(structural_matrix))

    return btwn_cent_arr
Example #8
File: metrics.py  Project: Davi1990/DissNet
    def centrality(self,
                   sbj_number,
                   nodes_number,
                   atlas,
                   make_symmetric=True,
                   upper_threshold=None,
                   lower_threshold=None,
                   binarize=False):
        '''
        Computes centrality measures of the adjacency matrix.


        Parameters
        ----------
        sbj_number: int |
                    number of subjects
        nodes_number: int |
                      number of nodes
        atlas: excel file |
                    please see the example available in the repo
                    (e.g. new_atlas_coords.xlsx)
        make_symmetric: Boolean |
                        True indicates that the matrix is either upper
                        or lower triangular and needs to be symmetrized;
                        False indicates that the matrix is already full
        upper_threshold: int |
                        an integer value ranging from 0 to 100, representing a
                        percentage of the maximum value. Values below that
                        threshold will be set to 0 (default is None)
        lower_threshold: int |
                        an integer value ranging from 0 to 100, representing a
                        percentage of the maximum value. Values above that
                        threshold will be set to 0 (default is None)
        binarize: Boolean |
                        True will make the connectivity matrix binary.
                        Default is False


        Returns
        -------

        dict: dictionary with the following keys |

        edge_betweeness_bin: | np.ndarray
                            Despite the key name, this entry holds node
                            betweenness centrality computed on the binarized
                            matrix: the fraction of all shortest paths in the
                            network that pass through a given node. Nodes with
                            high betweenness centrality participate in a large
                            number of shortest paths. It will return the node
                            betweenness centrality vector.
        edge_betweeness_wei: | np.ndarray
                            As above, but computed on the weighted matrix.
                            It will return the node betweenness centrality
                            vector.
        eigenvector_centrality_und: | np.ndarray
                            Eigenvector centrality is a self-referential measure
                            of centrality: nodes have high eigenvector centrality
                            if they connect to other nodes that have high
                            eigenvector centrality. The eigenvector centrality of
                            node i is equivalent to the ith element of the
                            eigenvector corresponding to the largest eigenvalue
                            of the adjacency matrix. It will return the
                            eigenvector associated with the largest eigenvalue
                            of the matrix.
        coreness_kcoreness_centrality_bu: | np.ndarray
                            The k-core is the largest subgraph comprising nodes
                            of degree at least k. The coreness of a node is k if
                            the node belongs to the k-core but not to the
                            (k+1)-core. This function computes the coreness of
                            all nodes for a given binary undirected connection
                            matrix. It will return the node coreness.
        kn_kcoreness_centrality_bu: | np.ndarray
                            As above; this entry holds the size of the k-core
                            for each k.
        module_degree_zscore: | np.ndarray
                            The within-module degree z-score is a within-module
                            version of degree centrality. It will return the
                            within-module degree z-score.
        participation_coef: | np.ndarray
                            The participation coefficient is a measure of the
                            diversity of intermodular connections of individual
                            nodes. It will return the participation coefficient.
        subgraph_centrality: | np.ndarray
                            The subgraph centrality of a node is a weighted sum
                            of closed walks of different lengths in the network
                            starting and ending at the node. It will return a
                            vector of subgraph centralities, one per node.

        '''

        with open(self.net_label_txt) as f:
            net = f.read().splitlines()

        self.atlas = pd.read_excel(atlas, header=None)
        self.atlas = np.array(self.atlas)
        self.ci_original = self.atlas[:, 8]

        self.centrality = {
            "edge_betweeness_bin":
            np.zeros([sbj_number, nodes_number]),
            "edge_betweeness_wei":
            np.zeros([sbj_number, nodes_number]),
            "eigenvector_centrality_und":
            np.zeros([sbj_number, nodes_number]),
            "coreness_kcoreness_centrality_bu":
            np.zeros([sbj_number, nodes_number]),
            "kn_kcoreness_centrality_bu":
            np.zeros([sbj_number, nodes_number]),
            "module_degree_zscore":
            np.zeros([sbj_number, nodes_number]),
            "participation_coef":
            np.zeros([sbj_number, nodes_number]),
            "subgraph_centrality":
            np.zeros([sbj_number, nodes_number])
        }

        for subj in range(len(self.matrices_files)):
            self.matrix = pd.read_csv(self.matrices_files[subj],
                                      sep=' ',
                                      header=None)
            self.matrix = np.array(self.matrix)
            if make_symmetric:
                self.matrix = self.matrix + self.matrix.T - np.diag(
                    self.matrix.diagonal())

            self.max = np.max(self.matrix.flatten())
            if upper_threshold is not None:
                self.matrix[self.matrix < upper_threshold * self.max / 100] = 0
            if lower_threshold is not None:
                self.matrix[self.matrix > lower_threshold * self.max / 100] = 0

            self.matrix_bin = bct.algorithms.binarize(self.matrix)
            self.matrix_weight = self.matrix

            if binarize:
                self.matrix = bct.algorithms.binarize(self.matrix)

            np.fill_diagonal(self.matrix, 0)
            np.fill_diagonal(self.matrix_bin, 0)
            np.fill_diagonal(self.matrix_weight, 0)

            self.BC = bct.betweenness_bin(self.matrix_bin)
            self.centrality['edge_betweeness_bin'][subj] = self.BC

            self.BC_w = bct.betweenness_wei(self.matrix_weight)
            self.centrality['edge_betweeness_wei'][subj] = self.BC_w

            self.v = bct.eigenvector_centrality_und(self.matrix)
            self.centrality['eigenvector_centrality_und'][subj] = self.v

            self.coreness, self.kn = bct.kcoreness_centrality_bu(
                self.matrix_bin)
            self.centrality['coreness_kcoreness_centrality_bu'][
                subj] = self.coreness
            self.centrality['kn_kcoreness_centrality_bu'][subj] = self.kn

            self.Z = bct.module_degree_zscore(self.matrix, ci=self.ci_original)
            self.centrality['module_degree_zscore'][subj] = self.Z

            self.P = bct.participation_coef(self.matrix, ci=self.ci_original)
            self.centrality['participation_coef'][subj] = self.P

            self.Cs = bct.subgraph_centrality(self.matrix_bin)
            self.centrality['subgraph_centrality'][subj] = self.Cs

        return self.centrality
Example #9
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_length_matrix, args.in_conn_matrix])

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    if not args.append_json:
        assert_outputs_exist(parser, args, args.out_json)
    else:
        logging.debug('Using --append_json, make sure to delete {} '
                      'before re-launching a group analysis.'.format(
                          args.out_json))

    if args.append_json and args.overwrite:
        parser.error('Cannot use the append option at the same time as '
                     'overwrite.\nAmbiguous behavior, consider deleting the '
                     'output json file first instead.')

    conn_matrix = load_matrix_in_any_format(args.in_conn_matrix)
    len_matrix = load_matrix_in_any_format(args.in_length_matrix)

    if args.filtering_mask:
        mask_matrix = load_matrix_in_any_format(args.filtering_mask)
        conn_matrix *= mask_matrix
        len_matrix *= mask_matrix
    N = len_matrix.shape[0]

    if args.avg_node_wise:
        func_cast = avg_cast
    else:
        func_cast = list_cast

    gtm_dict = {}
    betweenness_centrality = bct.betweenness_wei(len_matrix) / ((N - 1) *
                                                                (N - 2))
    gtm_dict['betweenness_centrality'] = func_cast(betweenness_centrality)
    ci, gtm_dict['modularity'] = bct.modularity_louvain_und(conn_matrix,
                                                            seed=0)

    gtm_dict['assortativity'] = bct.assortativity_wei(conn_matrix, flag=0)
    gtm_dict['participation'] = func_cast(
        bct.participation_coef_sign(conn_matrix, ci)[0])
    gtm_dict['clustering'] = func_cast(bct.clustering_coef_wu(conn_matrix))

    gtm_dict['nodal_strength'] = func_cast(bct.strengths_und(conn_matrix))
    gtm_dict['local_efficiency'] = func_cast(
        bct.efficiency_wei(len_matrix, local=True))
    gtm_dict['global_efficiency'] = func_cast(bct.efficiency_wei(len_matrix))
    gtm_dict['density'] = func_cast(bct.density_und(conn_matrix)[0])

    # bct.rich_club_wu warns about the matrix rank and produces NaN entries
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        tmp_rich_club = bct.rich_club_wu(conn_matrix)
    gtm_dict['rich_club'] = func_cast(tmp_rich_club[~np.isnan(tmp_rich_club)])

    # Path length gives an infinite distance for unconnected nodes
    # All of this is simply to fix that
    empty_connections = np.where(np.sum(len_matrix, axis=1) < 0.001)[0]
    if len(empty_connections):
        len_matrix = np.delete(len_matrix, empty_connections, axis=0)
        len_matrix = np.delete(len_matrix, empty_connections, axis=1)

    path_length_tuple = bct.distance_wei(len_matrix)
    gtm_dict['path_length'] = func_cast(path_length_tuple[0])
    gtm_dict['edge_count'] = func_cast(path_length_tuple[1])

    if not args.avg_node_wise:
        for i in empty_connections:
            gtm_dict['path_length'].insert(i, -1)
            gtm_dict['edge_count'].insert(i, -1)

    if args.small_world:
        gtm_dict['omega'], gtm_dict['sigma'] = omega_sigma(len_matrix)

    if os.path.isfile(args.out_json) and args.append_json:
        with open(args.out_json) as json_data:
            out_dict = json.load(json_data)
        for key in gtm_dict.keys():
            if isinstance(out_dict[key], list):
                out_dict[key].append(gtm_dict[key])
            else:
                out_dict[key] = [out_dict[key], gtm_dict[key]]
    else:
        out_dict = {}
        for key in gtm_dict.keys():
            out_dict[key] = [gtm_dict[key]]

    with open(args.out_json, 'w') as outfile:
        json.dump(out_dict,
                  outfile,
                  indent=args.indent,
                  sort_keys=args.sort_keys)
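`bct.betweenness_wei` returns raw shortest-path counts; dividing by (N-1)(N-2), the number of ordered node pairs excluding the node itself, rescales node betweenness to [0, 1], as done above. A small illustrative check:

import numpy as np
import bct

rng = np.random.default_rng(0)
L = rng.random((25, 25))
L = (L + L.T) / 2
np.fill_diagonal(L, 0)
N = L.shape[0]

bc = bct.betweenness_wei(L) / ((N - 1) * (N - 2))
assert bc.min() >= 0.0 and bc.max() <= 1.0  # normalized node betweenness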
Example #10

    # Fragment: the preceding "if group == 'patient':" branch, which sets
    # the patient participant list, is elided in the source.
    elif group == 'control':
        participants = ['c1', 'c2', 'c3', 'c5', 'c6', 'c7', 'c8']

    all_measures = np.empty(shape=[68, len(participants), 5])
    adjmats = np.empty(shape=[68, 68, len(participants)])
    counter = 0

    for participant in participants:
        adjmat = sio.loadmat(participant + '_FA.mat')
        adjmat = adjmat['adjacency_matrix']
        labels = get_parcellation_labels(
            generate_ROI_file(FreeSurfer_ROI_file)).values
        labels, adjmat = remove_non_cortical_ROIs(labels, adjmat)
        all_measures[:, counter, 0] = bct.degrees_und(adjmat)
        all_measures[:, counter, 1] = bct.strengths_und(adjmat)
        all_measures[:, counter, 2] = bct.clustering_coef_wu(adjmat)
        all_measures[:, counter, 3] = bct.betweenness_wei(adjmat)
        all_measures[:, counter, 4] = bct.efficiency_wei(adjmat, local=True)
        adjmats[:, :, counter] = adjmat
        counter += 1

    mean_measures = np.mean(all_measures, axis=1)
    if group == 'patient':
        patient = pd.DataFrame(mean_measures,
                               index=labels,
                               columns=[
                                   'patient.NodeDegree', 'patient.Strength',
                                   'patient.ClustCoeff', 'patient.BetweenCent',
                                   'patient.LocEff'
                               ])
        patient_measures = all_measures
        patient_adjmats = adjmats
    elif group == 'control':
        control = pd.DataFrame(mean_measures,
                               index=labels,
                               columns=[
                                   'control.NodeDegree', 'control.Strength',
                                   'control.ClustCoeff', 'control.BetweenCent',
                                   'control.LocEff'
                               ])
        control_measures = all_measures
        control_adjmats = adjmats
Example #11
    def nodal_degree_vulnerability(self,
                                   sbj_number,
                                   nodes_number,
                                   make_symmetric=True,
                                   binarize=False,
                                   threshold=None,
                                   recalculate=False,
                                   attack_type='target',
                                   metric2use='degree'):
        '''
        Performs robustness analysis based on nodal degree.


        Parameters
        ----------
        sbj_number: int |
                    number of subjects
        nodes_number: int |
                      number of nodes
        make_symmetric: Boolean |
                        True indicates that the matrix is either upper
                        or lower triangular and needs to be symmetrized;
                        False indicates that the matrix is already full
        binarize: Boolean |
                        True will make the connectivity matrix binary.
                        Default is False
        threshold: int |
                        an integer value ranging from 0 to 100, representing a
                        percentage of the maximum value. Values below that
                        threshold will be set to 0 (default is None)
        recalculate: Boolean |
                        use the sequential (recalculate=True) or
                        simultaneous (recalculate=False) approach.
                        Default is False
        attack_type: str |
                        either 'target' or 'random'
        metric2use: str |
                        nodal metric used to rank nodes for a targeted attack:
                        one of 'degree', 'eigenvector_centrality',
                        'betweenness_bin' or 'betweenness_wei'.
                        Default is 'degree'

        Returns
        -------

        vulnerability: np.ndarray |
                    the overall vulnerability of each subject's network,
                    returned together with the fraction of nodes removed,
                    the relative size of the largest connected component at
                    each step, and its absolute size

        '''

        self.all_vulnerability = np.zeros([sbj_number])
        self.all_x = np.zeros([sbj_number, nodes_number])
        self.all_y = np.zeros([sbj_number, nodes_number])
        self.all_largest_comp = np.zeros([sbj_number, nodes_number])

        for subj in range(len(self.matrices_files)):
            self.matrix = pd.read_csv(self.matrices_files[subj],
                                      sep=' ',
                                      header=None)
            self.matrix = np.array(self.matrix)
            if make_symmetric:
                self.matrix = self.matrix + self.matrix.T - np.diag(
                    self.matrix.diagonal())

            if binarize:
                self.matrix = bct.algorithms.binarize(self.matrix)

            if threshold is not None:
                self.matrix[self.matrix < threshold *
                            np.max(self.matrix.flatten()) / 100] = 0

            np.fill_diagonal(self.matrix, 0)
            if attack_type == 'target':
                if metric2use == 'degree':
                    self.deg = bct.algorithms.degrees_und(self.matrix)
                elif metric2use == 'eigenvector_centrality':
                    self.deg = bct.eigenvector_centrality_und(self.matrix)
                elif metric2use == 'betweenness_bin':
                    self.deg = bct.betweenness_bin(self.matrix)
                elif metric2use == 'betweenness_wei':
                    self.deg = bct.betweenness_wei(self.matrix)
                self.g = networkx.convert_matrix.from_numpy_array(self.matrix)
                self.m = dict(enumerate(self.deg.flatten(), 0))
                self.l = sorted(self.m.items(),
                                key=operator.itemgetter(1),
                                reverse=True)
                self.x = []
                self.y = []
                self.lcomp = []
                self.largest_component = max(networkx.connected_components(
                    self.g),
                                             key=len)
                self.n = len(self.g.nodes())
                self.x.append(0)
                self.y.append(len(self.largest_component) * 1. / self.n)
                self.lcomp.append(len(self.largest_component))
                self.R = 0.0
                for i in range(1, self.n):
                    self.g.remove_node(self.l.pop(0)[0])
                    if recalculate:
                        self.matrix = networkx.convert_matrix.to_numpy_array(
                            self.g)
                        self.g = networkx.convert_matrix.from_numpy_array(
                            self.matrix)
                        if metric2use == 'degree':
                            self.deg = bct.algorithms.degrees_und(self.matrix)
                        elif metric2use == 'eigenvector_centrality':
                            self.deg = bct.eigenvector_centrality_und(
                                self.matrix)
                        elif metric2use == 'betweenness_bin':
                            self.deg = bct.betweenness_bin(self.matrix)
                        elif metric2use == 'betweenness_wei':
                            self.deg = bct.betweenness_wei(self.matrix)
                        self.m = dict(enumerate(self.deg.flatten(), 0))
                        self.l = sorted(self.m.items(),
                                        key=operator.itemgetter(1),
                                        reverse=True)
                    self.largest_component = max(networkx.connected_components(
                        self.g),
                                                 key=len)
                    self.x.append(i * 1. / self.n)
                    self.R += len(self.largest_component) * 1. / self.n
                    self.y.append(len(self.largest_component) * 1. / self.n)
                    self.lcomp.append(len(self.largest_component))

            elif attack_type == 'random':
                self.g = networkx.convert_matrix.from_numpy_array(self.matrix)
                self.l = [(node, 0) for node in self.g.nodes()]
                random.shuffle(self.l)
                self.x = []
                self.y = []
                self.lcomp = []
                self.largest_component = max(networkx.connected_components(
                    self.g),
                                             key=len)
                self.n = len(self.g.nodes())
                self.x.append(0)
                self.y.append(len(self.largest_component) * 1. / self.n)
                self.lcomp.append(len(self.largest_component))
                self.R = 0.0
                for i in range(1, self.n):
                    self.g.remove_node(self.l.pop(0)[0])
                    self.largest_component = max(networkx.connected_components(
                        self.g),
                                                 key=len)
                    self.x.append(i * 1. / self.n)
                    self.R += len(self.largest_component) * 1. / self.n
                    self.y.append(len(self.largest_component) * 1. / self.n)
                    self.lcomp.append(len(self.largest_component))

            self.all_x[subj] = np.array(self.x)
            self.all_y[subj] = np.array(self.y)
            self.all_vulnerability[subj] = np.array(0.5 - self.R / self.n)
            self.all_largest_comp[subj] = np.array(self.lcomp)

        return self.all_vulnerability, self.all_x, self.all_y, self.all_largest_comp
Example #12
def process(data):
    return bct.betweenness_wei(data)
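`process` has the shape of a worker function for batch computation. One plausible use, shown here as an assumption rather than the source's actual pipeline, is mapping it over several connection-length matrices with multiprocessing:

import multiprocessing as mp
import numpy as np
import bct

def process(data):
    return bct.betweenness_wei(data)

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    matrices = []
    for _ in range(4):
        W = rng.random((30, 30))
        W = (W + W.T) / 2
        np.fill_diagonal(W, 0)
        matrices.append(bct.weight_conversion(W, 'lengths'))

    with mp.Pool(processes=2) as pool:
        bc_vectors = pool.map(process, matrices)  # one BC vector per matrix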
Example #13
    def __init__(self,
                 kind,
                 parcel,
                 data='lau',
                 hemi='both',
                 binary=False,
                 version=1,
                 subset='all',
                 path=None):

        mainPath = path + "/brainNetworks/" + data + "/"
        home = os.path.expanduser("~")

        self.info = {}
        self.info["kind"] = kind
        self.info["parcel"] = parcel
        self.info["data"] = data
        self.info["hemi"] = hemi
        self.info["binary"] = binary
        self.info["version"] = version
        self.info["subset"] = subset

        if version == 1:
            version = ''
        else:
            version = "_v2"

        if binary is True:
            binary = "b"
        else:
            binary = ''

        if subset == "all":
            subset = ''

        if hemi == "both":
            hemi = ''

        matrxPath = mainPath + "matrices/" + subset + kind + parcel + hemi + binary + version

        # hemisphere
        self.hemi = np.load(matrxPath + "/hemi.npy")

        # Adjacency matrix
        path = matrxPath + ".npy"
        A = np.load(path)

        # Look at time when file was last modified
        last_modified = os.path.getmtime(path)

        # set negative values to 0, fill diagonal, make symmetric
        A[A < 0] = 0
        np.fill_diagonal(A, 0)
        A = (A + A.T) / 2
        self.adj = A

        # Number of nodes in the network
        self.n = len(self.adj)

        # coordinates
        path = mainPath + "coords/coords" + parcel + hemi + ".npy"
        self.coords = np.load(path)

        # Inverse of adjacency matrix
        inv = A.copy()
        inv[A > 0] = 1 / inv[A > 0]
        self.inv_adj = inv

        # distance
        self.dist = cdist(self.coords, self.coords)

        # shortest path
        #
        # Loaded from saved file...
        # IF file not found OR Adjacency was modified after creation,
        # then recompute measure
        path = matrxPath + "/sp.npy"

        if not os.path.exists(path):
            print("shortest path not found")
            print("computing shortest path...")
            self.sp = bct.distance_wei(self.inv_adj)[0]
            np.save(matrxPath + "/sp.npy", self.sp)

        elif os.path.getmtime(path) < last_modified:
            print("new adjacency matrix was found")
            print("computing shortest paths...")
            self.sp = bct.distance_wei(self.inv_adj)[0]
            np.save(matrxPath + "/sp.npy", self.sp)

        else:
            self.sp = np.load(path)

        # diffusion embedding
        de = compute_diffusion_map(A, n_components=10, return_result=True)
        self.de = de[0]
        self.de_extra = de[1]

        # Principal components
        self.PCs, self.PCs_ev = load_data.getPCs(self.adj)

        # betweenness centrality
        #
        # Loaded from saved file...
        # IF file not found OR Adjacency was modified after creation,
        # then recompute measure
        path = matrxPath + "/bc.npy"
        if not os.path.exists(path):

            print("betweenness centrality not found")
            print("computing betweenness centrality...")
            self.bc = bct.betweenness_wei(self.inv_adj)
            np.save(matrxPath + "/bc.npy", self.bc)

        elif os.path.getmtime(path) < last_modified:
            print("new adjacency matrix was found")
            print("recomputing betweeness centrality...")
            self.bc = bct.betweenness_wei(self.inv_adj)
            np.save(matrxPath + "/bc.npy", self.bc)

        else:
            self.bc = np.load(path)

        # communities + participation coefficient
        path = matrxPath + "/communities/"
        if os.path.exists(path):
            files = []
            for i in os.listdir(path):
                if os.path.isfile(os.path.join(path, i)) and 'ci_' in i:
                    files.append(i)
            if len(files) > 0:
                self.ci = []
                for file in files:
                    self.ci.append(np.load(os.path.join(path, file)))

                self.ppc = []
                for i in range(len(files)):
                    ppc = bct.participation_coef(A, self.ci[i])
                    self.ppc.append(ppc)

        if (data == "HCP") and (kind == "SC"):
            path = mainPath + "matrices/" + subset + kind + parcel + hemi + "_lengths.npy"
            self.lengths = np.load(path)

        # streamline connection lengths
        path = matrxPath + "/len.npy"
        if os.path.exists(path):
            self.len = np.load(path)

        # network information
        if parcel[0] == "s":
            nb = parcel[1:]
            self.order = "LR"
            self.noplot = [b'Background+FreeSurfer_Defined_Medial_Wall', b'']
            self.lhannot = (home + "/"
                            "nnt-data/"
                            "atl-schaefer2018/"
                            "fsaverage/"
                            "atl-Schaefer2018_space-fsaverage_"
                            "hemi-L_desc-" + nb + "Parcels7Networks_"
                            "deterministic.annot")
            self.rhannot = (home + "/"
                            "nnt-data/"
                            "atl-schaefer2018/"
                            "fsaverage/"
                            "atl-Schaefer2018_space-fsaverage_"
                            "hemi-R_desc-" + nb + "Parcels7Networks_"
                            "deterministic.annot")
        else:
            nb = _parcel_to_n(parcel)
            self.order = "RL"
            self.noplot = None
            self.lhannot = (home + "/"
                            "nnt-data/"
                            "atl-cammoun2012/"
                            "fsaverage/"
                            "atl-Cammoun2012_space-fsaverage_"
                            "res-" + nb + "_hemi-L_deterministic.annot")
            self.rhannot = (home + "/"
                            "nnt-data/"
                            "atl-cammoun2012/"
                            "fsaverage/"
                            "atl-Cammoun2012_space-fsaverage_"
                            "res-" + nb + "_hemi-R_deterministic.annot")
            self.cammoun_id = nb
Example #14
def betweennessCentrality(network):
    return bct.betweenness_wei(network)
Example #15
def betweenness_centrality(A):
    A[A == 0] = np.amin(A[A > 0])
    cost = 1 / A
    bc = bct.betweenness_wei(cost)
    return (bc / 2)
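Two details worth noting in this variant: it mutates `A` in place (absent connections are given the minimum observed weight rather than being treated as disconnected, which keeps the cost matrix finite), and the halving presumably compensates for each undirected shortest path being counted once in each direction. Passing a copy avoids the side effect; the matrix below is illustrative:

import numpy as np

rng = np.random.default_rng(1)
A = rng.random((15, 15))
A = (A + A.T) / 2
np.fill_diagonal(A, 0)

bc = betweenness_centrality(A.copy())  # copy: the function writes into A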
Example #16
def create_feature_matrix(structure_matrix_file):
    # Feature matrix with each element containing an NxN array
    feature_matrix = []

    # EDGE WEIGHT (Depth 0)
    # weighted & undirected network
    structural_connectivity_array = np.array(
        pd.DataFrame(loadmat(structure_matrix_file)['connectivity']))
    feature_matrix.append(structural_connectivity_array)

    # DEGREE (Depth 1 & 2)
    # Node degree is the number of links connected to the node.
    deg = bct.degrees_und(structural_connectivity_array)
    fill_array_2D(feature_matrix, deg)

    # *** Conversion of connection weights to connection lengths ***
    connection_length_matrix = bct.weight_conversion(
        structural_connectivity_array, 'lengths')
    # print(connection_length_matrix)

    # SHORTEST PATH LENGTH (Depth 3 & 4)
    '''
    The distance matrix contains lengths of shortest paths between all pairs of nodes.
    An entry (u,v) represents the length of shortest path from node u to node v.
    The average shortest path length is the characteristic path length of the network.
    '''
    shortest_path = bct.distance_wei(connection_length_matrix)
    feature_matrix.append(
        shortest_path[0])  # distance (shortest weighted path) matrix
    feature_matrix.append(
        shortest_path[1]
    )  # matrix of number of edges in shortest weighted path

    # BETWEENNESS CENTRALITY (Depth 5 & 6)
    '''
    Node betweenness centrality is the fraction of all shortest paths in
    the network that contain a given node. Nodes with high values of
    betweenness centrality participate in a large number of shortest paths.
    '''
    bc = bct.betweenness_wei(connection_length_matrix)
    fill_array_2D(feature_matrix, bc)

    # CLUSTERING COEFFICIENTS (Depth 7 & 8)
    '''
    The weighted clustering coefficient is the average "intensity" of
    triangles around a node.
    '''
    cl = bct.clustering_coef_wu(connection_length_matrix)
    fill_array_2D(feature_matrix, cl)

    # Find disconnected nodes (nodes outside the largest connected component)
    W_bin = bct.weight_conversion(structural_connectivity_array, 'binarize')
    [comps, comp_sizes] = bct.get_components(W_bin)
    print('comp: ', comps)
    print('sizes: ', comp_sizes)
    # Keep only the rows/columns of nodes belonging to the most common
    # (largest) component
    keep = comps == statistics.mode(comps)
    new_array = structural_connectivity_array[np.ix_(keep, keep)]
    return feature_matrix