Example 1
def omega_sigma(matrix):
    """Returns the small-world coefficients (omega & sigma) of a graph.
    Omega ranges between -1 and 1. Values close to 0 mean the matrix
    features small-world characteristics.
    Values close to -1 mean the network has a lattice structure and values
    close to 1 mean G is a random network.

    A network is commonly classified as small-world if sigma > 1.

    Parameters
    ----------
    matrix : numpy.ndarray
        A weighted undirected graph.
    Returns
    -------
    smallworld : tuple of float
        The small-world coefficients (omega & sigma).

    Notes
    -----
    The implementation is adapted from the algorithm by Telesford et al. [1]_.

    References
    ----------
    .. [1] Telesford, Joyce, Hayasaka, Burdette, and Laurienti (2011).
           "The Ubiquity of Small-World Networks".
           Brain Connectivity. 1 (0038): 367-75.  PMC 3604768. PMID 22432451.
           doi:10.1089/brain.2011.0038.
    """
    transitivity_rand_list = []
    transitivity_latt_list = []
    path_length_rand_list = []
    for i in range(10):
        logging.debug('Generating random and lattice matrices, '
                      'iteration #{}.'.format(i))
        random = bct.randmio_und(matrix, 10)[0]
        lattice = bct.latmio_und(matrix, 10)[1]

        transitivity_rand_list.append(bct.transitivity_wu(random))
        transitivity_latt_list.append(bct.transitivity_wu(lattice))
        path_length_rand_list.append(avg_cast(bct.distance_wei(random)[0]))

    transitivity = bct.transitivity_wu(matrix)
    path_length = avg_cast(bct.distance_wei(matrix)[0])
    transitivity_rand = np.mean(transitivity_rand_list)
    transitivity_latt = np.mean(transitivity_latt_list)
    path_length_rand = np.mean(path_length_rand_list)

    omega = (path_length_rand / path_length) - \
        (transitivity / transitivity_latt)
    sigma = (transitivity / transitivity_rand) / \
        (path_length / path_length_rand)

    return float(omega), float(sigma)
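A minimal usage sketch for omega_sigma. The avg_cast helper and the imports are not shown in the snippet, so the definitions below are assumptions (avg_cast is taken to be the mean of the finite, non-zero shortest-path lengths):

import logging

import bct
import numpy as np

def avg_cast(distance_matrix):
    # Assumed helper: mean of the finite, off-diagonal entries of the
    # weighted shortest-path matrix.
    finite = distance_matrix[np.isfinite(distance_matrix)]
    return float(np.mean(finite[finite > 0]))

rng = np.random.default_rng(0)
w = rng.random((20, 20))
w = (w + w.T) / 2          # weighted, undirected
np.fill_diagonal(w, 0)

omega, sigma = omega_sigma(w)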
Example 2
    def create_feature_matrix(self):
        # Feature matrix with each element containing an NxN array
        feature_matrix = []

        # EDGE WEIGHT (Depth 0)
        structural_connectivity_array = self.get_structure_and_function()
        feature_matrix.append(structural_connectivity_array)

        # DEGREE (Depth 1 & 2)
        deg = bct.degrees_und(structural_connectivity_array)
        self.fill_array_2D(feature_matrix, deg)

        # Conversion of connection weights to connection lengths
        connection_length_matrix = bct.weight_conversion(
            structural_connectivity_array, 'lengths')

        # SHORTEST PATH LENGTH (Depth 3 & 4)
        shortest_path = bct.distance_wei(connection_length_matrix)
        feature_matrix.append(shortest_path[0])  # distance (shortest weighted path) matrix
        feature_matrix.append(shortest_path[1])  # number of edges in each shortest weighted path

        # BETWEENNESS CENTRALITY (Depth 5 & 6)
        bc = bct.betweenness_wei(connection_length_matrix)
        self.fill_array_2D(feature_matrix, bc)

        # CLUSTERING COEFFICIENTS (Depth 7 & 8)
        cl = bct.clustering_coef_wu(connection_length_matrix)
        self.fill_array_2D(feature_matrix, cl)

        return feature_matrix
Example 3
def compute_small_worldness(cm, cc, cpl):

    #randmio_und_connected can be found in reference.py
    #second argument is number of iterations
    #construct a random network for comparison with our real network
    rand_network = bct.randmio_und_connected(cm, 5)[0]

    #clustering_coef_wu is found in clustering.py
    #make sure that C_rand is non-zero, so we avoid division by zero
    #could be made more robust by averaging over several random
    #networks; we did not do this to keep run time at a minimum
    C_rand = np.mean(bct.clustering_coef_wu(rand_network))
    while C_rand == 0.0:
        rand_network = bct.randmio_und_connected(cm, 5)[0]
        C_rand = np.mean(bct.clustering_coef_wu(rand_network))
        C_rand = np.mean(bct.clustering_coef_wu(rand_network))

    #invert can be found in other.py
    rand_inv = bct.invert(rand_network)
    
    #distance_wei and charpath can be found in distance.py
    distance_rand = bct.distance_wei(rand_inv)
    charpath_rand = bct.charpath(distance_rand[0])
 
    #compute the small worldness index according to Rubinov
    C = cc
    L = cpl
    L_rand = charpath_rand[0]

    Ctemp = C/C_rand
    Ltemp = L/L_rand

    S_W = Ctemp / Ltemp

    return S_W
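A hedged sketch of how the two precomputed inputs (cc, the average clustering coefficient, and cpl, the characteristic path length) could be obtained before calling the function; the weight-to-length inversion mirrors what the function does for its random surrogate:

import bct
import numpy as np

def small_worldness(cm):
    cc = np.mean(bct.clustering_coef_wu(cm))   # clustering of the real network
    cm_inv = bct.invert(cm)                    # weights -> lengths
    cpl = bct.charpath(bct.distance_wei(cm_inv)[0])[0]
    return compute_small_worldness(cm, cc, cpl)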
Example 4
def plot_effective_anat_dist_vs_ab(ref_pattern_df, acp_matrix, epicenters_idx,
                                   roi_labels, output_dir):
    # set diagonal to 1
    for i in range(0, len(acp_matrix)):
        acp_matrix[i][i] = 1
    acp_matrix_reciprocal = np.reciprocal(acp_matrix)
    acp_effective_dist = bct.distance_wei(acp_matrix_reciprocal)
    roi_labels_not_seed = []
    effective_anat_distance_dict = {}
    for i, roi in enumerate(roi_labels):
        dist = 0
        for j in epicenters_idx:
            dist += acp_effective_dist[0][i, j]
        dist = dist / len(epicenters_idx)
        #print("{0}: {1}".format(roi, str(np.round(dist,3))))
        effective_anat_distance_dict[roi] = dist
    roi_dist_ab_df = pd.DataFrame(columns=[
        "Avg_Deposition_Asymptomatic", "Avg_Deposition_Symptomatic",
        "Effective_Anat_Distance"
    ],
                                  index=roi_labels,
                                  dtype="float")
    for i, roi in enumerate(roi_labels):
        roi_dist_ab_df.loc[
            roi, "Effective_Anat_Distance"] = effective_anat_distance_dict[roi]
        roi_dist_ab_df.loc[roi, "Avg_Deposition_Asymptomatic"] = np.mean(
            ref_pattern_df[ref_pattern_df.Symptomatic == "No"].loc[:, roi])
        roi_dist_ab_df.loc[roi, "Avg_Deposition_Symptomatic"] = np.mean(
            ref_pattern_df[ref_pattern_df.Symptomatic == "Yes"].loc[:, roi])

    fig, (ax1, ax2) = plt.subplots(ncols=2,
                                   sharey=False,
                                   sharex=False,
                                   figsize=(6, 3))
    axes = [ax1, ax2]
    for i, status in enumerate(
        ["Avg_Deposition_Asymptomatic", "Avg_Deposition_Symptomatic"]):
        axes[i] = sns.regplot(x="Effective_Anat_Distance",
                              y=status,
                              data=roi_dist_ab_df.loc[roi_labels, :],
                              ax=axes[i])

        r, p = stats.pearsonr(
            roi_dist_ab_df.loc[roi_labels, "Effective_Anat_Distance"],
            roi_dist_ab_df.loc[roi_labels, status])
        title = status.split("_")[-1]
        axes[i].set_xlabel("Effective Anatomical Distance", fontsize=10)
        axes[i].set_ylabel(r"Regional A$\beta$ Probability", fontsize=10)
        axes[i].set_title(title, fontsize=10)
        axes[i].set_ylim([-0.1, 1])
        axes[i].text(x=1.5,
                     y=0.8,
                     s="r: {0}\np < 0.01".format(str(np.round(r, 3))))
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, "effective_anat_dist_vs_ab.png"))
Example 5
def test_charpath():
    x = load_sample(thres=.02)
    d, e = bct.distance_wei(x)
    l, eff, ecc, radius, diameter = bct.charpath(d)

    assert np.any(np.isinf(d))
    assert not np.isnan(radius)
    assert not np.isnan(diameter)
Example 6
def test_distance_wei():
    x = load_sample(thres=.02)
    d, e = bct.distance_wei(x)
    d[np.where(np.isinf(d))] = 0
    print(np.sum(d), np.sum(e))

    assert np.allclose(np.sum(d), 155650.1, atol=.01)
    assert np.sum(e) == 30570
Example 7
def closenessCentrality(network):
    # distance_wei returns a (distance, edge-count) tuple; keep only the
    # weighted shortest-path matrix (the original iterated over the tuple).
    shortestPathMatrix = bct.distance_wei(network)[0]
    averageShortestPath = []
    for i in range(len(shortestPathMatrix)):
        path = 0
        for j in shortestPathMatrix:
            path += j[i]
        averageShortestPath.append(path / len(shortestPathMatrix))
    return averageShortestPath
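Note that the function above returns each node's average shortest-path length; closeness centrality is conventionally the inverse of that quantity. A hedged sketch of the conventional definition, assuming network is already a connection-length matrix:

import bct
import numpy as np

def closeness_centrality(network):
    d = bct.distance_wei(network)[0]      # weighted shortest-path matrix
    n = len(d)
    # diagonal is zero, so each row sum covers the n - 1 other nodes
    return (n - 1) / np.sum(d, axis=1)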
Example 8
def get_shortest_path(Network, matrix_path=None, last_modified=0):
    '''
    Function to get the shortest paths of a network. If a matrix_path is
    given, this function will try to load the shortest paths from that
    path, as long as they were generated after the adjacency matrix itself;
    otherwise they are recomputed and saved.
    '''

    if matrix_path is not None:

        path = matrix_path + "/sp.npy"

        if not os.path.exists(path):

            print("shortest path not found")
            print("computing shortest path...")

            sp, _ = bct.distance_wei(Network['inv_adj'])
            np.save(path, sp)

        elif os.path.getmtime(path) < last_modified:

            print("new adjacency matrix was found")
            print("computing shortest paths...")

            sp, _ = bct.distance_wei(Network['inv_adj'])
            np.save(path, sp)

        else:

            sp = np.load(path)

    else:
        sp, _ = bct.distance_wei(Network['inv_adj'])

    return sp
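A minimal usage sketch; Network is assumed to be a dict-like object exposing the inverted adjacency matrix under 'inv_adj' (consistent with the class __init__ later in this listing), and the file path is hypothetical:

import numpy as np

A = np.load("adjacency.npy")        # hypothetical file
inv = A.copy()
inv[A > 0] = 1 / A[A > 0]           # weights -> lengths

net = {'inv_adj': inv}
sp = get_shortest_path(net)         # no caching when matrix_path is None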
Example 9
def extract_epoch_graph_features(W):
    import bct

    L = bct.weight_conversion(W, "lengths")
    L[W == 0] = np.inf
    D, _ = bct.distance_wei(L)

    l, eff, ecc, radius, diameter = bct.charpath(D, include_infinite=False)

    return [
        bct.clustering_coef_wu(W),
        bct.efficiency_wei(W, local=True),
        bct.betweenness_wei(L),
        ecc,
        [l, eff, radius, diameter],
    ]
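A minimal sketch of calling the feature extractor on a synthetic epoch; it assumes W is a symmetric, non-negative connectivity matrix with a zero diagonal:

import numpy as np

rng = np.random.default_rng(42)
W = rng.random((16, 16))
W = (W + W.T) / 2
np.fill_diagonal(W, 0)

clustering, local_eff, betweenness, ecc, global_measures = \
    extract_epoch_graph_features(W)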
Example 10
def multiscale_closeness(conn, pr):
    '''
    Function to compute the multiscale closeness centrality of
    individual nodes in a network

    Parameters
    ----------
    conn : (n, n) ndarray
        Adjacency matrix of our connectome (structural), where n is the
        number of nodes in the network.
    pr : (k, m, n) ndarray
        Transition probabilities of random walkers initiated on individual
        nodes in the network, where 'k' is the number of time points at which
        the transition probabilities are evaluated, 'm' is the number of nodes
        on which the random walks are initiated and 'n' is the number of
        nodes in the network.

    Returns
    -------
    multiscale_closeness : (k, n) ndarray
        Multiscale closeness centralities for each of the 'n' nodes in the
        network and for each one of the 'k' time points.
    sp : (n, n) ndarray
        Shortest paths between pairs of nodes in the network.
    '''

    k = pr.shape[0]
    n = pr.shape[1]

    # Compute the shortest path between every pair of nodes. The topological
    # distance is computed as inverse of the connection weight between the two
    # nodes.
    inv_conn = conn.copy()
    inv_conn[inv_conn > 0] = 1 / inv_conn[inv_conn > 0]
    sp = bct.distance_wei(inv_conn)[0]

    # Compute the multiscale shortest path
    multiscale_sp = np.zeros((k, n))
    for i in range(k):
        for j in range(n):
            multiscale_sp[i, j] = np.average(sp[j, :], weights=pr[i, j, :])

    multiscale_closeness = zscore(1 / multiscale_sp, axis=1)

    return multiscale_closeness, sp
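A hedged sanity-check sketch: with a single time point and uniform transition probabilities, the weighted average reduces to each node's mean shortest-path length, so the output behaves like an ordinary closeness z-score. It assumes import bct, import numpy as np and from scipy.stats import zscore are in scope for the function above:

n = 10
rng = np.random.default_rng(1)
conn = rng.random((n, n))
conn = (conn + conn.T) / 2
np.fill_diagonal(conn, 0)

pr = np.full((1, n, n), 1.0 / n)     # k=1 time point, uniform walkers
mc, sp = multiscale_closeness(conn, pr)
print(mc.shape)                      # (1, 10)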
Example 11
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_length_matrix, args.in_conn_matrix])

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    if not args.append_json:
        assert_outputs_exist(parser, args, args.out_json)
    else:
        logging.debug('Using --append_json, make sure to delete {} '
                      'before re-launching a group analysis.'.format(
                          args.out_json))

    if args.append_json and args.overwrite:
        parser.error('Cannot use the append option at the same time as '
                     'overwrite.\nAmbiguous behavior, consider deleting the '
                     'output json file first instead.')

    conn_matrix = load_matrix_in_any_format(args.in_conn_matrix)
    len_matrix = load_matrix_in_any_format(args.in_length_matrix)

    if args.filtering_mask:
        mask_matrix = load_matrix_in_any_format(args.filtering_mask)
        conn_matrix *= mask_matrix
        len_matrix *= mask_matrix
    N = len_matrix.shape[0]

    if args.avg_node_wise:
        func_cast = avg_cast
    else:
        func_cast = list_cast

    gtm_dict = {}
    betweenness_centrality = bct.betweenness_wei(len_matrix) / ((N - 1) *
                                                                (N - 2))
    gtm_dict['betweenness_centrality'] = func_cast(betweenness_centrality)
    ci, gtm_dict['modularity'] = bct.modularity_louvain_und(conn_matrix,
                                                            seed=0)

    gtm_dict['assortativity'] = bct.assortativity_wei(conn_matrix, flag=0)
    gtm_dict['participation'] = func_cast(
        bct.participation_coef_sign(conn_matrix, ci)[0])
    gtm_dict['clustering'] = func_cast(bct.clustering_coef_wu(conn_matrix))

    gtm_dict['nodal_strength'] = func_cast(bct.strengths_und(conn_matrix))
    gtm_dict['local_efficiency'] = func_cast(
        bct.efficiency_wei(len_matrix, local=True))
    gtm_dict['global_efficiency'] = func_cast(bct.efficiency_wei(len_matrix))
    gtm_dict['density'] = func_cast(bct.density_und(conn_matrix)[0])

    # Rich club computation warns about the matrix rank and produces NaNs
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        tmp_rich_club = bct.rich_club_wu(conn_matrix)
    gtm_dict['rich_club'] = func_cast(tmp_rich_club[~np.isnan(tmp_rich_club)])

    # Path length gives an infinite distance for unconnected nodes
    # All of this is simply to fix that
    empty_connections = np.where(np.sum(len_matrix, axis=1) < 0.001)[0]
    if len(empty_connections):
        len_matrix = np.delete(len_matrix, empty_connections, axis=0)
        len_matrix = np.delete(len_matrix, empty_connections, axis=1)

    path_length_tuple = bct.distance_wei(len_matrix)
    gtm_dict['path_length'] = func_cast(path_length_tuple[0])
    gtm_dict['edge_count'] = func_cast(path_length_tuple[1])

    if not args.avg_node_wise:
        for i in empty_connections:
            gtm_dict['path_length'].insert(i, -1)
            gtm_dict['edge_count'].insert(i, -1)

    if args.small_world:
        gtm_dict['omega'], gtm_dict['sigma'] = omega_sigma(len_matrix)

    if os.path.isfile(args.out_json) and args.append_json:
        with open(args.out_json) as json_data:
            out_dict = json.load(json_data)
        for key in gtm_dict.keys():
            if isinstance(out_dict[key], list):
                out_dict[key].append(gtm_dict[key])
            else:
                out_dict[key] = [out_dict[key], gtm_dict[key]]
    else:
        out_dict = {}
        for key in gtm_dict.keys():
            out_dict[key] = [gtm_dict[key]]

    with open(args.out_json, 'w') as outfile:
        json.dump(out_dict,
                  outfile,
                  indent=args.indent,
                  sort_keys=args.sort_keys)
Example 12
def graph_estimates(cm, th):

    #dictionary for storing our results
    d = OrderedDict()

    #thresholding is applied here so it also covers matrices that were not
    #loaded from MATLAB; first remove negative weights
    cm = bct.threshold_absolute(cm, 0.0)

    cm = threshold_connected(cm, th)

    
    #for binarizing the connectivity matrices;
    #we work with weighted matrices, so this is turned off
    #bin_cm = bct.binarize(cm)
    
    #invert the connectivity for computing shortest paths
    cm_inv = bct.invert(cm)

    #modularity_und is found in modularity.py
    modularity_und = bct.modularity_und(cm)

    #the community_affiliation vector that gets input to some of the functions
    community_affiliation = modularity_und[0]
    
    #distance_wei and charpath are found in distance.py
    distance_wei = bct.distance_wei(cm_inv)
    charpath = bct.charpath(distance_wei[0], False, False)

    #clustering_coef_wu is found in clustering.py
    clustering_coef_wu = bct.clustering_coef_wu(cm)
    avg_clustering_coef_wu = np.mean(clustering_coef_wu)


    #assortativity_wei is found in core.py
    d['assortativity_wei-r'] = bct.assortativity_wei(cm, flag=0)

    #just taking the average of clustering_coef_wu
    d['avg_clustering_coef_wu:C'] = avg_clustering_coef_wu

    d['charpath-lambda'] = charpath[0]
    #d['charpath-efficiency'] = charpath[1]   
    #d['charpath-ecc'] = charpath[2]           
    #d['charpath-radius'] = charpath[3]
    #d['charpath-diameter'] = charpath[4]

    d['clustering_coef_wu-C'] = clustering_coef_wu


    d['efficiency_wei-Eglob'] = bct.efficiency_wei(cm)
    #d['efficiency_wei-Eloc'] = bct.efficiency_wei(cm, True)

    #d['modularity_und-ci'] = modularity_und[0]
    d['modularity_und-Q'] = modularity_und[1]

    d['small_worldness:S'] = compute_small_worldness(cm,
                                                     avg_clustering_coef_wu,
                                                     charpath[0])

   
    #transitivity_wu can be found in clustering.py
    d['transitivity_wu-T'] = bct.transitivity_wu(cm)


    #EXAMPLES for local measures and binary measures. Uncomment to use.

    #VECTOR MEASURES
    #d['betweenness_wei-BC'] = bct.betweenness_wei(cm_inv)
    # d['module_degree_zscore-Z'] = bct.module_degree_zscore(cm, community_affiliation)
    #d['degrees_und-deg'] = bct.degrees_und(cm)
    #d['charpath-ecc'] = charpath[2]


    #BINARIES
    #modularity_und_bin = bct.modularity_und(bin_cm)
    #d['clustering_coef_bu-C'] = bct.clustering_coef_bu(bin_cm)
    #d['efficiency_bin-Eglob'] = bct.efficiency_bin(bin_cm)
    #d['efficiency_bin-Eloc'] = bct.efficiency_bin(bin_cm, True)
    #d['modularity_und_bin-ci'] = modularity_und_bin[0]
    #d['modularity_und_bin-Q'] = modularity_und_bin[1]
    #d['transitivity_bu-T'] = bct.transitivity_bu(bin_cm)
    #d['betweenness_bin-BC'] = bct.betweenness_bin(bin_cm)
    #d['participation_coef'] = bct.participation_coef(cm, community_affiliation)


    # charpath can emit invalid-value warnings for ecc, radius and diameter
    # np.seterr(invalid='ignore')


    return d
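A usage sketch, assuming compute_small_worldness from Example 3 is in scope. threshold_connected is a project-specific helper that is not part of bctpy, so the stand-in below (proportional thresholding) is an assumption rather than the original behavior:

import bct
import numpy as np
from collections import OrderedDict

def threshold_connected(cm, th):
    # Stand-in for the project's helper: keep the strongest proportion
    # of weights.
    return bct.threshold_proportional(cm, th)

rng = np.random.default_rng(0)
cm = rng.random((30, 30))
cm = (cm + cm.T) / 2
np.fill_diagonal(cm, 0)

measures = graph_estimates(cm, 0.3)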
Example 13
def process(data):
    distances, edge_counts = bct.distance_wei(1 / data)
    return distances
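The reciprocal 1 / data converts connection weights to lengths but emits divide-by-zero warnings wherever data is 0. A hedged, equivalent sketch using bctpy's own conversion:

import bct
import numpy as np

def process_quiet(data):
    lengths = bct.weight_conversion(data, 'lengths')  # 1/w on nonzero weights
    lengths[data == 0] = np.inf                       # mark absent connections
    distances, _ = bct.distance_wei(lengths)
    return distances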
Example 14
def create_feature_matrix(structure_matrix_file):
    # Feature matrix with each element containing an NxN array
    feature_matrix = []

    # EDGE WEIGHT (Depth 0)
    # weighted & undirected network
    structural_connectivity_array = np.array(
        pd.DataFrame(loadmat(structure_matrix_file)['connectivity']))
    feature_matrix.append(structural_connectivity_array)

    # DEGREE (Depth 1 & 2)
    # Node degree is the number of links connected to the node.
    deg = bct.degrees_und(structural_connectivity_array)
    fill_array_2D(feature_matrix, deg)

    # *** Conversion of connection weights to connection lengths ***
    connection_length_matrix = bct.weight_conversion(
        structural_connectivity_array, 'lengths')
    # print(connection_length_matrix)

    # SHORTEST PATH LENGTH (Depth 3 & 4)
    '''
    The distance matrix contains lengths of shortest paths between all pairs of nodes.
    An entry (u,v) represents the length of shortest path from node u to node v.
    The average shortest path length is the characteristic path length of the network.
    '''
    shortest_path = bct.distance_wei(connection_length_matrix)
    feature_matrix.append(
        shortest_path[0])  # distance (shortest weighted path) matrix
    feature_matrix.append(
        shortest_path[1]
    )  # matrix of number of edges in shortest weighted path

    # BETWEENNESS CENTRALITY (Depth 5 & 6)
    '''
    Node betweenness centrality is the fraction of all shortest paths in
    the network that contain a given node. Nodes with high values of
    betweenness centrality participate in a large number of shortest paths.
    '''
    bc = bct.betweenness_wei(connection_length_matrix)
    fill_array_2D(feature_matrix, bc)

    # CLUSTERING COEFFICIENTS (Depth 7 & 8)
    '''
    The weighted clustering coefficient is the average "intensity" of
    triangles around a node.
    '''
    cl = bct.clustering_coef_wu(connection_length_matrix)
    fill_array_2D(feature_matrix, cl)

    # Find disconnected nodes - component size set to 1
    W_bin = bct.weight_conversion(structural_connectivity_array, 'binarize')
    comps, comp_sizes = bct.get_components(W_bin)
    print('comp: ', comps)
    print('sizes: ', comp_sizes)
    # Keep only the nodes in the largest (modal) component; the original
    # np.delete call removed array *values* rather than rows and columns.
    keep = np.where(comps == statistics.mode(comps))[0]
    new_array = structural_connectivity_array[np.ix_(keep, keep)]

    return feature_matrix
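fill_array_2D is referenced here and in the earlier class method but never shown. A plausible reconstruction, consistent with each vector measure occupying two consecutive depths (the per-node values tiled along rows, then along columns); this is an assumption, not the original helper:

import numpy as np

def fill_array_2D(feature_matrix, measure_vector):
    # Assumed behavior: broadcast a per-node vector into two NxN arrays,
    # one indexed by the column node and one by the row node.
    v = np.asarray(measure_vector)
    n = len(v)
    feature_matrix.append(np.tile(v, (n, 1)))      # first depth
    feature_matrix.append(np.tile(v, (n, 1)).T)    # second depth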
Example 15
    def __init__(self,
                 kind,
                 parcel,
                 data='lau',
                 hemi='both',
                 binary=False,
                 version=1,
                 subset='all',
                 path=None):

        mainPath = path + "/brainNetworks/" + data + "/"
        home = os.path.expanduser("~")

        self.info = {}
        self.info["kind"] = kind
        self.info["parcel"] = parcel
        self.info["data"] = data
        self.info["hemi"] = hemi
        self.info["binary"] = binary
        self.info["version"] = version
        self.info["subset"] = subset

        if version == 1:
            version = ''
        else:
            version = "_v2"

        if binary is True:
            binary = "b"
        else:
            binary = ''

        if subset == "all":
            subset = ''

        if hemi == "both":
            hemi = ''

        matrxPath = mainPath + "matrices/" + subset + kind + parcel + hemi + binary + version

        # hemisphere
        self.hemi = np.load(matrxPath + "/hemi.npy")

        # Adjacency matrix
        path = matrxPath + ".npy"
        A = np.load(path)

        # Look at time when file was last modified
        last_modified = os.path.getmtime(path)

        # set negative values to 0, fill diagonal, make symmetric
        A[A < 0] = 0
        np.fill_diagonal(A, 0)
        A = (A + A.T) / 2
        self.adj = A

        # Number of nodes in the network
        self.n = len(self.adj)

        # coordinates
        path = mainPath + "coords/coords" + parcel + hemi + ".npy"
        self.coords = np.load(path)

        # Inverse of adjacency matrix
        inv = A.copy()
        inv[A > 0] = 1 / inv[A > 0]
        self.inv_adj = inv

        # distance
        self.dist = cdist(self.coords, self.coords)

        # shortest path
        #
        # Loaded from saved file...
        # IF file not found OR Adjacency was modified after creation,
        # then recompute measure
        path = matrxPath + "/sp.npy"

        if not os.path.exists(path):
            print("shortest path not found")
            print("computing shortest path...")
            self.sp = bct.distance_wei(self.inv_adj)[0]
            np.save(matrxPath + "/sp.npy", self.sp)

        elif os.path.getmtime(path) < last_modified:
            print("new adjacency matrix was found")
            print("computing shortest paths...")
            self.sp = bct.distance_wei(self.inv_adj)[0]
            np.save(matrxPath + "/sp.npy", self.sp)

        else:
            self.sp = np.load(path)

        # diffusion embedding
        de = compute_diffusion_map(A, n_components=10, return_result=True)
        self.de = de[0]
        self.de_extra = de[1]

        # Principal components
        self.PCs, self.PCs_ev = load_data.getPCs(self.adj)

        # betweenness centrality
        #
        # Loaded from saved file...
        # IF file not found OR Adjacency was modified after creation,
        # then recompute measure
        path = matrxPath + "/bc.npy"
        if not os.path.exists(path):

            print("betweenness centrality not found")
            print("computing betweenness centrality...")
            self.bc = bct.betweenness_wei(self.inv_adj)
            np.save(matrxPath + "/bc.npy", self.bc)

        elif os.path.getmtime(path) < last_modified:
            print("new adjacency matrix was found")
            print("recomputing betweeness centrality...")
            self.bc = bct.betweenness_wei(self.inv_adj)
            np.save(matrxPath + "/bc.npy", self.bc)

        else:
            self.bc = np.load(path)

        # communities + participation coefficient
        path = matrxPath + "/communities/"
        if os.path.exists(path):
            files = []
            for i in os.listdir(path):
                if os.path.isfile(os.path.join(path, i)) and 'ci_' in i:
                    files.append(i)
            if len(files) > 0:
                self.ci = []
                for file in files:
                    self.ci.append(np.load(os.path.join(path, file)))

                self.ppc = []
                for i in range(len(files)):
                    ppc = bct.participation_coef(A, self.ci[i])
                    self.ppc.append(ppc)

        if (data == "HCP") and (kind == "SC"):
            path = mainPath + "matrices/" + subset + kind + parcel + hemi + "_lengths.npy"
            self.lengths = np.load(path)

        # streamline connection lengths
        path = matrxPath + "/len.npy"
        if os.path.exists(path):
            self.len = np.load(path)

        # network information
        if parcel[0] == "s":
            nb = parcel[1:]
            self.order = "LR"
            self.noplot = [b'Background+FreeSurfer_Defined_Medial_Wall', b'']
            self.lhannot = (home + "/"
                            "nnt-data/"
                            "atl-schaefer2018/"
                            "fsaverage/"
                            "atl-Schaefer2018_space-fsaverage_"
                            "hemi-L_desc-" + nb + "Parcels7Networks_"
                            "deterministic.annot")
            self.rhannot = (home + "/"
                            "nnt-data/"
                            "atl-schaefer2018/"
                            "fsaverage/"
                            "atl-Schaefer2018_space-fsaverage_"
                            "hemi-R_desc-" + nb + "Parcels7Networks_"
                            "deterministic.annot")
        else:
            nb = _parcel_to_n(parcel)
            self.order = "RL"
            self.noplot = None
            self.lhannot = (home + "/"
                            "nnt-data/"
                            "atl-cammoun2012/"
                            "fsaverage/"
                            "atl-Cammoun2012_space-fsaverage_"
                            "res-" + nb + "_hemi-L_deterministic.annot")
            self.rhannot = (home + "/"
                            "nnt-data/"
                            "atl-cammoun2012/"
                            "fsaverage/"
                            "atl-Cammoun2012_space-fsaverage_"
                            "res-" + nb + "_hemi-R_deterministic.annot")
            self.cammoun_id = nb