Example #1
def multilayer_pc_degree(mlgraph: np.ndarray) -> np.ndarray:
    """ Multilayer Participation Coefficient (Degree)


    Parameters
    ----------
    mlgraph : array-like, shape(n_layers, n_rois, n_rois)
        A multilayer (undirected) graph; each layer is an (n_rois, n_rois) adjacency matrix.


    Returns
    -------
    mpc : array-like, shape(n_rois, 1)
        Participation coefficient based on the degree of each node across layers.
    """
    num_layers, num_rois, _ = np.shape(mlgraph)

    degrees = np.zeros((num_layers, num_rois))
    for i in range(num_layers):
        a_layer = np.squeeze(mlgraph[i, :, :])
        degrees[i] = bct.degrees_und(a_layer)

    normal_degrees = np.zeros((num_layers, num_rois))
    for i in range(num_rois):
        normal_degrees[:, i] = degrees[:, i] / np.sum(degrees[:, i])

    mpc = np.zeros((num_rois, 1))
    for i in range(num_rois):
        mpc[i] = (np.float32(num_layers) / (num_layers - 1)) * (
            1.0 - np.sum(np.power(normal_degrees[:, i], 2.0)))

    return mpc
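A minimal usage sketch, not from the original source: it assumes numpy and bctpy are importable and feeds a small random binary multilayer graph through the function.

# Hypothetical usage: 3 layers x 10 ROIs, each layer symmetric with a zero diagonal.
import numpy as np
import bct

rng = np.random.default_rng(0)
raw = rng.integers(0, 2, size=(3, 10, 10))
mlgraph = np.array([np.triu(layer, 1) + np.triu(layer, 1).T for layer in raw])
mpc = multilayer_pc_degree(mlgraph)
print(mpc.ravel())  # one participation coefficient per ROI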
Example #2
    def create_feature_matrix(self):
        # Feature matrix with each element containing an NxN array
        feature_matrix = []

        # EDGE WEIGHT (Depth 0)
        structural_connectivity_array = self.get_structure_and_function()
        feature_matrix.append(structural_connectivity_array)

        # DEGREE (Depth 1 & 2)
        deg = bct.degrees_und(structural_connectivity_array)
        self.fill_array_2D(feature_matrix, deg)

        # Conversion of connection weights to connection lengths
        connection_length_matrix = bct.weight_conversion(structural_connectivity_array, 'lengths')

        # SHORTEST PATH LENGTH (Depth 3 & 4)
        shortest_path = bct.distance_wei(connection_length_matrix)
        feature_matrix.append(shortest_path[0])  # distance (shortest weighted path) matrix
        feature_matrix.append(shortest_path[1])  # matrix of number of edges in shortest weighted path

        # BETWEENNESS CENTRALITY (Depth 5 & 6)
        bc = bct.betweenness_wei(connection_length_matrix)
        self.fill_array_2D(feature_matrix, bc)

        # CLUSTERING COEFFICIENTS (Depth 7 & 8)
        cl = bct.clustering_coef_wu(connection_length_matrix)
        self.fill_array_2D(feature_matrix, cl)

        return feature_matrix
    def compute(self):
        centrality = bct.degrees_und(self.g)

        self.stats['Degree'] = list(centrality)

        # Lobe grouping
        plots.create_bar_lobe_grouped_descending(
            "reports/plots/" + self.name + "_degree_descending.pdf",
            self.name + " Degree Centrality", 'Degree', centrality)

        plt.show()
        '''
        # Degree distribution
        distribution = self.stats.groupby(['Degree']).size().reset_index(name='Frequency')
        sum = distribution['Frequency'].sum()
        distribution['Probability'] = distribution['Frequency'] / sum
        distribution.head(10)

        alpha = plots.create_bar("reports/plots/" + self.name + "_degree_distribution.pdf", self.name + " Degree Distribution",
                                 'Degree', distribution['Degree'],
                                 'Probability', distribution['Probability'],
                                  yticks=[0, 0.05, 0.1, 0.15])

        plt.show()
        '''
        average = statistics.mean(self.stats['Degree'])

        #print('Degree distribution gamma= ' + str(alpha) + "\n")

        print("Average Degree = " + str(average) + "\n")

        return self.stats
Example #4
def scale_free_tau(corrmat, skew_thresh, proportional=True):
    '''
    Calculates the threshold at which the network becomes scale-free, estimated
    from the skewness of the network's degree distribution.

    Parameters
    ----------
    corrmat : numpy.array
        Correlation or other connectivity matrix from which tau will be estimated.
        Should contain values between 0 and 1.
    skew_thresh : float
        Skewness magnitude below which the degree distribution is considered scale-free.
    proportional : bool
        Determines whether the connectivity matrix is thresholded proportionally or
        absolutely. Default is proportional, as maintaining network density across
        participants is a priority.

    Returns
    -------
    tau : float
        Lowest value of tau (threshold) at which the network is scale-free.
    '''
    tau = 0.01
    skewness = 1
    while abs(skewness) > skew_thresh:
        if proportional:
            w = bct.threshold_proportional(corrmat, tau)
        else:
            w = bct.threshold_absolute(corrmat, tau)
        skewness = skew(bct.degrees_und(w))
        tau += 0.01
    return tau
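A hedged usage sketch (the inputs below are assumptions, not from the original): scale_free_tau expects bct, numpy, and scipy.stats.skew at module scope, and the loop can exhaust valid tau values if the skewness criterion is never met.

# Hypothetical usage on a random symmetric matrix with values in (0, 1).
import numpy as np
import bct
from scipy.stats import skew  # used inside scale_free_tau

rng = np.random.default_rng(0)
r = rng.random((60, 60))
corrmat = (r + r.T) / 2
np.fill_diagonal(corrmat, 0)
tau = scale_free_tau(corrmat, skew_thresh=0.3)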
    def compute(self):
        self.centrality = bct.degrees_und(self.g)
        self.region_names = pd.read_csv("data/lobes.node", sep=" ", header='infer')['RegionName']

        if str(self.algorithm).lower() == 'all' or self.algorithm is None:
            self.louvain()
            self.spinglass()
            self.walktrap()
            self.girvan_newman()
            self.infomap()

        if str(self.algorithm).lower() == 'louvain':
            communities = self.louvain()
            return communities

        if str(self.algorithm).lower() == 'spinglass':
            communities = self.spinglass()
            return communities

        if str(self.algorithm).lower() == 'walktrap':
            communities = self.walktrap()
            return communities

        if str(self.algorithm).lower() == 'girvan_newman':
            communities = self.girvan_newman()
            return communities

        if str(self.algorithm).lower() == 'infomap':
            communities = self.infomap()
            return communities
Example #6
def k_core_decomposition(mtx, threshold):
    """ Threshold a binary graph based on the detected k-cores.

    .. [Alvarez2006] Alvarez-Hamelin, J. I., Dall'Asta, L., Barrat, A., & Vespignani, A. (2006). Large scale networks fingerprinting and visualization using the k-core decomposition. In Advances in neural information processing systems (pp. 41-50).
    .. [Hagman2008] Hagmann, P., Cammoun, L., Gigandet, X., Meuli, R., Honey, C. J., Wedeen, V. J., & Sporns, O. (2008). Mapping the structural core of human cerebral cortex. PLoS biology, 6(7), e159.



    Parameters
    ----------
    mtx : array-like, shape(N, N)
        Binary matrix.

    threshold : int
        Degree threshold.


    Returns
    -------
    k_cores : array-like, shape(N, 1)
        A binary vector marking the nodes that survive the k-core decomposition.
    """
    imtx = mtx.copy()  # work on a copy so the input matrix is left intact

    N, _ = np.shape(mtx)

    # in_degree = np.sum(mtx, 0)
    # out_degree = np.sum(mtx, 1)

    degree = bct.degrees_und(mtx)

    for i in range(N):
        if degree[i] < threshold:
            for l in range(N):
                imtx[i, l] = 0
                imtx[l, i] = 0  # undirected graph: remove both directions of the edge

        # Recalculate the list of the degrees
        degree = bct.degrees_und(imtx)

    k_cores = np.zeros((N, 1), dtype=np.int32)

    for i in range(N):
        if degree[i] > 0:
            k_cores[i] = 1

    return k_cores
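A minimal usage sketch under assumed imports (numpy as np, bctpy as bct); the random graph below is illustrative only.

# Hypothetical usage on a random symmetric binary graph.
import numpy as np
import bct

rng = np.random.default_rng(0)
upper = np.triu(rng.random((20, 20)) > 0.7, 1).astype(np.int32)
mtx = upper + upper.T
k_cores = k_core_decomposition(mtx, threshold=3)
print(k_cores.ravel())  # 1 marks nodes surviving the decomposition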
    def compute(self):
        centrality_A = bct.degrees_und(self.gA)
        centrality_B = bct.degrees_und(self.gB)

        plots.create_plot_lobecolored("reports/plots/" + self.nameA + '_' +
                                      self.nameB + "_degree_correlation.pdf",
                                      self.nameA + " Degree vs. " +
                                      self.nameB + " Degree",
                                      self.nameA + " Degree",
                                      centrality_A,
                                      self.nameB + " Degree",
                                      centrality_B,
                                      also_log_scale=False,
                                      yticks=[0, 45])

        plt.show()

        return
Example #8
def get_network_measures(fname_connectivity):

    C = pd.read_table(fname_connectivity, header=None, dtype=object)
    #cleaning up structural data
    C = C.drop([0, 1], axis=1)
    C = C.drop([0], axis=0)
    C = C.iloc[:, :-1]
    #C_electrode_names = np.array([e[-4:] for e in  np.array(C.iloc[0])])
    C = np.array(C.iloc[1:, :]).astype(
        'float64')  #finally turn into numpy array

    #binarize connectivity matrix
    C_binarize = bct.weight_conversion(C, "binarize")

    #Calculate Network Measures:

    # 1. Density
    density = bct.density_und(C_binarize)[0]

    # 2. Degree
    degree_mean = np.mean(bct.degrees_und(C_binarize))

    # 3. Clustering Coefficient
    clustering_coefficient = bct.clustering_coef_bu(C_binarize)
    clustering_coefficient_mean = np.mean(clustering_coefficient)

    # 4. characteristic path length (i.e. average shortest path length)
    #Get distance
    C_dist = bct.distance_bin(C_binarize)
    #If there are any disconnected nodes, set their distances to the largest non-Inf length
    C_dist_max = np.nanmax(
        C_dist[C_dist != np.inf])  #find the max length (that's not infinity)
    C_dist[C_dist == np.inf] = C_dist_max  #find the infinities, and replace with max
    characteristic_path_length = bct.charpath(C_dist)[0]

    # 5. Small Worldness
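    # For an equivalent random graph, C_rand ~ k/N and L_rand ~ ln(N)/ln(k);
    # gamma = C/C_rand, lambda = L/L_rand, and sigma = gamma/lambda (> 1 suggests small-world).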
    Cr = degree_mean / len(C_binarize)
    Lr = np.log10(len(C_binarize)) / np.log10(degree_mean)

    gamma = clustering_coefficient_mean / Cr
    lamb = characteristic_path_length / Lr

    sigma = gamma / lamb
    small_worldness = sigma

    network_measures = np.zeros(shape=(1, 5))
    network_measures[0, :] = [
        density, degree_mean, clustering_coefficient_mean,
        characteristic_path_length, small_worldness
    ]
    colLabels = [
        "Density", "degree_mean", "clustering_coefficient_mean",
        "characteristic_path_length", "small_worldness"
    ]
    network_measures_df = pd.DataFrame(network_measures, columns=colLabels)
    return network_measures_df
def get_network_measures(ifname_connectivity):

    C = np.array(pd.DataFrame(loadmat(ifname_connectivity)['connectivity']))
    #binarize connectivity matrix
    C_binarize = bct.weight_conversion(C, "binarize")

    #Calculate Network Measures:

    # 1. Density
    density = bct.density_und(C_binarize)[0]

    # 2. Degree
    degree_mean = np.mean(bct.degrees_und(C_binarize))

    # 3. Clustering Coefficient
    clustering_coefficient = bct.clustering_coef_bu(C_binarize)
    clustering_coefficient_mean = np.mean(clustering_coefficient)

    # 4. characteristic path length (i.e. average shortest path length)
    #Get distance
    C_dist = bct.distance_bin(C_binarize)
    #If there are any disconnected nodes, set their distances to the largest non-Inf length
    C_dist_max = np.nanmax(
        C_dist[C_dist != np.inf])  #find the max length (that's not infinity)
    C_dist[C_dist == np.inf] = C_dist_max  #find the infinities, and replace with max
    characteristic_path_length = bct.charpath(C_dist)[0]

    # 5. Small Worldness
    Cr = degree_mean / len(C_binarize)
    Lr = np.log10(len(C_binarize)) / np.log10(degree_mean)

    gamma = clustering_coefficient_mean / Cr
    lamb = characteristic_path_length / Lr

    sigma = gamma / lamb
    small_worldness = sigma

    network_measures = np.zeros(shape=(1, 5))
    network_measures[0, :] = [
        density, degree_mean, clustering_coefficient_mean,
        characteristic_path_length, small_worldness
    ]
    colLabels = [
        "Density", "degree_mean", "clustering_coefficient_mean",
        "characteristic_path_length", "small_worldness"
    ]
    network_measures_df = pd.DataFrame(network_measures, columns=colLabels)
    return network_measures_df
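Either variant returns a one-row DataFrame; a hedged call with a placeholder filename (the path is illustrative, not from the original):

# Hypothetical usage; 'subject01.mat' stands in for a real connectivity file.
network_measures_df = get_network_measures('subject01.mat')
print(network_measures_df)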
Example #10
def laplacian_energy(mtx: np.ndarray) -> float:
    """ Laplacian Energy


    Parameters
    ----------
    mtx : array-like, shape(N, N)
        Symmetric, weighted and undirected connectivity matrix.


    Returns
    -------
    le : float
        The Laplacian Energy.
    """
    lmtx = scipy.sparse.csgraph.laplacian(mtx, normed=False)
    w = np.linalg.eigvalsh(lmtx)  # symmetric Laplacian, so use the Hermitian solver for real eigenvalues
    avg_degree = np.mean(bct.degrees_und(mtx))
    le = np.sum(np.abs(w - avg_degree))

    return le
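A minimal usage sketch, assuming numpy, scipy, and bct are imported at module scope as the function requires:

# Hypothetical usage on a random weighted undirected graph.
import numpy as np
import scipy.sparse.csgraph
import bct

rng = np.random.default_rng(0)
w = rng.random((12, 12))
mtx = (w + w.T) / 2
np.fill_diagonal(mtx, 0)
print(laplacian_energy(mtx))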
sns.heatmap(pearson[:, :, 100], square=True, vmin=-0.4, vmax=1)


threshPos = 0.4
threshNeg = -0.2
pearsonBinary = copy.deepcopy(pearson)
pearsonBinary[np.abs(pearsonBinary) < threshPos] = 0
pearsonBinaryPos = copy.deepcopy(pearson)
pearsonBinaryPos[pearsonBinaryPos < threshPos] = 0
pearsonBinaryNeg = copy.deepcopy(pearson)
pearsonBinaryNeg[pearsonBinaryNeg > threshNeg] = 0



degree = bct.degrees_und(pearsonBinary).T
degreePos = bct.degrees_und(pearsonBinaryPos).T
degreeNeg = bct.degrees_und(pearsonBinaryNeg).T

strength = bct.strengths_und(np.abs(pearson)).T

strengthPos = np.zeros(shape=(windows, nchan))
strengthNeg = np.zeros(shape=(windows, nchan))
for win in range(windows):
    strengthPos[win, :], strengthNeg[win, :], _, _ = bct.strengths_und_sign(pearson[:, :, win])


strengthNegAbs = np.abs(strengthNeg)

#normalize
degreeNorm = degree / degree.max(axis=0)
for group in groups:
    if group == 'patient':
        participants = ['z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z8']
    elif group == 'control':
        participants = ['c1', 'c2', 'c3', 'c5', 'c6', 'c7', 'c8']

    all_measures = np.empty(shape=[68, len(participants), 5])
    adjmats = np.empty(shape=[68, 68, len(participants)])
    counter = 0

    for participant in participants:
        adjmat = sio.loadmat(participant + '_FA.mat')
        adjmat = adjmat['adjacency_matrix']
        labels = get_parcellation_labels(generate_ROI_file(FreeSurfer_ROI_file)).values
        labels, adjmat = remove_non_cortical_ROIs(labels, adjmat)
        all_measures[:, counter, 0] = bct.degrees_und(adjmat)
        all_measures[:, counter, 1] = bct.strengths_und(adjmat)
        all_measures[:, counter, 2] = bct.clustering_coef_wu(adjmat)
        all_measures[:, counter, 3] = bct.betweenness_wei(adjmat)
        all_measures[:, counter, 4] = bct.efficiency_wei(adjmat, local=True)
        adjmats[:, :, counter] = adjmat
        counter += 1

    mean_measures = np.mean(all_measures, axis=1)
    if group == 'patient':
        patient = pd.DataFrame(mean_measures, index=labels,
                               columns=['patient.NodeDegree', 'patient.Strength',
                                        'patient.ClustCoeff', 'patient.BetweenCent',
                                        'patient.LocEff'])
        patient_measures = all_measures
        patient_adjmats = adjmats
    elif group == 'control':
        control = pd.DataFrame(mean_measures, index=labels,
                               columns=['control.NodeDegree', 'control.Strength',
                                        'control.ClustCoeff', 'control.BetweenCent',
                                        'control.LocEff'])
Example #13
vc = {}  # vertex color dictionary
# colors (rgb) for modules
module_rgb_dict = {
    1: '255 0 0',
    2: '0 255 0',
    3: '255 255 0',
    4: '0 0 255',
    5: '255 0 255',
    6: '51 255 255'
}
count = 0
for line in fp_roi:
    #  if line[0] != '#':
    elms = line.split()
    vc[int(elms[1])] = module_rgb_dict[Ci[count]]  # id -> 'r g b'
    count += 1

vc[0] = '255 255 255'  # vertex id not assoc'd with any parcel gets colored white

degs = bct.degrees_und(adjMtx)

#-------------------------------
# 1st pass through the .obj file: count # of verts and faces (triangles)
in_fname = '../anatomy/' + root_fname + '.obj'
try:
    fp = open(in_fname, 'r')
except IOError:
    print("Error opening " + in_fname)
    raise  # fp would be undefined below, so don't continue silently

numv = 0
numf = 0
for line in fp:
    if line[0] == 'v':
        numv += 1
    elif line[0] == 'f':
        numf += 1
##
projected_erm_cov = mne.compute_raw_covariance(projected_erm_raw, tmin=0, tmax=None)
cov.plot(raw.info)
erm_cov.plot(erm_raw.info)
projected_erm_cov.plot(projected_erm_raw.info)

corr_zz = corr_z.copy()

corr_zz[corr_zz<0] = 0

stc = get_stc(labels_fname, corr_z[temporal_labels_indices,:].mean(0))
brain = stc.plot(subject='fsaverageSK', time_viewer=True,hemi='split', colormap='gnuplot',
                           views=['lateral','medial'],
                 surface='inflated10', subjects_dir=subjects_dir,clim={'kind':'value','lims':[5,7.5,10]})

stc = get_stc(labels_fname, corr_rest_median[temporal_labels_indices,:].mean(0))
brain = stc.plot(subject='fsaverageSK', time_viewer=True,hemi='split', colormap='gnuplot',
                           views=['lateral','medial'],
                 surface='inflated10', subjects_dir=subjects_dir,clim={'kind':'value','lims':[0,.5,1]})


corr = np.int32(bct.utils.threshold_proportional(corr_z,.15) > 0)
deg = bct.degrees_und(corr)

stc = get_stc(labels_fname, corr_z[254,:])
brain = stc.plot(subject='fsaverageSK', time_viewer=True,hemi='split', colormap='gnuplot',
                           views=['lateral','medial'],
                 surface='inflated10', subjects_dir=subjects_dir,clim={'kind':'value','lims':[5,6,7]})

brain.save_image('beta_projected_erm_corr.png')
Example #15
vc = {}  # vertex color dictionary
count = 0
# colors (rgb) for modules (Q: what's the max # of modules we'll have??)
# rf. http://www.rapidtables.com/web/color/RGB_Color.htm
# Red,Green,Yellow,Blue,...
module_rgb_dict = {1:'255 0 0', 2: '0 255 0', 3: '255 255 0', 4: '0 0 255', 5: '255 0 255', 6: '51 255 255'}
count = 0
for line in fp_roi:
#  if line[0] != '#':
  elms = line.split()
  vc[int(elms[1])] = module_rgb_dict[Ci[count]]    # id -> 'r g b'
  count += 1

vc[0] = '255 255 255'   # vertex id not assoc'd with any parcel gets colored white

degs = bct.degrees_und(adjMtx)

#-------------------------------
# 1st pass through the .obj file: count # of verts and faces (triangles)
in_fname = '../anatomy/' + root_fname + '.obj'
try:
  fp = open(in_fname, 'r')
except IOError:
  print("Error opening " + in_fname)
  raise  # fp would be undefined below, so don't continue silently

numv = 0
numf = 0
for line in fp:
  if line[0] == 'v':
    numv += 1
  elif line[0] == 'f':
    numf += 1
Example #16
                                    subject,
                                    "{0}-session-{1}_{2}-{3}_{4}-corrmat.csv".
                                    format(subject, i, task, conditions[j],
                                           mask),
                                ),
                                corrmat,
                            )

                            # reset kappa starting point
                            # calculate proportion of connections that can be retained
                            # before degree dist. ceases to be scale-free
                            kappa = 0.01
                            skewness = 1
                            while abs(skewness) > 0.3:
                                w = bct.threshold_proportional(corrmat, kappa)
                                skewness = skew(bct.degrees_und(w))
                                kappa += 0.01
                            df.at[(subject, sessions[i], task, conds[j], mask),
                                  "k_scale-free", ] = kappa

                            # reset kappa starting point
                            # calculate proportion of connections that need to be retained
                            # for node connectedness
                            kappa = 0.01
                            num = 2
                            while num > 1:
                                w = bct.threshold_proportional(corrmat, kappa)
                                [comp, num] = bct.get_components(w)
                                num = np.unique(comp).shape[0]
                                kappa += 0.01
                            df.at[(subject, sessions[i], task, conds[j], mask),
Example #17
def create_feature_matrix(structure_matrix_file):
    # Feature matrix with each element containing an NxN array
    feature_matrix = []

    # EDGE WEIGHT (Depth 0)
    # weighted & undirected network
    structural_connectivity_array = np.array(
        pd.DataFrame(loadmat(structure_matrix_file)['connectivity']))
    feature_matrix.append(structural_connectivity_array)

    # DEGREE (Depth 1 & 2)
    # Node degree is the number of links connected to the node.
    deg = bct.degrees_und(structural_connectivity_array)
    fill_array_2D(feature_matrix, deg)

    # *** Conversion of connection weights to connection lengths ***
    connection_length_matrix = bct.weight_conversion(
        structural_connectivity_array, 'lengths')
    # print(connection_length_matrix)

    # SHORTEST PATH LENGTH (Depth 3 & 4)
    '''
    The distance matrix contains lengths of shortest paths between all pairs of nodes.
    An entry (u,v) represents the length of shortest path from node u to node v.
    The average shortest path length is the characteristic path length of the network.
    '''
    shortest_path = bct.distance_wei(connection_length_matrix)
    feature_matrix.append(
        shortest_path[0])  # distance (shortest weighted path) matrix
    feature_matrix.append(
        shortest_path[1]
    )  # matrix of number of edges in shortest weighted path

    # BETWEENNESS CENTRALITY (Depth 5 & 6)
    '''
    Node betweenness centrality is the fraction of all shortest paths in
    the network that contain a given node. Nodes with high values of
    betweenness centrality participate in a large number of shortest paths.
    '''
    bc = bct.betweenness_wei(connection_length_matrix)
    fill_array_2D(feature_matrix, bc)

    # CLUSTERING COEFFICIENTS (Depth 7 & 8)
    '''
    The weighted clustering coefficient is the average "intensity" of
    triangles around a node.
    '''
    cl = bct.clustering_coef_wu(connection_length_matrix)
    fill_array_2D(feature_matrix, cl)

    # Find disconnected nodes - component size set to 1
    new_array = structural_connectivity_array
    W_bin = bct.weight_conversion(structural_connectivity_array, 'binarize')
    [comps, comp_sizes] = bct.get_components(W_bin)
    print('comp: ', comps)
    print('sizes: ', comp_sizes)
    # Drop nodes outside the largest (modal) component from both rows and columns
    main_component = statistics.mode(comps)
    disconnected = [i for i, c in enumerate(comps) if c != main_component]
    new_array = np.delete(new_array, disconnected, axis=0)
    new_array = np.delete(new_array, disconnected, axis=1)

    return feature_matrix
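A hedged usage line; the path is a placeholder, and fill_array_2D is assumed to append two feature planes per call, as the depth comments suggest.

# Hypothetical usage; 'subject01_connectivity.mat' is a placeholder path.
feature_matrix = create_feature_matrix('subject01_connectivity.mat')
print(len(feature_matrix))  # 9 NxN planes if fill_array_2D appends two per call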
Example #18
    label_data_orig = np.abs(label_data)
    label_data_cont = np.transpose(
        np.dstack((label_data_orig, np.transpose(label_data_orth, (1, 2, 0)))),
        (1, 2, 0))
    corr_mats[index] = np.array([np.corrcoef(dat)
                                 for dat in label_data_cont])[:, 0, 1:].T
    print(float(index) / len(labels) * 100)

corr_mats = np.transpose(corr_mats, (2, 0, 1))

corr = np.median(np.array([(np.abs(corr_mat) + np.abs(corr_mat).T) / 2.
                           for corr_mat in corr_mats]),
                 axis=0)

corr = np.int32(bct.utils.threshold_proportional(corr, .15) > 0)
deg = np.array(bct.degrees_und(corr))

data = {'corr_mats': corr_mats, 'labels': labels, 'corr': corr, 'deg': deg}

pkl_fname = os.path.join(
    data_path, subject + '_lf_' + str(int(lf)) + '_hf_' + str(int(hf)) +
    '_labels_' + str(len(labels)) + '_timestamp_' + '_'.join(
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S").split(' ')) +
    '_corr_orth.pkl')
joblib.dump(data, pkl_fname)

labels_path = '/cluster/transcend/sheraz/Dropbox/mne-camcan-data/recons/fsaverageSK/2009_labels/'

labels_fname = [
    str(labels_path + label.name[-2:] + '.' + label.name[:-3] + '.label')
    for label in labels
]
Example #19
def threshold_mean_degree(mtx, threshold_mean_degree):
    """ Threshold a graph based on the mean degree.



    Parameters
    ----------
    mtx : array-like, shape(N, N)
        Symmetric, weighted and undirected connectivity matrix.

    threshold_mean_degree : int
        Mean degree threshold.


    Returns
    -------
    binary_mtx : array-like, shape(N, N)
        A binary mask matrix.
    """
    binary_mtx = np.zeros_like(mtx, dtype=np.int32)
    N, _ = np.shape(mtx)

    iterations = 100
    step = 1.0 / iterations
    thres = 0.0
    thresdeg = np.zeros((iterations, 2))

    for i in range(iterations):
        thres += step

        tmp_binary = np.zeros_like(binary_mtx)

        for k in range(N):
            for l in range(k + 1, N):
                if mtx[k, l] > thres:
                    tmp_binary[k, l] = 1
                    tmp_binary[l, k] = 1

        degree = bct.degrees_und(tmp_binary)

        thresdeg[i, 0] = np.mean(degree)
        thresdeg[i, 1] = thres

    # find the mean degree nearest to the requested threshold_mean_degree
    diff = np.zeros((iterations, 1))

    for i in range(iterations):
        diff[i] = np.abs(thresdeg[i, 0] - threshold_mean_degree)

    # find the mean degree with the minimum difference from the target
    r = np.argmin(diff)

    # find the threshold that corresponds to that mean degree
    # mdegree = thresdeg[r, 0]
    thres = thresdeg[r, 1]

    for k in range(N):
        for l in range(k + 1, N):
            if mtx[k, l] > thres:
                binary_mtx[k, l] = 1
                binary_mtx[l, k] = 1

    return binary_mtx
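A minimal usage sketch (the inputs are assumptions): weights drawn in (0, 1) so the 0.01-step threshold sweep covers them.

# Hypothetical usage: binarize a random weighted graph at a mean degree of ~5.
import numpy as np
import bct

rng = np.random.default_rng(0)
w = rng.random((30, 30))
mtx = (w + w.T) / 2
np.fill_diagonal(mtx, 0)
binary_mtx = threshold_mean_degree(mtx, 5)
print(np.mean(bct.degrees_und(binary_mtx)))  # should land close to 5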
Example #20
def threshold_global_cost_efficiency(mtx, iterations):
    """ Threshold a graph based on the Global Efficiency - Cost formula.

    .. [Basset2009] Bassett, D. S., Bullmore, E. T., Meyer-Lindenberg, A., Apud, J. A., Weinberger, D. R., & Coppola, R. (2009). Cognitive fitness of cost-efficient brain functional networks. Proceedings of the National Academy of Sciences, 106(28), 11747-11752.



    Parameters
    ----------
    mtx : array-like, shape(N, N)
        Symmetric, weighted and undirected connectivity matrix.

    iterations : int
        Number of steps; the resolution used when searching for the optimum.


    Returns
    -------
    binary_mtx : array-like, shape(N, N)
        A binary mask matrix.

    threshold : float
        The threshold that maximizes the global cost efficiency.

    global_cost_eff_max : float
        Global cost efficiency.

    efficiency : float
        Global efficiency.

    cost_max : float
        Cost of the network at the maximum global cost efficiency
    """
    binary_mtx = np.zeros_like(mtx, dtype=np.int32)

    step = 1.0 / iterations

    thresholds = np.arange(0, 1 + step, step)

    N, _ = np.shape(mtx)

    num_connections = (N * (N - 1)) / 2.0

    global_cost_eff = np.zeros((iterations, 1))

    cost = np.zeros((iterations, 1))

    for i in range(iterations):
        tmp_binary = np.zeros_like(binary_mtx)

        for k in range(N):
            for l in range(k + 1, N):
                if mtx[k, l] > thresholds[i]:
                    tmp_binary[k, l] = 1
                    tmp_binary[l, k] = 1

        global_eff = bct.efficiency_bin(tmp_binary)

        degree = bct.degrees_und(tmp_binary)
        total_degree = np.sum(degree)

        cost[i] = (0.5 * total_degree) / num_connections
        global_cost_eff[i] = global_eff - cost[i]

    indx_max = np.argmax(global_cost_eff)
    threshold = thresholds[indx_max]

    for k in range(N):
        for l in range(k + 1, N):
            if mtx[k, l] >= threshold:
                binary_mtx[k, l] = 1
                binary_mtx[l, k] = 1

    cost_max = cost[indx_max]
    global_cost_eff_max = global_cost_eff[indx_max]
    efficiency = bct.efficiency_bin(binary_mtx)

    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.plot(cost, global_cost_eff)
    # plt.plot(cost_max, global_cost_eff_max, 'b*', label='Max Global Cost Efficiency')
    # plt.title('Economical small-world network at max Global Cost Efficiency')
    # plt.xlabel('Cost')
    # plt.ylabel('Global Cost Efficiency')
    # plt.legend()
    # plt.show()

    return binary_mtx, threshold, global_cost_eff_max, efficiency, cost_max
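A minimal usage sketch under the same assumptions (weights in (0, 1), numpy and bct imported):

# Hypothetical usage; the 1/iterations threshold grid covers weights in (0, 1).
import numpy as np
import bct

rng = np.random.default_rng(0)
w = rng.random((30, 30))
mtx = (w + w.T) / 2
np.fill_diagonal(mtx, 0)
bin_mtx, thr, gce_max, eff, cost_max = threshold_global_cost_efficiency(mtx, iterations=50)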
def process(data):
    return bct.degrees_und(data)
Example #22
                corr_pln += [CorrelationAnalyzer(nits)]

            corr_cls_coef = [d.corrcoef for d in corr_cls]
            corr_pln_coef = [d.corrcoef for d in corr_pln]

            full_matrix = np.concatenate(
                [np.abs(corr_cls_coef),
                 np.abs(corr_pln_coef)], axis=0)
            threshold = np.median(full_matrix[np.nonzero(full_matrix)]) + \
                np.std(full_matrix[np.nonzero(full_matrix)])

            data_cls_bin = np.abs(corr_cls_coef) > threshold
            data_pln_bin = np.abs(corr_pln_coef) > threshold

            deg_cls_tmp = np.asarray(
                [bct.degrees_und(g) for g in data_cls_bin])

            deg_pln_tmp = np.asarray(
                [bct.degrees_und(g) for g in data_pln_bin])

            trans_cls_tmp = np.asarray(
                [bct.transitivity_bu(g) for g in data_cls_bin])
            trans_pln_tmp = np.asarray(
                [bct.transitivity_bu(g) for g in data_pln_bin])

            cp_cls_tmp = np.asarray(
                [bct.distance.charpath(g)[0] for g in data_cls_bin])
            cp_pln_tmp = np.asarray(
                [bct.distance.charpath(g)[0] for g in data_pln_bin])

            # Add measure to results list
Example #23
def threshold_omst_global_cost_efficiency(mtx, n_msts=None):
    """ Threshold a graph by optimizing the formula GE-C via orthogonal MSTs.

    .. [Dimitriadis2017a] Dimitriadis, S. I., Salis, C., Tarnanas, I., & Linden, D. E. (2017). Topological Filtering of Dynamic Functional Brain Networks Unfolds Informative Chronnectomics: A Novel Data-Driven Thresholding Scheme Based on Orthogonal Minimal Spanning Trees (OMSTs). Frontiers in neuroinformatics, 11.
    .. [Dimitriadis2017n] Dimitriadis, S. I., Antonakakis, M., Simos, P., Fletcher, J. M., & Papanicolaou, A. C. (2017). Data-driven Topological Filtering based on Orthogonal Minimal Spanning Trees: Application to Multi-Group MEG Resting-State Connectivity. Brain Connectivity, (ja).
    .. [Basset2009] Bassett, D. S., Bullmore, E. T., Meyer-Lindenberg, A., Apud, J. A., Weinberger, D. R., & Coppola, R. (2009). Cognitive fitness of cost-efficient brain functional networks. Proceedings of the National Academy of Sciences, 106(28), 11747-11752.



    Parameters
    ----------
    mtx : array-like, shape(N, N)
        Symmetric, weighted and undirected connectivity matrix.

    n_msts : int or None
        Maximum number of OMSTs to compute. Default `None`; an exhaustive
        computation will be performed.

    Returns
    -------
    nCIJtree : array-like, shape(n_msts, N, N)
        A matrix containing all the orthogonal MSTs.

    CIJtree : array-like, shape(N, N)
        Resulting graph.

    degree : float
        The mean degree of the resulting graph.

    global_eff : float
        Global efficiency of the resulting graph.

    global_cost_eff_max : float
        The value where global efficiency - cost is maximized.

    cost_max : float
        Cost of the network at the maximum global cost efficiency.
    """
    imtx = np.copy(mtx)
    imtx_uptril = np.copy(mtx)

    N, _ = np.shape(imtx)

    for k in range(N):
        for l in range(k + 1, N):
            imtx_uptril[l, k] = 0.0
    np.fill_diagonal(imtx_uptril, 0.0)

    # Find the number of orthogonal msts according to the desired mean degree
    num_edges = len(np.where(imtx > 0.0)[0])

    if n_msts is None:
        num_msts = np.round(num_edges / (N - 1)) + 1
    else:
        num_msts = n_msts
    pos_num_msts = np.round(num_edges / (N - 1))

    if num_msts > pos_num_msts:
        num_msts = pos_num_msts

    CIJnotintree = imtx

    # Keep the N-1 connections of the num_msts MSTs
    num_msts = np.int32(num_msts)
    mst_conn = np.zeros((num_msts * (N - 1), 2))

    nCIJtree = np.zeros((num_msts, N, N))  #, dtype=np.int32)
    omst = np.zeros((num_msts, N, N), dtype=np.float32)

    # Build each of the num_msts orthogonal MSTs in turn
    count = 0
    CIJtree = np.zeros((N, N))

    for no in range(num_msts):
        with np.errstate(divide='ignore'):
            tmp_mtx = 1.0 / CIJnotintree  # absent edges map to inf, so the MST never selects them
        graph = nx.from_numpy_array(tmp_mtx)  # nx.from_numpy_matrix was removed in networkx 3.x
        mst = nx.minimum_spanning_tree(graph)
        links = list(mst.edges())

        new_mst = np.zeros((N, N))
        mst_num_links = len(links)
        for k in range(mst_num_links):
            link1 = links[k][0]
            link2 = links[k][1]

            CIJtree[link1, link2] = imtx[link1, link2]
            CIJtree[link2, link1] = imtx[link1, link2]

            mst_conn[count, 0] = link1
            mst_conn[count, 1] = link2

            new_mst[link1, link2] = imtx[link1, link2]
            new_mst[link2, link1] = imtx[link1, link2]
            count += 1

        iCIJtree = np.ones((N, N))
        iCIJtree[np.where(CIJtree != 0.0)] = 0
        CIJnotintree = CIJnotintree * iCIJtree
        nCIJtree[no, :, :] = CIJtree
        omst[no, :, :] = new_mst

    global_eff_ini = bct.efficiency_wei(imtx_uptril) * 2.0
    cost_ini = np.sum(imtx_uptril[:])

    # Insert the 1st MST
    graph = np.zeros((N, N))
    global_cost_eff = np.zeros((num_msts, 1))
    degrees = np.zeros((num_msts, 1))
    cost = np.zeros((num_msts, 1))

    for k in range(num_msts):
        graph = nCIJtree[k, :, :]

        degree = bct.degrees_und(graph)
        mean_degree = np.mean(degree)
        degrees[k] = mean_degree

        cost[k] = np.sum(graph) / cost_ini

        global_eff = bct.efficiency_wei(graph)
        global_cost_eff[k] = global_eff / global_eff_ini - cost[k]

    # Get the OMST where the formula GE-C is maximized
    indx_max = np.argmax(global_cost_eff)

    # Final output
    degree = degrees[indx_max]
    CIJtree = nCIJtree[indx_max, :, :]
    cost_max = cost[indx_max]
    global_eff = bct.efficiency_wei(1.0 / CIJtree)
    global_cost_eff_max = global_cost_eff[indx_max]

    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.plot(cost, global_cost_eff)
    # plt.plot(cost_max, global_cost_eff_max, 'b*', label='Max Global Cost Efficiency')
    # plt.title('Economical small-world network at max Global Cost Efficiency')
    # plt.xlabel('Cost')
    # plt.ylabel('Global Cost Efficiency')
    # plt.legend()
    # plt.show()

    # return nCIJtree, CIJtree, degree, global_eff, global_cost_eff_max, cost_max
    return nCIJtree, CIJtree, degree, global_eff, global_cost_eff_max, cost_max, cost, global_cost_eff
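A hedged usage sketch; networkx (as nx), numpy (as np), and bct are assumed imported, and runtime warnings from infinite lengths on absent edges are expected.

# Hypothetical usage on a random weighted graph.
import numpy as np

rng = np.random.default_rng(0)
w = rng.random((20, 20))
mtx = (w + w.T) / 2
np.fill_diagonal(mtx, 0)
(nCIJtree, CIJtree, degree, global_eff,
 gce_max, cost_max, cost, gce) = threshold_omst_global_cost_efficiency(mtx, n_msts=5)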
Example #25
def rich_feeder_peripheral(x, sc, stat='median'):
    """
    Calculates connectivity values in rich, feeder, and peripheral edges.

    Parameters
    ----------
    x : (N, N) numpy.ndarray
        Symmetric correlation or connectivity matrix
    sc : (N, N) numpy.ndarray
        Binary structural connectivity matrix
    stat : {'mean', 'median'}, optional
        Statistic to use over rich/feeder/peripheral links. Default: 'median'

    Returns
    -------
    rfp : (3, k) numpy.ndarray
        Array of median rich (0), feeder (1), and peripheral (2)
        values, defined by `x`. `k` is the maximum degree defined on `sc`.
    pvals : (3, k) numpy.ndarray
        p-value for each link, computed using Welch's t-test.
        Rich links are compared against non-rich links. Feeder links are
        compared against peripheral links. Peripheral links are compared
        against feeder links. T-test is one-sided.

    Author
    ------
    This code was written by Justine Hansen who promises to fix and even
    optimize the code should any issues arise, provided you let her know.
    """

    stats = ['mean', 'median']
    if stat not in stats:
        raise ValueError(f'Provided stat {stat} not valid. '
                         f'Must be one of {stats}')

    nnodes = len(sc)
    mask = np.triu(np.ones(nnodes), 1) > 0
    node_degree = degrees_und(sc)
    k = np.max(node_degree).astype(np.int64)
    rfp_label = np.zeros((len(sc[mask]), k))

    for degthresh in range(k):  # for each degree threshold
        hub_idx = np.where(node_degree >= degthresh)  # find the hubs
        hub = np.zeros([nnodes, 1])
        hub[hub_idx, :] = 1

        rfp = np.zeros([nnodes, nnodes])  # for each link, define rfp
        for edge1 in range(nnodes):
            for edge2 in range(nnodes):
                if hub[edge1] + hub[edge2] == 2:
                    rfp[edge1, edge2] = 1  # rich
                if hub[edge1] + hub[edge2] == 1:
                    rfp[edge1, edge2] = 2  # feeder
                if hub[edge1] + hub[edge2] == 0:
                    rfp[edge1, edge2] = 3  # peripheral
        rfp_label[:, degthresh] = rfp[mask]

    rfp = np.zeros([3, k])
    pvals = np.zeros([3, k])
    for degthresh in range(k):

        redfunc = np.median if stat == 'median' else np.mean
        for linktype in range(3):
            rfp[linktype, degthresh] = redfunc(
                x[mask][rfp_label[:, degthresh] == linktype + 1])

        # p-value (one-sided Welch's t-test)
        _, pvals[0, degthresh] = ttest_ind(
            x[mask][rfp_label[:, degthresh] == 1],
            x[mask][rfp_label[:, degthresh] != 1],
            equal_var=False, alternative='greater')
        _, pvals[1, degthresh] = ttest_ind(
            x[mask][rfp_label[:, degthresh] == 2],
            x[mask][rfp_label[:, degthresh] == 3],
            equal_var=False, alternative='greater')
        _, pvals[2, degthresh] = ttest_ind(
            x[mask][rfp_label[:, degthresh] == 3],
            x[mask][rfp_label[:, degthresh] == 2],
            equal_var=False, alternative='greater')

    return rfp, pvals
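A minimal usage sketch (the setup is assumed): the function needs `from bct import degrees_und` and `from scipy.stats import ttest_ind` (scipy >= 1.6 for `alternative`); low degree thresholds can leave empty feeder/peripheral bins and produce NaNs, as in the original.

# Hypothetical usage on random functional (x) and structural (sc) matrices.
import numpy as np
from bct import degrees_und
from scipy.stats import ttest_ind

rng = np.random.default_rng(0)
n = 30
x = rng.random((n, n))
x = (x + x.T) / 2
up = np.triu(rng.random((n, n)) > 0.6, 1).astype(int)
sc = up + up.T
rfp, pvals = rich_feeder_peripheral(x, sc)  # rows: rich, feeder, peripheral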
Example #26
def test_degrees_und():
    x = load_sample()
    s = bct.degrees_und(bct.threshold_proportional(x, .26))
    assert np.sum(s) == 4916
    if group == 'patient':
        participants = ['z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z8']
    elif group == 'control':
        participants = ['c1', 'c2', 'c3', 'c5', 'c6', 'c7', 'c8']

    all_measures = np.empty(shape=[68, len(participants), 5])
    adjmats = np.empty(shape=[68, 68, len(participants)])
    counter = 0

    for participant in participants:
        adjmat = sio.loadmat(participant + '_FA.mat')
        adjmat = adjmat['adjacency_matrix']
        labels = get_parcellation_labels(
            generate_ROI_file(FreeSurfer_ROI_file)).values
        labels, adjmat = remove_non_cortical_ROIs(labels, adjmat)
        all_measures[:, counter, 0] = bct.degrees_und(adjmat)
        all_measures[:, counter, 1] = bct.strengths_und(adjmat)
        all_measures[:, counter, 2] = bct.clustering_coef_wu(adjmat)
        all_measures[:, counter, 3] = bct.betweenness_wei(adjmat)
        all_measures[:, counter, 4] = bct.efficiency_wei(adjmat, local=True)
        adjmats[:, :, counter] = adjmat
        counter += 1

    mean_measures = np.mean(all_measures, axis=1)
    if group == 'patient':
        patient = pd.DataFrame(mean_measures,
                               index=labels,
                               columns=[
                                   'patient.NodeDegree', 'patient.Strength',
                                   'patient.ClustCoeff', 'patient.BetweenCent',
                                   'patient.LocEff'