Ejemplo n.º 1
0
def calculateModularity(adj, labels):
    """Compute Newman's modularity Q of a community partition of a graph.

    Parameters
    ----------
    adj : 2D array-like
        Symmetric (undirected) weighted adjacency matrix.
    labels : sequence
        Community label of each node; ``labels[i]`` is node i's module.

    Returns
    -------
    float
        Q = (1/W) * sum_ij (A_ij - k_i * k_j / W) * delta(c_i, c_j),
        where W = sum of all entries of ``adj`` (i.e. 2m for a simple
        undirected graph) and k is the node strength vector.
    """
    adj = np.asarray(adj, dtype=float)
    labels = np.asarray(labels)
    # Total weight; for an undirected matrix this counts each edge twice (2m).
    total_weight = np.sum(adj)
    # Node strengths (weighted degrees); for a symmetric matrix this is
    # exactly what bct.strengths_und(adj) returns (column sums).
    degrees = np.sum(adj, axis=0)
    # delta(c_i, c_j): True where the two endpoints share a community label.
    same_module = labels[:, None] == labels[None, :]
    # BUGFIX: the original applied the community indicator only to the
    # null-model term (adding A_ij for *every* pair), and normalized by
    # 2*sum(adj) instead of sum(adj). The whole bracket must be gated by
    # delta and the result divided by the total weight once.
    Q = np.sum((adj - np.outer(degrees, degrees) / total_weight)
               * same_module) / total_weight
    return Q
Ejemplo n.º 2
0
    def compute(self):
        """Compute per-node strengths (weighted degrees) of ``self.g``.

        Stores the strengths under ``self.stats['Strength']``, renders a
        lobe-grouped descending bar plot to a PDF under reports/plots/,
        shows the figure, prints the mean strength, and returns the updated
        stats dict.
        """
        weights = bct.strengths_und(self.g)
        # list(...) materializes directly; the original copy-comprehension
        # [v for v in weights] is the PERF402 anti-pattern.
        self.stats['Strength'] = list(weights)

        # Lobe grouping
        plots.create_bar_lobe_grouped_descending(
            "reports/plots/" + self.name + "_weighted_degree_descending.pdf",
            self.name + " Weighted Degree Centrality", 'Weighted Degree',
            weights)

        plt.show()

        average = statistics.mean(self.stats['Strength'])
        print("Average Strength: " + str(average) + "\n")
        return self.stats
Ejemplo n.º 3
0
def do_opt(adj,mods,option):
    """Dispatch a BCT graph-metric computation selected by name.

    ``adj`` is the (weighted) adjacency matrix, ``mods`` the module
    assignment used by the community-aware metrics. Returns the metric
    value, or None for an unrecognized option string.
    """
    metric_table = {
        'global efficiency': lambda: bct.efficiency_wei(adj),
        'local efficiency': lambda: bct.efficiency_wei(adj, local=True),
        'average strength': lambda: bct.strengths_und(adj),
        'clustering coefficient': lambda: bct.clustering_coef_wu(adj),
        'eigenvector centrality': lambda: bct.eigenvector_centrality_und(adj),
        # kcoreness_centrality_bu returns (coreness, kn); keep coreness only.
        'binary kcore': lambda: bct.kcoreness_centrality_bu(adj)[0],
        # modularity_und returns (ci, q); keep the modularity value only.
        'modularity': lambda: bct.modularity_und(adj, mods)[1],
        'participation coefficient': lambda: bct.participation_coef(adj, mods),
        'within-module degree': lambda: bct.module_degree_zscore(adj, mods),
    }
    chosen = metric_table.get(option)
    return chosen() if chosen is not None else None
Ejemplo n.º 4
0
def do_opt(adj, mods, option):
    """Compute one BCT graph metric chosen by its option name.

    ``adj`` is the weighted adjacency matrix; ``mods`` is the module
    partition consumed by the community-based metrics. An unknown option
    yields None.
    """
    # Metrics that take only the adjacency matrix.
    plain_metrics = {
        'global efficiency': bct.efficiency_wei,
        'average strength': bct.strengths_und,
        'clustering coefficient': bct.clustering_coef_wu,
        'eigenvector centrality': bct.eigenvector_centrality_und,
    }
    if option in plain_metrics:
        return plain_metrics[option](adj)
    if option == 'local efficiency':
        return bct.efficiency_wei(adj, local=True)
    if option == 'binary kcore':
        # Keep only the coreness vector from (coreness, kn).
        return bct.kcoreness_centrality_bu(adj)[0]
    # Metrics that also need the module assignment.
    if option == 'modularity':
        # Keep only the modularity value from (ci, q).
        return bct.modularity_und(adj, mods)[1]
    if option == 'participation coefficient':
        return bct.participation_coef(adj, mods)
    if option == 'within-module degree':
        return bct.module_degree_zscore(adj, mods)
    return None
Ejemplo n.º 5
0
# Load each subject's per-band graph dictionaries for the "classic" and
# "plan" conditions. The .npy files hold 0-d object arrays, so .item()
# recovers the stored dict.
for subject in subjects:
    cls = np.load(source_folder +
                  "graph_data/%s_classic_pow_pln.npy" % subject).item()

    pln = np.load(source_folder +
                  "graph_data/%s_plan_pow_pln.npy" % subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

# For each frequency band, reduce every subject's stack of graphs to a
# single node-strength vector (mean strength over the subject's graphs).
for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(
            np.asarray([bct.strengths_und(g) for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(
            np.asarray([bct.strengths_und(g) for g in tmp]).mean(axis=0))
    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    # Feature matrix: subjects x nodes; labels: 0 = classic, 1 = plan.
    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    # NOTE(review): StratifiedKFold(y, n_folds=...) is the pre-0.18
    # scikit-learn signature — confirm the pinned sklearn version.
    cv = StratifiedKFold(y, n_folds=6, shuffle=True)

    # Reload the per-band model fitted in a previous run.
    model = joblib.load(source_folder +
                        "graph_data/sk_models/eigen_ada_pln_%s.plk" % band)
Ejemplo n.º 6
0
def test_strengths_und():
    """Regression test: total node strength of the bundled sample graph.

    strengths_und sums each column of the (symmetric) connectivity matrix,
    so the strengths must add up to the matrix's grand total.
    """
    x = load_sample()
    s = bct.strengths_und(x)
    # BUGFIX: the original asserted on np.sum(x), leaving `s` unused and
    # never exercising strengths_und's output. Check the computed strengths.
    assert np.allclose(np.sum(s), 38967.38702018)
# Load each subject's per-band graph dictionaries for the "post" interval
# (0-d object arrays; .item() recovers the stored dict).
for subject in subjects:
    cls = np.load(source_folder + "graph_data/%s_classic_pow_post.npy" %
                  subject).item()

    pln = np.load(source_folder + "graph_data/%s_plan_pow_post.npy" %
                  subject).item()

    cls_all.append(cls)
    pln_all.append(pln)

# Per band: average each subject's node strengths over their graph stack.
for k, band in enumerate(bands.keys()):
    data_cls = []
    for j in range(len(cls_all)):
        tmp = cls_all[j][band]
        data_cls.append(
            np.asarray([bct.strengths_und(g) for g in tmp]).mean(axis=0))
    data_pln = []
    for j in range(len(pln_all)):
        tmp = pln_all[j][band]
        data_pln.append(
            np.asarray([bct.strengths_und(g) for g in tmp]).mean(axis=0))

    data_cls = np.asarray(data_cls)
    data_pln = np.asarray(data_pln)

    # Feature matrix: subjects x nodes; labels: 0 = classic, 1 = plan.
    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    # NOTE(review): pre-0.18 scikit-learn StratifiedKFold signature.
    cv = StratifiedKFold(y, n_folds=6, shuffle=True)

    # NOTE(review): the snippet is truncated here — the cv_params dict
    # literal is left unclosed in the source.
    cv_params = {
    #conn_measure = ConnectivityMeasure(kind=kind,vectorize=True)
    # Fit one connectivity estimator per kind over all sites' time series.
    conn_measure = ConnectivityMeasure(kind=kind)
    connectivity_biomarkers[kind] = conn_measure.fit_transform(ts_allsites)

# For each kind, all individual coefficients are stacked in a unique 2D matrix.
print('{0} correlation biomarkers for each subject.'.format(
    connectivity_biomarkers['tangent'].shape[1]))


#### Complex graph metrics
# Per-subject node-level metrics computed from the tangent-space matrices.
eig_cens=[]
clusterings=[]
Node_strengths=[]

for i in range(len(ts_allsites)):
    Node_strength=bct.strengths_und(connectivity_biomarkers['tangent'][i])
    Node_strengths.append(Node_strength)
    eig_cen = bct.centrality.eigenvector_centrality_und(connectivity_biomarkers['tangent'][i])
    eig_cens.append(eig_cen)
    # NOTE(review): clustering_coef_wd is the *directed* variant while the
    # other metrics use undirected ones — confirm this is intentional.
    clustering=bct.clustering_coef_wd(connectivity_biomarkers['tangent'][i])
    clusterings.append(clustering)

# Stack per-subject vectors into subjects x nodes arrays.
Node_strengths = np.stack(Node_strengths)
eig_cens=np.stack(eig_cens)
clusterings=np.stack(clusterings)


from nilearn.connectome import sym_matrix_to_vec
mat_connectivity= []

Ejemplo n.º 9
0
def main():
    """Compute graph-theory measures from a connectivity matrix and a
    streamline-length matrix, then write (or append) them to a JSON file.

    Metrics come from the Brain Connectivity Toolbox (bct); node-wise
    results are either averaged (--avg_node_wise) or kept as lists.
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_length_matrix, args.in_conn_matrix])

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    # --append_json accumulates results across runs, so the usual
    # output-overwrite guard only applies when not appending.
    if not args.append_json:
        assert_outputs_exist(parser, args, args.out_json)
    else:
        logging.debug('Using --append_json, make sure to delete {} '
                      'before re-launching a group analysis.'.format(
                          args.out_json))

    if args.append_json and args.overwrite:
        parser.error('Cannot use the append option at the same time as '
                     'overwrite.\nAmbiguous behavior, consider deleting the '
                     'output json file first instead.')

    conn_matrix = load_matrix_in_any_format(args.in_conn_matrix)
    len_matrix = load_matrix_in_any_format(args.in_length_matrix)

    # Optional element-wise mask applied to both matrices.
    if args.filtering_mask:
        mask_matrix = load_matrix_in_any_format(args.filtering_mask)
        conn_matrix *= mask_matrix
        len_matrix *= mask_matrix
    N = len_matrix.shape[0]

    # func_cast collapses node-wise vectors either to their average or to a
    # plain list, depending on --avg_node_wise.
    if args.avg_node_wise:
        func_cast = avg_cast
    else:
        func_cast = list_cast

    gtm_dict = {}
    # Betweenness normalized by (N-1)*(N-2), the number of ordered node
    # pairs excluding the middle node.
    betweenness_centrality = bct.betweenness_wei(len_matrix) / ((N - 1) *
                                                                (N - 2))
    gtm_dict['betweenness_centrality'] = func_cast(betweenness_centrality)
    # Fixed seed so the Louvain community assignment is deterministic; the
    # community vector ci is reused by the participation coefficient below.
    ci, gtm_dict['modularity'] = bct.modularity_louvain_und(conn_matrix,
                                                            seed=0)

    gtm_dict['assortativity'] = bct.assortativity_wei(conn_matrix, flag=0)
    gtm_dict['participation'] = func_cast(
        bct.participation_coef_sign(conn_matrix, ci)[0])
    gtm_dict['clustering'] = func_cast(bct.clustering_coef_wu(conn_matrix))

    gtm_dict['nodal_strength'] = func_cast(bct.strengths_und(conn_matrix))
    gtm_dict['local_efficiency'] = func_cast(
        bct.efficiency_wei(len_matrix, local=True))
    gtm_dict['global_efficiency'] = func_cast(bct.efficiency_wei(len_matrix))
    gtm_dict['density'] = func_cast(bct.density_und(conn_matrix)[0])

    # Rich club always gives an error for the matrix rank and gives NaN
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        tmp_rich_club = bct.rich_club_wu(conn_matrix)
    # Drop the NaN tail before casting.
    gtm_dict['rich_club'] = func_cast(tmp_rich_club[~np.isnan(tmp_rich_club)])

    # Path length gives an infinite distance for unconnected nodes
    # All of this is simply to fix that
    empty_connections = np.where(np.sum(len_matrix, axis=1) < 0.001)[0]
    if len(empty_connections):
        len_matrix = np.delete(len_matrix, empty_connections, axis=0)
        len_matrix = np.delete(len_matrix, empty_connections, axis=1)

    path_length_tuple = bct.distance_wei(len_matrix)
    gtm_dict['path_length'] = func_cast(path_length_tuple[0])
    gtm_dict['edge_count'] = func_cast(path_length_tuple[1])

    # Re-insert sentinel values (-1) at the indices of the nodes dropped
    # above, so list outputs keep one entry per original node.
    if not args.avg_node_wise:
        for i in empty_connections:
            gtm_dict['path_length'].insert(i, -1)
            gtm_dict['edge_count'].insert(i, -1)

    if args.small_world:
        gtm_dict['omega'], gtm_dict['sigma'] = omega_sigma(len_matrix)

    # Append mode: merge the new measures into the existing JSON, promoting
    # scalar entries to lists so repeated runs accumulate per key.
    if os.path.isfile(args.out_json) and args.append_json:
        with open(args.out_json) as json_data:
            out_dict = json.load(json_data)
        for key in gtm_dict.keys():
            if isinstance(out_dict[key], list):
                out_dict[key].append(gtm_dict[key])
            else:
                out_dict[key] = [out_dict[key], gtm_dict[key]]
    else:
        out_dict = {}
        for key in gtm_dict.keys():
            out_dict[key] = [gtm_dict[key]]

    with open(args.out_json, 'w') as outfile:
        json.dump(out_dict,
                  outfile,
                  indent=args.indent,
                  sort_keys=args.sort_keys)
Ejemplo n.º 10
0
# Thresholds used to binarize/split the Pearson connectivity matrices.
threshPos = 0.4
threshNeg = -0.2
# Keep only strong correlations of either sign: zero where |r| < threshPos.
pearsonBinary = copy.deepcopy(pearson)
pearsonBinary[ np.abs(pearsonBinary) < threshPos] = 0
# Positive-only graph: zero where r < threshPos.
pearsonBinaryPos = copy.deepcopy(pearson)
pearsonBinaryPos[ pearsonBinaryPos < threshPos] = 0
# Negative-only graph: zero where r > threshNeg.
pearsonBinaryNeg = copy.deepcopy(pearson)
pearsonBinaryNeg[ pearsonBinaryNeg > threshNeg] = 0



# Node degrees of the thresholded graphs, transposed.
# NOTE(review): degrees_und is applied to the full 3-D stack — confirm bct
# treats the third (window) axis as intended here.
degree = bct.degrees_und(pearsonBinary[:,:,:]).T
degreePos = bct.degrees_und(pearsonBinaryPos[:,:,:]).T
degreeNeg = bct.degrees_und(pearsonBinaryNeg[:,:,:]).T

# Node strength of the absolute (unthresholded) correlations.
strength =  bct.strengths_und(np.abs( pearson[:,:,:])).T

# Positive/negative strengths computed window by window (windows x nchan).
strengthPos = np.zeros(shape = (windows, nchan))
strengthNeg = np.zeros(shape = (windows, nchan))
for win in range(windows):
    strengthPos[win, :], strengthNeg[win, :] , _, _= bct.strengths_und_sign( pearson[:,:,win])


strengthNegAbs = np.abs(strengthNeg)

#normalize
# Per-column (per-channel) max normalization across windows.
degreeNorm = degree / degree.max(axis=0)
degreePosNorm = degreePos / degreePos.max(axis=0)
degreeNegNorm = degreeNeg / degreeNeg.max(axis=0)
strengthNorm = strength / strength.max(axis=0)
strengthPosNorm = strengthPos / strengthPos.max(axis=0)
    # Participant IDs per group (z7 and c4 are absent from the lists).
    if group == 'patient':
        participants = ['z1','z2','z3','z4','z5','z6','z8']
    elif group == 'control':
        participants = ['c1','c2','c3','c5','c6','c7','c8']

    # 68 cortical ROIs x participants x 5 graph measures.
    all_measures = np.empty(shape=[68,len(participants),5])
    adjmats =  np.empty(shape=[68,68,len(participants)])
    counter = 0

    for participant in participants:
        # FA-weighted adjacency matrix for this participant.
        adjmat = sio.loadmat(participant + '_FA.mat')
        adjmat = adjmat['adjacency_matrix']
        labels = get_parcellation_labels(generate_ROI_file(FreeSurfer_ROI_file)).values
        labels,adjmat = remove_non_cortical_ROIs(labels,adjmat)
        # Measure order: degree, strength, clustering, betweenness, local eff.
        all_measures[:,counter,0] = bct.degrees_und(adjmat)
        all_measures[:,counter,1] = bct.strengths_und(adjmat)
        all_measures[:,counter,2] = bct.clustering_coef_wu(adjmat)
        all_measures[:,counter,3] = bct.betweenness_wei(adjmat)
        all_measures[:,counter,4] = bct.efficiency_wei(adjmat,local=True)
        adjmats[:,:,counter] = adjmat
        counter += 1


    # Group mean of each measure across participants.
    mean_measures = np.mean(all_measures,axis=1)
    if group == 'patient':
        patient = pd.DataFrame(mean_measures, index=labels,columns=['patient.NodeDegree','patient.Strength','patient.ClustCoeff','patient.BetweenCent','patient.LocEff'])
        patient_measures = all_measures
        patient_adjmats = adjmats
    elif group == 'control':
        control = pd.DataFrame(mean_measures, index=labels,columns=['control.NodeDegree','control.Strength','control.ClustCoeff','control.BetweenCent','control.LocEff'])
        control_measures = all_measures
        # Average over the first axis of each subject's condition data.
        cls_all.append(cls.mean(axis=0))
        pln_all.append(pln.mean(axis=0))

    cls_all_2 = np.asarray(cls_all)
    pln_all_2 = np.asarray(pln_all)

    full_matrix = np.concatenate([cls_all_2, pln_all_2], axis=0)

    # Binarization threshold: median + std of the non-zero entries.
    threshold = np.median(full_matrix[np.nonzero(full_matrix)]) + \
        np.std(full_matrix[np.nonzero(full_matrix)])

    # NOTE(review): cls_all/pln_all are Python lists here — `list > float`
    # raises in Python 3; presumably cls_all_2/pln_all_2 were intended.
    data_cls_bin = cls_all > threshold
    data_pln_bin = pln_all > threshold

    # Node strengths of the binarized graphs.
    data_cls = [np.asarray([bct.strengths_und(g)
                            for g in data_cls_bin])]
    data_pln = [np.asarray([bct.strengths_und(g)
                            for g in data_pln_bin])]

    # Feature matrix and condition labels (0 = classic, 1 = plan).
    X = np.vstack([data_cls, data_pln])
    y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])

    cv = StratifiedKFold(n_splits=6, shuffle=True)

    # AdaBoost hyper-parameter grid searched below.
    cv_params = {"learning_rate": np.arange(0.1, 1.1, 0.1),
                 'n_estimators': np.arange(1, 80, 2)}

    # NOTE(review): snippet truncated — the GridSearchCV call is left
    # unclosed in the source.
    grid = GridSearchCV(AdaBoostClassifier(),
                        cv_params,
                        scoring='roc_auc',
    #
    # WMD seems relatively fast, maybe 10min per threshold
    # Within-module degree z-score for the current threshold's matrix,
    # using the community assignment CI.
    WMD[i, :] = bct.module_degree_zscore(tmp_matrix, CI)
    fn = 'completed WMD calculation for %s at:' % cost
    print(fn)
    print(datetime.now())
    #
    # EC[i,:] = bct.eigenvector_centrality_und(tmp_matrix)
    # fn = 'completed EC calculation for %s at:' %cost
    # print(fn)
    # print(datetime.now())

    # GC[i,:], _ = bct.gateway_coef_sign(tmp_matrix, CI)
    # fn = 'completed GC calculation for %s at:' %cost
    # print(fn)
    # print(datetime.now())
    #
    # SC[i,:] = bct.subgraph_centrality(tmp_matrix)
    # fn = 'completed SC calculation for %s at:' %cost
    # print(fn)
    # print(datetime.now())
    #
    # ST is fast to compute, ~10min per threshold
    # Node strengths for the current threshold's matrix.
    ST[i, :] = bct.strengths_und(tmp_matrix)
    fn = 'completed ST calculation for %s at:' % cost
    print(fn)
    print(datetime.now())

print("All done at: ")
print(datetime.now())
def process(data):
    """Return the per-node strengths of the connectivity matrix ``data``."""
    strengths = bct.strengths_und(data)
    return strengths
Ejemplo n.º 15
0
        # Participant IDs per group (z7 and c4 are absent from the lists).
        participants = ['z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z8']
    elif group == 'control':
        participants = ['c1', 'c2', 'c3', 'c5', 'c6', 'c7', 'c8']

    # 68 cortical ROIs x participants x 5 graph measures.
    all_measures = np.empty(shape=[68, len(participants), 5])
    adjmats = np.empty(shape=[68, 68, len(participants)])
    counter = 0

    for participant in participants:
        # FA-weighted adjacency matrix for this participant.
        adjmat = sio.loadmat(participant + '_FA.mat')
        adjmat = adjmat['adjacency_matrix']
        labels = get_parcellation_labels(
            generate_ROI_file(FreeSurfer_ROI_file)).values
        labels, adjmat = remove_non_cortical_ROIs(labels, adjmat)
        # Measure order: degree, strength, clustering, betweenness, local eff.
        all_measures[:, counter, 0] = bct.degrees_und(adjmat)
        all_measures[:, counter, 1] = bct.strengths_und(adjmat)
        all_measures[:, counter, 2] = bct.clustering_coef_wu(adjmat)
        all_measures[:, counter, 3] = bct.betweenness_wei(adjmat)
        all_measures[:, counter, 4] = bct.efficiency_wei(adjmat, local=True)
        adjmats[:, :, counter] = adjmat
        counter += 1

    # Group mean of each measure across participants.
    mean_measures = np.mean(all_measures, axis=1)
    if group == 'patient':
        patient = pd.DataFrame(mean_measures,
                               index=labels,
                               columns=[
                                   'patient.NodeDegree', 'patient.Strength',
                                   'patient.ClustCoeff', 'patient.BetweenCent',
                                   'patient.LocEff'
                               ])