Code example #1
def plot_sliding_window(series, width, step):
    '''
    Plot a sliding-window connectivity graph.

    Parameters
    ----------
    series : numpy.ndarray
        Time-series array of shape (n_samples, n_regions).
    width : int
        Window width, in samples.
    step : int
        Window step size, in samples. If step equals width, the windows
        do not overlap.
    '''

    from nilearn import plotting
    import numpy as np
    from nilearn.connectome import sym_matrix_to_vec
    from nilearn.connectome import ConnectivityMeasure

    # sliding_window_2d is an external helper (see the sketch after this
    # example); it returns an array of shape (n_windows, width, n_regions)
    cut = sliding_window_2d(series, width, step)
    cut_matrix = np.zeros((cut.shape[0], cut.shape[2], cut.shape[2]))
    correlation_measure = ConnectivityMeasure(kind='correlation')

    for i in range(cut.shape[0]):
        matrix = correlation_measure.fit_transform([cut[i]])[0]
        cut_matrix[i, :, :] = matrix

    # one vectorized connectivity matrix per window
    vectors = np.zeros(
        (cut_matrix.shape[0], sym_matrix_to_vec(cut_matrix[0]).shape[0]))

    for i in range(cut_matrix.shape[0]):
        vec = sym_matrix_to_vec(cut_matrix[i])
        vectors[i, :] = vec

    # correlation between the window-wise connectivity vectors
    similarity = np.corrcoef(vectors)
    plotting.plot_matrix(similarity, title="width={} step={}".format(width, step))
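
sliding_window_2d is not defined in this snippet. A minimal sketch of what it plausibly does, assuming a 2-D input of shape (n_samples, n_regions) and the call signature used above:

import numpy as np

def sliding_window_2d(series, width, step):
    # split a (n_samples, n_regions) array into overlapping windows,
    # returning an array of shape (n_windows, width, n_regions)
    starts = range(0, series.shape[0] - width + 1, step)
    return np.stack([series[start:start + width] for start in starts])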
Code example #2
def CrossValidation(model, measure, y, skf, atlas):

    from sklearn.metrics import make_scorer, accuracy_score, recall_score
    from sklearn.model_selection import cross_validate
    from nilearn.connectome import ConnectivityMeasure, sym_matrix_to_vec

    # sensitivity is recall of the positive class; specificity is recall of
    # the negative class (pos_label=0); assumes binary 0/1 labels
    scoring = {
        'accuracy': make_scorer(accuracy_score),
        'sensitivity': make_scorer(recall_score),
        'specificity': make_scorer(recall_score, pos_label=0)
    }

    # fit_transform expects a list of per-subject time-series arrays
    conn_est = ConnectivityMeasure(kind=measure)
    conn_matrices = conn_est.fit_transform(atlas)
    X = sym_matrix_to_vec(conn_matrices)

    scores = cross_validate(model,
                            X,
                            y,
                            cv=skf,
                            scoring=scoring,
                            return_train_score=True)
    return [X, scores]
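
A hedged usage sketch; the classifier, labels, and list of per-subject time series below are hypothetical stand-ins:

from sklearn.svm import LinearSVC
from sklearn.model_selection import StratifiedKFold

# subject_timeseries: list of (n_timepoints, n_regions) arrays;
# labels: one binary class label per subject (both hypothetical)
skf = StratifiedKFold(n_splits=5)
X, scores = CrossValidation(LinearSVC(), 'correlation', labels, skf,
                            subject_timeseries)
print(scores['test_accuracy'].mean())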
Code example #3
def plot_rdm(rdm, mat=0, cmap=None):
    '''function to visualize an RDM based on rank-transformed and scaled
        similarity values (only for plotting, raw/initial values remain
        unchanged)'''

    from scipy.io.matlab import loadmat
    from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix
    from scipy.stats import rankdata
    from sklearn import preprocessing
    import pandas as pd
    import seaborn as sns
    import matplotlib.pyplot as plt

    if mat == 1:
        matfile = loadmat(rdm)
        rdm = matfile['rdm'][0][0]

    if cmap is None:
        cmap = 'Spectral_r'

    # only read from csv when a file path was passed
    if isinstance(rdm, str):
        rdm = pd.read_csv(rdm, sep=',')
        if 'Unnamed: 0' in rdm:
            del rdm['Unnamed: 0']

    categories = list(rdm.columns)

    rdm = rdm.to_numpy()  # as_matrix() was removed in recent pandas

    rdm_vec = sym_matrix_to_vec(rdm)
    rdm_vec = rankdata(rdm_vec)

    min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1),
                                                copy=True)

    # the rank values form a single feature column for min-max scaling;
    # reshape(-1, 2) would scale two interleaved columns independently
    rdm_array = rdm_vec.reshape(-1, 1)
    rdm_array = min_max_scaler.fit_transform(rdm_array)
    rdm_array = rdm_array.flatten()
    rdm_rank_scale = vec_to_sym_matrix(rdm_array)

    ax = sns.heatmap(rdm_rank_scale,
                     xticklabels=categories,
                     yticklabels=categories,
                     cmap=cmap)
    ax.set_yticklabels(categories, rotation=0)
    ax.xaxis.tick_top()
    ax.set_xticklabels(categories, rotation=90)
    ax.collections[0].colorbar.set_label(
        "pairwise similarities (iMDS), rank transformed & scaled [0,1]")
    plt.tight_layout()
Code example #4
    def test_fc_fd_vec(self):
        corr_vec: dict = self.result.outputs.fc_fd_corr_values
        self.assertIsInstance(corr_vec, dict)
        self.assertTrue(all(isinstance(key, str) for key in corr_vec.keys()))
        self.assertTrue(
            all(isinstance(value, np.ndarray) for value in corr_vec.values()))

        # value check
        from nilearn.connectome import sym_matrix_to_vec
        vec = sym_matrix_to_vec(self.group_corr_mat)
        corr, _ = QualityMeasures.calculate_fc_fd_correlations(
            self.group_conf_summary, vec)  # TODO: Replace method call
        assert_array_almost_equal(corr_vec[self.pipeline['name']], corr)
Code example #5
File: plots.py  Project: MirjamSchneider/URIAL
def plot_rdm(rdm, mat=False, cmap="Spectral_r"):
    '''
    function to visualize an RDM based on rank-transformed and scaled
    similarity values (only for plotting, raw/initial values remain unchanged)
    '''

    from scipy.io.matlab import loadmat
    from scipy.stats import rankdata
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import minmax_scale
    import pandas as pd
    import seaborn as sns
    from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix

    if mat:
        matfile = loadmat(rdm)
        rdm = matfile['rdm'][0][0]

    if isinstance(rdm, str):
        rdm = pd.read_csv(rdm, sep=',')
        if 'Unnamed: 0' in rdm:
            del rdm['Unnamed: 0']

    categories = list(rdm.columns)

    rdm = rdm.to_numpy()  # as_matrix() was removed in recent pandas

    rdm_vec = sym_matrix_to_vec(rdm)
    rdm_vec = rankdata(rdm_vec)

    rdm_array = rdm_vec.reshape(-1, 1)
    rdm_array = minmax_scale(rdm_array, (0, 1))
    rdm_array = rdm_array.flatten()
    rdm_rank_scale = vec_to_sym_matrix(rdm_array)

    # seaborn's heatmap draws row 0 at the top, so the row labels keep the
    # same order as the columns
    ax = sns.heatmap(rdm_rank_scale,
                     xticklabels=categories,
                     yticklabels=categories,
                     cmap=cmap)
    ax.set_yticklabels(categories, rotation=0)
    ax.xaxis.tick_top()
    ax.set_xticklabels(categories, rotation=90)
    ax.collections[0].colorbar.set_label(
        "pairwise similarities, rank transformed & scaled [0,1]")
    plt.tight_layout()
Code example #6
    def test_fc_fd_vec_clean(self):
        corr_vec: dict = self.result.outputs.fc_fd_corr_values_clean
        self.assertIsInstance(corr_vec, dict)
        self.assertTrue(all(isinstance(key, str) for key in corr_vec.keys()))
        self.assertTrue(
            all(isinstance(value, np.ndarray) for value in corr_vec.values()))

        # value check
        from nilearn.connectome import sym_matrix_to_vec
        group_corr_mat = self.group_corr_mat[[0, 2], :, :]
        vec: np.ndarray = sym_matrix_to_vec(group_corr_mat)
        corr, _ = QualityMeasures.calculate_fc_fd_correlations(
            self.group_conf_summary[self.group_conf_summary['include'] ==
                                    True], vec)
        assert_array_almost_equal(corr_vec[self.pipeline['name']], corr)
コード例 #7
0
    def test_edges_weight(self):
        edges_weight: dict = self.result.outputs.edges_weight
        self.assertIsInstance(edges_weight, dict)
        self.assertTrue(
            all(isinstance(key, str) for key in edges_weight.keys()))
        self.assertTrue(
            all(
                isinstance(value, np.ndarray)
                for value in edges_weight.values()))

        # value check
        from nilearn.connectome import sym_matrix_to_vec
        vec: np.ndarray = sym_matrix_to_vec(self.group_corr_mat)
        tested_edges_weight = vec.mean(axis=0)
        assert_array_equal(edges_weight[self.pipeline['name']],
                           tested_edges_weight)
def map_tangent(data, diag=False):
    """Transform to tangent space.

    Parameters
    ----------
    data : list of numpy.ndarray of shape (n_features, n_features)
        List of symmetric positive definite matrices.
    diag : bool
        Whether to discard the diagonal elements before vectorizing.
        Default is False.

    Returns
    -------
    tangent : numpy.ndarray of shape (n_samples, n_features * (n_features + 1) / 2),
        or (n_samples, n_features * (n_features - 1) / 2) if diag is True.
    """
    import numpy as np
    from nilearn.connectome import sym_matrix_to_vec
    # private nilearn helpers backing ConnectivityMeasure(kind='tangent');
    # the module path may differ across nilearn versions
    from nilearn.connectome.connectivity_matrices import (_geometric_mean,
                                                          _map_eigenvalues)

    # whiten by the inverse square root of the geometric mean, then take the
    # matrix logarithm of each whitened matrix
    mean_ = _geometric_mean(data, max_iter=30, tol=1e-7)
    whitening_ = _map_eigenvalues(lambda x: 1. / np.sqrt(x),
                                  mean_)
    tangent = [_map_eigenvalues(np.log, whitening_.dot(c).dot(whitening_))
               for c in data]
    tangent = np.array(tangent)

    return sym_matrix_to_vec(tangent, discard_diagonal=diag)
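
When starting from time series rather than precomputed matrices, nilearn's public API covers the same transform; a sketch (the input list subject_timeseries is hypothetical):

from nilearn.connectome import ConnectivityMeasure

# kind='tangent' whitens each covariance by the geometric mean and takes the
# matrix logarithm; vectorize=True applies sym_matrix_to_vec internally
conn = ConnectivityMeasure(kind='tangent', vectorize=True,
                           discard_diagonal=False)
tangent_vectors = conn.fit_transform(subject_timeseries)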
    clusterings.append(clustering)  # tail of a truncated per-subject loop

Node_strengths = np.stack(Node_strengths)
eig_cens = np.stack(eig_cens)
clusterings = np.stack(clusterings)


from nilearn.connectome import sym_matrix_to_vec

# vectorize each subject's tangent-space connectivity matrix
matrix = connectivity_biomarkers['tangent']

mat_connectivity = []
for mat in matrix:
    mat_connectivity.append(sym_matrix_to_vec(mat, discard_diagonal=True))
mat_connectivity = np.stack(mat_connectivity)

print(mat_connectivity.shape)


# ### Projection of statistical features



## structural connectivity

from pathlib import Path
from scipy import io as sio
from pygsp import graphs
Code example #10
y = []
for i in subjects_timeseries.keys():
    y.append(labels[i])
type(y)







for measure in measures:
    conn_est = ConnectivityMeasure(kind=measure)
    # materialize the dict view; fit_transform expects a list of arrays
    conn_matrices = conn_est.fit_transform(list(subjects_timeseries.values()))
    X = sym_matrix_to_vec(conn_matrices)
    
    for name, predictor in predictors:
        score = np.mean(
            cross_val_score(predictor, X, np.array(y),
                            cv=stratified_shuffle_split.split(X, y)))
        print(measure, name, score)
#       print( cross_val_score(predictor, X, np.array(y), cv=stratified_shuffle_split.split(X,y)))
      
#         classifier = predictor
#         for train_index,test_index in stratified_shuffle_split.split(X,y):
#             predictor.fit(X[train_index],y[train_index])
#             y_score = predictor.decision_function(X[test_index])
#             average_precision = average_precision_score(y[test_index], y_score)
#             print('Average precision-recall score: {0:0.2f}'.format(
#                 average_precision))
#             disp = plot_precision_recall_curve(predictor, X[test_index], y[test_index])
Code example #11
'''Preallocate one (participants x connections) array per task, where each
row is a participant and each column is a connection'''
vector_motor = np.zeros((N_SUBJECTS, 64620))
vector_wm = np.zeros((N_SUBJECTS, 64620))
vector_gambling = np.zeros((N_SUBJECTS, 64620))
vector_emotion = np.zeros((N_SUBJECTS, 64620))
vector_language = np.zeros((N_SUBJECTS, 64620))
vector_relational = np.zeros((N_SUBJECTS, 64620))
vector_social = np.zeros((N_SUBJECTS, 64620))
'''import the function used to extract the vectorized off-diagonal lower
triangle of each correlation matrix, and initialize the array of subject
indices used to fill the connection data'''
from nilearn.connectome import sym_matrix_to_vec
subject_list = np.arange(339)

# sym_matrix_to_vec(..., discard_diagonal=True) yields the same off-diagonal
# values as indexing with np.triu_indices_from(..., k=1), so a single
# assignment per task suffices
for subject in range(subject_list.shape[0]):
    vector_motor[subject, :] = sym_matrix_to_vec(
        fc_matrix_motor[subject, :, :], discard_diagonal=True)
for subject in range(subject_list.shape[0]):
    vector_wm[subject, :] = sym_matrix_to_vec(fc_matrix_wm[subject, :, :],
                                              discard_diagonal=True)
for subject in range(subject_list.shape[0]):
    vector_gambling[subject, :] = sym_matrix_to_vec(
        fc_matrix_gambling[subject, :, :], discard_diagonal=True)
for subject in range(subject_list.shape[0]):
    vector_emotion[subject, :] = sym_matrix_to_vec(
        fc_matrix_emotion[subject, :, :], discard_diagonal=True)
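
The hard-coded width of 64620 matches a 360-region parcellation; a quick consistency check (n_regions here is an assumption, not stated in the snippet):

n_regions = 360  # assumed parcellation size
assert n_regions * (n_regions - 1) // 2 == 64620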
Code example #12
File: netmotifs.py  Project: neurolibre/PyNets
def compare_motifs(struct_mat, func_mat, name, namer_dir, bins=20, N=4):
    '''
    Compare motif structure and population across structural and functional
    graphs to achieve a homeostatic absolute threshold of each that optimizes
    multiplex community detection and analysis.

    Parameters
    ----------
    struct_mat : ndarray
        M x M structural connectivity matrix.
    func_mat : ndarray
        M x M functional connectivity matrix.
    name : str
        Name prefix for the output multigraphs.
    namer_dir : str
        Output directory.
    bins : int
        Number of absolute thresholds to scan across the functional graph.
    N : int
        Motif size.

    Returns
    -------
    mg_dict : dict
        Multigraphs keyed by the best thresholds.
    g_dict : dict
        Pairs of thresholded (functional, structural) matrices keyed by the
        best thresholds.

    References
    ----------
    .. [1] Battiston, F., Nicosia, V., Chavez, M., & Latora, V. (2017).
      Multilayer motif analysis of brain networks. Chaos.
      https://doi.org/10.1063/1.4979282

    '''
    from pynets.stats.netmotifs import adaptivethresh
    from pynets.core.thresholding import threshold_absolute
    from pynets.core.thresholding import standardize
    from scipy import spatial
    from nilearn.connectome import sym_matrix_to_vec
    import pandas as pd
    import gc

    mlib = ['1113', '1122', '1223', '2222', '2233', '3333']

    # Standardize structural graph
    struct_mat = standardize(struct_mat)
    dims_struct = struct_mat.shape[0]
    struct_mat[range(dims_struct), range(dims_struct)] = 0
    at_struct = adaptivethresh(struct_mat, float(0.0), mlib, N)
    print("%s%s%s" %
          ('Layer 1 (structural) has: ', np.sum(at_struct), ' total motifs'))

    # Functional graph threshold window
    func_mat = standardize(func_mat)
    dims_func = func_mat.shape[0]
    func_mat[range(dims_func), range(dims_func)] = 0
    tmin_func = func_mat.min()
    tmax_func = func_mat.max()
    threshes_func = np.linspace(tmin_func, tmax_func, bins)

    assert np.all(
        struct_mat == struct_mat.T), "Structural Matrix must be symmetric"
    assert np.all(
        func_mat == func_mat.T), "Functional Matrix must be symmetric"

    # Count motifs
    print("%s%s%s%s" % ('Mining ', N, '-node motifs: ', mlib))
    motif_dict = {}
    motif_dict['struct'] = {}
    motif_dict['func'] = {}

    mat_dict = {}
    mat_dict['struct'] = sym_matrix_to_vec(struct_mat, discard_diagonal=True)
    mat_dict['funcs'] = {}
    for thr_func in threshes_func:
        # Count
        at_func = adaptivethresh(func_mat, float(thr_func), mlib, N)
        motif_dict['struct']["%s%s" %
                             ('thr-', np.round(thr_func, 4))] = at_struct
        motif_dict['func']["%s%s" % ('thr-', np.round(thr_func, 4))] = at_func
        mat_dict['funcs']["%s%s" %
                          ('thr-', np.round(thr_func, 4))] = sym_matrix_to_vec(
                              threshold_absolute(func_mat, thr_func),
                              discard_diagonal=True)

        print("%s%s%s%s%s" %
              ('Layer 2 (functional) with absolute threshold of: ',
               np.round(thr_func,
                        2), ' yields ', np.sum(at_func), ' total motifs'))
        gc.collect()

    df = pd.DataFrame(motif_dict)

    for idx in range(len(df)):
        # DataFrame.set_value was removed in pandas 1.0; use .at instead
        df.at[df.index[idx], 'motif_dist'] = spatial.distance.cosine(
            df['struct'].iloc[idx], df['func'].iloc[idx])

    df = df[pd.notnull(df['motif_dist'])]

    for idx in range(len(df)):
        # the vectors from sym_matrix_to_vec are already 1-D, which is the
        # shape scipy's distance functions expect
        df.at[df.index[idx], 'graph_dist_cosine'] = spatial.distance.cosine(
            mat_dict['struct'], mat_dict['funcs'][df.index[idx]])
        df.at[df.index[idx],
              'graph_dist_correlation'] = spatial.distance.correlation(
                  mat_dict['struct'], mat_dict['funcs'][df.index[idx]])

    df['struct_func_3333'] = np.zeros(len(df))
    df['struct_func_2233'] = np.zeros(len(df))
    df['struct_func_2222'] = np.zeros(len(df))
    df['struct_func_1223'] = np.zeros(len(df))
    df['struct_func_1122'] = np.zeros(len(df))
    df['struct_func_1113'] = np.zeros(len(df))
    df['struct_3333'] = np.zeros(len(df))
    df['func_3333'] = np.zeros(len(df))
    df['struct_2233'] = np.zeros(len(df))
    df['func_2233'] = np.zeros(len(df))
    df['struct_2222'] = np.zeros(len(df))
    df['func_2222'] = np.zeros(len(df))
    df['struct_1223'] = np.zeros(len(df))
    df['func_1223'] = np.zeros(len(df))
    df['struct_1122'] = np.zeros(len(df))
    df['func_1122'] = np.zeros(len(df))
    df['struct_1113'] = np.zeros(len(df))
    df['func_1113'] = np.zeros(len(df))

    for idx in range(len(df)):
        # motif counts are ordered as in mlib; index from the end of each
        # count vector to recover the class-wise totals
        df.at[df.index[idx], 'struct_3333'] = df['struct'].iloc[idx][-1]
        df.at[df.index[idx], 'func_3333'] = df['func'].iloc[idx][-1]

        df.at[df.index[idx], 'struct_2233'] = df['struct'].iloc[idx][-2]
        df.at[df.index[idx], 'func_2233'] = df['func'].iloc[idx][-2]

        df.at[df.index[idx], 'struct_2222'] = df['struct'].iloc[idx][-3]
        df.at[df.index[idx], 'func_2222'] = df['func'].iloc[idx][-3]

        df.at[df.index[idx], 'struct_1223'] = df['struct'].iloc[idx][-4]
        df.at[df.index[idx], 'func_1223'] = df['func'].iloc[idx][-4]

        df.at[df.index[idx], 'struct_1122'] = df['struct'].iloc[idx][-5]
        df.at[df.index[idx], 'func_1122'] = df['func'].iloc[idx][-5]

        df.at[df.index[idx], 'struct_1113'] = df['struct'].iloc[idx][-6]
        df.at[df.index[idx], 'func_1113'] = df['func'].iloc[idx][-6]

    df['struct_func_3333'] = np.abs(df['struct_3333'] - df['func_3333'])
    df['struct_func_2233'] = np.abs(df['struct_2233'] - df['func_2233'])
    df['struct_func_2222'] = np.abs(df['struct_2222'] - df['func_2222'])
    df['struct_func_1223'] = np.abs(df['struct_1223'] - df['func_1223'])
    df['struct_func_1122'] = np.abs(df['struct_1122'] - df['func_1122'])
    df['struct_func_1113'] = np.abs(df['struct_1113'] - df['func_1113'])

    df = df.drop(columns=['struct', 'func'])

    df = df.loc[~(df == 0).all(axis=1)]

    df = df.sort_values(by=[
        'motif_dist', 'graph_dist_cosine', 'graph_dist_correlation',
        'struct_func_3333', 'struct_func_2233', 'struct_func_2222',
        'struct_func_1223', 'struct_func_1122', 'struct_func_1113',
        'struct_3333', 'func_3333', 'struct_2233', 'func_2233', 'struct_2222',
        'func_2222', 'struct_1223', 'func_1223', 'struct_1122', 'func_1122',
        'struct_1113', 'func_1113'
    ],
                        ascending=[
                            True, True, False, False, False, False, False,
                            False, False, False, False, False, False, False,
                            False, False, False, False, False, False, False
                        ])

    # Take the top 25th percentile
    df = df.head(int(0.25 * len(df)))
    best_threshes = []
    best_mats = []
    best_multigraphs = []
    for key in list(df.index):
        func_mat_tmp = func_mat.copy()
        struct_mat_tmp = struct_mat.copy()
        struct_thr = float(key.split('-')[-1])
        func_thr = float(key.split('-')[-1])
        best_threshes.append(str(func_thr))

        func_mat_tmp[func_mat_tmp < func_thr] = 0
        struct_mat_tmp[struct_mat_tmp < struct_thr] = 0
        best_mats.append((func_mat_tmp, struct_mat_tmp))

        mG = build_mx_multigraph(func_mat, struct_mat, f"{name}_{key}",
                                 namer_dir)
        best_multigraphs.append(mG)

    mg_dict = dict(zip(best_threshes, best_multigraphs))
    g_dict = dict(zip(best_threshes, best_mats))

    return mg_dict, g_dict
Code example #13
def rdm_compare(rdms, models, comp=None, plot=None):
    '''function to compare target and model RDMs'''

    global dict_rdms
    global DefaultListOrderedDict

    from glob import glob
    import pandas as pd
    from collections import OrderedDict
    from scipy.spatial import distance
    from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix
    from scipy.stats import rankdata, spearmanr, kendalltau, pearsonr, mstats
    import numpy as np
    from itertools import combinations
    import pickle
    import seaborn as sns
    import matplotlib.pyplot as plt
    import copy

    class DefaultListOrderedDict(OrderedDict):
        def __missing__(self,k):
            self[k] = []
            return self[k]

    if isinstance(rdms, str):
        with open(rdms, 'rb') as f:
            dict_rdms = pickle.load(f)
        target_rdms = copy.deepcopy(dict_rdms['rdm'])
        target_conds = target_rdms[0].keys()
    else:
        target_rdms = rdms
        target_conds = rdms[0].keys()

    if isinstance(models, str):
        with open(models, 'rb') as f:
            dict_models = pickle.load(f)
            models = dict_models['rdm']
            model_ids = dict_models['id']
    else:
        # assumes a dict-like object with 'rdm' and 'id' entries, mirroring
        # the pickled format
        dict_models = models
        models = dict_models['rdm']
        model_ids = dict_models['id']

    for rdm in dict_models['rdm']:
        if 'Unnamed: 0' in rdm:
            del rdm['Unnamed: 0']

    for index, rdm in enumerate(target_rdms):
        target_rdms[index] = target_rdms[index].to_numpy()

    global rdm_avg

    list_cor_rdm = list(range(0, len(target_rdms)))
    list_p = list(range(0, len(target_rdms)))
    target_rdms_trans = list(range(0, len(target_rdms)))

    if comp is None:
        print('rdm values will not be transformed')

        rdm_avg = pd.DataFrame(np.mean(target_rdms, axis=0), columns=target_conds)

        for index, part_rdm in enumerate(target_rdms):
            list_cor_rdm[index], list_p[index] = kendalltau(
                part_rdm.flatten(), rdm_avg.to_numpy().flatten())

        list_cor_sub = list()
        list_cor_rdm_sub = list()
        list_p_sub = list()

        for index, part in enumerate(target_rdms):
            tmp_rdms = target_rdms.copy()
            tmp_part = target_rdms[index]
            tmp_rdms.pop(index)
            tmp_rdm_avg = np.mean(tmp_rdms, axis=0)
            list_cor_sub.append(kendalltau(tmp_part.flatten(), tmp_rdm_avg.flatten()))

        for i, cor in enumerate(list_cor_sub):
            list_cor_rdm_sub.append(cor.correlation)
            list_p_sub.append(cor.pvalue)

    elif comp == 'spearman':
        for index, rdm in enumerate(target_rdms):

            target_rdms_trans[index] = vec_to_sym_matrix(rankdata(sym_matrix_to_vec(rdm)))
            rdm_avg = pd.DataFrame(np.mean(target_rdms_trans, axis=0), columns=target_conds)

        for index, part_rdm in enumerate(target_rdms_trans):
            list_cor_rdm[index], list_p[index] = spearmanr(
                part_rdm.flatten(), rdm_avg.to_numpy().flatten())

        list_cor_sub = list()
        list_cor_rdm_sub = list()
        list_p_sub = list()

        for index, part in enumerate(target_rdms_trans):
            tmp_rdms = target_rdms_trans.copy()
            tmp_part = target_rdms_trans[index]
            tmp_rdms.pop(index)
            tmp_rdm_avg = np.mean(tmp_rdms, axis=0)
            list_cor_sub.append(spearmanr(tmp_part.flatten(), tmp_rdm_avg.flatten()))

        for i, cor in enumerate(list_cor_sub):
            list_cor_rdm_sub.append(cor.correlation)
            list_p_sub.append(cor.pvalue)

    elif comp == 'pearson':
        for index, rdm in enumerate(target_rdms):

            target_rdms_trans[index] = vec_to_sym_matrix(mstats.zscore(sym_matrix_to_vec(rdm)))
            rdm_avg = pd.DataFrame(np.mean(target_rdms_trans, axis=0), columns=target_conds)

        for index, part_rdm in enumerate(target_rdms_trans):
            list_cor_rdm[index], list_p[index] = pearsonr(
                part_rdm.flatten(), rdm_avg.to_numpy().flatten())

        list_cor_sub = list()
        list_cor_rdm_sub = list()
        list_p_sub = list()

        for index, part in enumerate(target_rdms_trans):
            tmp_rdms = target_rdms_trans.copy()
            tmp_part = target_rdms_trans[index]
            tmp_rdms.pop(index)
            tmp_rdm_avg = np.mean(tmp_rdms, axis=0)
            list_cor_sub.append(pearsonr(tmp_part.flatten(), tmp_rdm_avg.flatten()))

        for i, cor in enumerate(list_cor_sub):
            list_cor_rdm_sub.append(cor[0])
            list_p_sub.append(cor[1])

    upper_noise_ceiling = np.mean(list_cor_rdm)
    lower_noise_ceiling = np.mean(list_cor_rdm_sub)

    model_comp = pd.DataFrame(
        columns=['participant', 'models', 'cor'],
        index=np.arange(len(dict_models['id']) * len(dict_rdms['id'])))
    model_comp['participant'] = dict_rdms['id'] * len(dict_models['id'])
    model_comp['models'] = sorted(dict_models['id'] * len(dict_rdms['id']))

    list_cor_models = list()

    snd_rdms = list()
    snd_rdms.append(rdm_avg.to_numpy())
    for mod_rdm in models:
        snd_rdms.append(mod_rdm.to_numpy())

    ids_rdms = list()
    ids_rdms.append('group average')
    for mod_ids in model_ids:
        ids_rdms.append(mod_ids)

    if comp is None:
        for index, model_rdm in enumerate(dict_models['rdm']):
            for i, sub_rdm in enumerate(target_rdms):
                list_cor_models.append(
                    kendalltau(sub_rdm.flatten(),
                               model_rdm.to_numpy().flatten()).correlation)
        rdms_dist = [kendalltau(x.flatten(), y.flatten()).correlation
                     for x, y in combinations(snd_rdms, 2)]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist),
                                 columns=ids_rdms)
    elif comp == 'spearman':
        for index, model_rdm in enumerate(dict_models['rdm']):
            for i, sub_rdm in enumerate(target_rdms_trans):
                list_cor_models.append(
                    spearmanr(sub_rdm.flatten(),
                              model_rdm.to_numpy().flatten()).correlation)
        rdms_dist = [spearmanr(x.flatten(), y.flatten()).correlation
                     for x, y in combinations(snd_rdms, 2)]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist),
                                 columns=ids_rdms)
    elif comp == 'pearson':
        for index, model_rdm in enumerate(dict_models['rdm']):
            for i, sub_rdm in enumerate(target_rdms_trans):
                list_cor_models.append(
                    pearsonr(sub_rdm.flatten(),
                             model_rdm.to_numpy().flatten())[0])
        rdms_dist = [pearsonr(x.flatten(), y.flatten())[0]
                     for x, y in combinations(snd_rdms, 2)]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist),
                                 columns=ids_rdms)

    model_comp['cor'] = list_cor_models

    if plot is None:
        print('results will not be plotted')
    elif plot == 'bar':
        ax = sns.barplot(x=model_comp['models'], y=model_comp['cor'],
                         data=model_comp)
        plt.plot(np.linspace(-20, 120, 1000), [upper_noise_ceiling] * 1000,
                 'r', alpha=0.1)
        plt.plot(np.linspace(-20, 120, 1000), [lower_noise_ceiling] * 1000,
                 'r', alpha=0.1)
        # shaded band between the lower and upper noise ceiling
        rect = plt.Rectangle((-20, lower_noise_ceiling), 10000,
                             (upper_noise_ceiling - lower_noise_ceiling),
                             color='r', alpha=0.5)
        ax.set_xticklabels(labels=list(dict_models['id']))
        if comp is None or comp == 'kendalltau':
            ax.set(ylabel='kendall tau a correlation with target RDM')
        if comp == 'spearman':
            ax.set(ylabel='spearman correlation with target RDM')
        if comp == 'pearson':
            ax.set(ylabel='pearson correlation with target RDM')
        ax.add_patch(rect)
        plt.tight_layout()
    elif plot == 'violin':
        ax = sns.violinplot(x=model_comp['models'], y=model_comp['cor'],
                            data=model_comp)
        plt.plot(np.linspace(-20, 120, 1000), [upper_noise_ceiling] * 1000,
                 'r', alpha=0.1)
        plt.plot(np.linspace(-20, 120, 1000), [lower_noise_ceiling] * 1000,
                 'r', alpha=0.1)
        rect = plt.Rectangle((-20, lower_noise_ceiling), 10000,
                             (upper_noise_ceiling - lower_noise_ceiling),
                             color='r', alpha=0.5)
        ax.set_xticklabels(labels=list(dict_models['id']))
        if comp is None or comp == 'kendalltau':
            ax.set(ylabel='kendall tau a correlation with target RDM')
        if comp == 'spearman':
            ax.set(ylabel='spearman correlation with target RDM')
        if comp == 'pearson':
            ax.set(ylabel='pearson correlation with target RDM')
        ax.add_patch(rect)
        plt.tight_layout()

    return rdm_avg, model_comp, rdms_dist
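
A hedged usage sketch; both pickle paths are hypothetical and are expected to hold dicts with 'rdm' and 'id' entries:

rdm_avg, model_comp, rdms_dist = rdm_compare('target_rdms.pkl',
                                             'model_rdms.pkl',
                                             comp='spearman', plot='bar')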
Code example #14
    def _run_interface(self, runtime):
        # Loading data
        group_corr_mat = np.load(
            self.inputs.group_corr_mat)  # array with matrices for all runs
        group_conf_summary = pd.read_csv(
            self.inputs.group_conf_summary,
            sep='\t')  # motion summary for all runs
        pipeline_name = self.inputs.pipeline_name
        distance_vector = sym_matrix_to_vec(
            np.load(self.inputs.distance_matrix))  # load distance matrix

        # Plotting motion
        colour = ["#fe6863", "#00a074"]
        sns.set_palette(colour)
        fig = motion_plot(group_conf_summary)
        fig.savefig(join(self.inputs.output_dir,
                         f"motion_criterion_{pipeline_name}.svg"),
                    dpi=300)

        # Creating vectors with subject filter
        all_sub_no = len(group_conf_summary)
        included_sub = group_conf_summary["include"]
        excluded_sub_no = all_sub_no - sum(
            included_sub)  # number of subjects excluded from analyses

        # Create a dictionary describing the full sample and the sample after
        # excluding high-motion runs
        included = {
            f"All subjects (n = {all_sub_no})":
            [np.ones((all_sub_no), dtype=bool), False, all_sub_no, "All"],
            f"After excluding {excluded_sub_no} high motion subjects (n = {all_sub_no - excluded_sub_no})":
            [
                group_conf_summary["include"].values.astype("bool"), True,
                all_sub_no - excluded_sub_no, "No_high_motion"
            ]
        }

        group_corr_vec = sym_matrix_to_vec(group_corr_mat)
        n_edges = group_corr_vec.shape[1]

        fc_fd_corr, fc_fd_pval = (np.zeros(n_edges) for _ in range(2))
        fc_fd_summary = []
        edges_weight = {}
        edges_weight_clean = {}

        for key, value in included.items():

            for i in range(n_edges):
                corr = pearsonr(group_corr_vec[value[0], i],
                                group_conf_summary['mean_fd'].values[value[0]])
                fc_fd_corr[i] = corr[0]  # Pearson's r values
                fc_fd_pval[i] = corr[1]  # p-values

            fc_fd_corr = np.nan_to_num(fc_fd_corr)  # TODO: write exception

            # Calculate correlation between FC-FD r values and distance vector
            distance_dependence = pearsonr(fc_fd_corr, distance_vector)[0]

            # Store summary measure
            fc_fd_summary.append({
                "pipeline":
                pipeline_name,
                "perc_fc_fd_uncorr":
                np.sum(fc_fd_pval < 0.5) / len(fc_fd_pval) * 100,
                "pearson_fc_fd":
                np.median(fc_fd_corr),
                "distance_dependence":
                distance_dependence,
                "tdof_loss":
                group_conf_summary["n_conf"].mean(),
                "cleaned":
                value[1],
                "subjects":
                value[3],
                "sub_no":
                value[2]
            })
            # For cleaned dataset
            if value[1]:
                edges_weight_clean = {
                    pipeline_name: group_corr_vec[value[0]].mean(axis=0)
                }

            # For full dataset
            if not value[1]:
                edges_weight = {
                    pipeline_name: group_corr_vec[value[0]].mean(axis=0)
                }

            # Plotting FC and FC-FD correlation matrices
            fc_fd_corr_mat = vec_to_sym_matrix(fc_fd_corr)
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))

            fig1 = ax1.imshow(group_corr_mat[value[0]].mean(axis=0),
                              vmin=-1,
                              vmax=1,
                              cmap="RdBu_r")
            ax1.set_title(f"{pipeline_name}: mean FC")
            fig.colorbar(fig1, ax=ax1)

            fig2 = ax2.imshow(fc_fd_corr_mat, vmin=-1, vmax=1, cmap="RdBu_r")
            ax2.set_title(f"{pipeline_name}: FC-FD correlation")
            fig.colorbar(fig2, ax=ax2)
            fig.suptitle(f"{pipeline_name}: {key}")

            fig.savefig(join(
                self.inputs.output_dir,
                f"FC_FD_corr_mat_{pipeline_name}_{value[3].lower()}.png"),
                        dpi=300)

            exclude_list = [
                f"sub-{x + 1:02}" for x in group_conf_summary[
                    group_conf_summary['include'] == 1]['subject']
            ]

            self._results["fc_fd_summary"] = fc_fd_summary
            self._results["edges_weight"] = edges_weight
            self._results["edges_weight_clean"] = edges_weight_clean
            self._results["exclude_list"] = exclude_list

        return runtime
Code example #15
File: CJR ICA.py  Project: coreyjr2/Python-Tutorials
'''Assemble all FC matrices into one object'''
from nilearn.connectome import sym_matrix_to_vec
all_fc = np.stack((sz_matrix, bp_matrix, cn_matrix, adhd_matrix, test_sz_matrix, test_bp_matrix, test_adhd_matrix, test_cn_matrix))

#concat all the TS into one object
concat_ts = np.stack((rest_cn, rest_sz,rest_bp,rest_adhd, 
                test_rest_cn, test_rest_sz, test_rest_bp, test_rest_adhd))


concat_ts = np.asarray(concat_ts)

# vectorize=True makes fit_transform return each correlation matrix already
# flattened with sym_matrix_to_vec
connectivity = ConnectivityMeasure(kind='correlation', vectorize=True)
vectorized_ts = connectivity.fit_transform(concat_ts)

sz_vector = sym_matrix_to_vec(sz_matrix, discard_diagonal=True)
bp_vector = sym_matrix_to_vec(bp_matrix, discard_diagonal=True)
adhd_vector = sym_matrix_to_vec(adhd_matrix, discard_diagonal=True)
cn_vector = sym_matrix_to_vec(cn_matrix, discard_diagonal=True)
test_sz_vector = sym_matrix_to_vec(test_sz_matrix, discard_diagonal=True)
test_bp_vector = sym_matrix_to_vec(test_bp_matrix, discard_diagonal=True)
test_adhd_vector = sym_matrix_to_vec(test_adhd_matrix, discard_diagonal=True)
test_cn_vector = sym_matrix_to_vec(test_cn_matrix, discard_diagonal=True)

concat_vector = np.stack((sz_vector, bp_vector, adhd_vector, cn_vector,
                          test_sz_vector, test_bp_vector, test_adhd_vector,
                          test_cn_vector))
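
Since sym_matrix_to_vec operates on the last two dimensions of its input, the eight per-group calls above can equivalently be expressed as one call on the stacked array:

# all_fc has shape (8, n_regions, n_regions); the result has shape
# (8, n_regions * (n_regions - 1) // 2), one row per group matrix
concat_vector = sym_matrix_to_vec(all_fc, discard_diagonal=True)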
Code example #16
for sub in range(sub_n):
    for i, cond in enumerate(conditions):

        # select the timepoints belonging to this condition
        task = timeseries_all[sub, design_matrix[cond].astype("bool"), :]

        correlation_measure = ConnectivityMeasure(kind="correlation")

        fc = correlation_measure.fit_transform([task])[0]
        np.fill_diagonal(fc, 0)

        correlation_matrices[sub, i, :, :] = fc

correlation_matrices.shape

# Calculate edgewise distances.
zero_back = sym_matrix_to_vec(correlation_matrices[:, 0, :, :],
                              discard_diagonal=True)
two_back = sym_matrix_to_vec(correlation_matrices[:, 1, :, :],
                             discard_diagonal=True)

# paired t-test across subjects for every edge
stat, pvalues = stats.ttest_rel(zero_back, two_back)

import statsmodels.stats.multitest as ssm

_, pvals_corrected, _, _ = ssm.multipletests(pvalues, alpha=0.05,
                                             method="fdr_bh")

# binary mask of edges surviving FDR correction at alpha = 0.05
pvals = np.array([0 if p >= 0.05 else 1 for p in pvals_corrected])
sum(pvals)

wei_vector = stat * pvals
diag = np.zeros(264)
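
The trailing diag array suggests the weighted edge vector was meant to be folded back into a matrix; a plausible continuation with nilearn's vec_to_sym_matrix, assuming a 264-region atlas (so wei_vector has 264 * 263 / 2 entries):

from nilearn.connectome import vec_to_sym_matrix

# rebuild the 264 x 264 symmetric matrix, supplying the all-zero diagonal
# that was discarded during vectorization
wei_matrix = vec_to_sym_matrix(wei_vector, diagonal=diag)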
Code example #17
def rdm_dist(rdms, comp=None, order=None):
    '''function to compute distances between all
        RDMs in a given dictionary'''

    from collections import OrderedDict

    class DefaultListOrderedDict(OrderedDict):
        def __missing__(self, k):
            self[k] = []
            return self[k]

    import pandas as pd
    import pickle
    from scipy.spatial import distance
    from scipy.stats import pearsonr, spearmanr, rankdata, mstats
    from itertools import combinations
    from nilearn.connectome import sym_matrix_to_vec
    import numpy as np

    if isinstance(rdms, str):
        with open(rdms, 'rb') as f:
            dict_rdms = pickle.load(f)
        rdms = dict_rdms['rdm']
        ids = dict_rdms['id']
    else:
        dict_rdms = rdms
        rdms = dict_rdms['rdm']
        ids = dict_rdms['id']

    if order is None:
        print(
            'RDM comparisons will be written to the results data frame in the order they are found in the pkl file'
        )
    else:
        print(
            'RDM comparisons will be written to the results data frame in the order specified by the user'
        )
        df_order = pd.DataFrame()
        df_order['rdms'] = rdms
        df_order['rdm_id'] = ids
        df_order.index = ids

        df_order_user = pd.DataFrame(df_order.reindex(order))

        rdms = df_order_user['rdms']
        ids = df_order_user['rdm_id']

    global rdms_dist

    if comp is None or comp == 'euclidean':
        rdms_dist = [
            distance.euclidean(
                sym_matrix_to_vec(x.to_numpy(), discard_diagonal=True),
                sym_matrix_to_vec(y.to_numpy(), discard_diagonal=True))
            for x, y in combinations(rdms, 2)
        ]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist), columns=ids)
    elif comp == 'spearman':
        for index, rdm in enumerate(rdms):
            rdms[index] = rankdata(
                sym_matrix_to_vec(rdm.to_numpy(), discard_diagonal=True))
        rdms_dist = [
            spearmanr(x, y).correlation for x, y in combinations(rdms, 2)
        ]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist), columns=ids)
        np.fill_diagonal(rdms_dist.values, 1)
        #rdms_dist = rdms_dist.mask(rdms_dist.values > -1.05, 1 - rdms_dist.values)
    elif comp == 'pearson':
        for index, rdm in enumerate(rdms):
            rdms[index] = mstats.zscore(
                sym_matrix_to_vec(rdm.to_numpy(), discard_diagonal=True))
        rdms_dist = [pearsonr(x, y)[0] for x, y in combinations(rdms, 2)]
        rdms_dist = pd.DataFrame(distance.squareform(rdms_dist), columns=ids)
        np.fill_diagonal(rdms_dist.values, 1)
        #rdms_dist = rdms_dist.mask(rdms_dist.values > -1.05, 1 - rdms_dist.values)

    return rdms_dist
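
A hedged usage sketch; the pickle path is hypothetical and should contain a dict with 'rdm' (a list of DataFrames) and 'id' (a list of labels):

rdms_dist = rdm_dist('rdms.pkl', comp='spearman')
print(rdms_dist)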
Code example #18
File: plots.py  Project: alexprz/URIAL
def plot_rdm(rdm, mat=False, model=False, level=None, comp=None, cmap="Spectral_r"):
    '''
    function to visualize an RDM based on rank-transformed and scaled
    similarity values (only for plotting, raw/initial values remain unchanged)
    '''

    from scipy.io.matlab import loadmat
    from scipy.stats import rankdata
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import minmax_scale
    import pandas as pd
    import seaborn as sns
    from nilearn.connectome import sym_matrix_to_vec, vec_to_sym_matrix

    if mat:
        matfile = loadmat(rdm)
        rdm = matfile['rdm'][0][0]

    if isinstance(rdm, str):
        rdm = pd.read_csv(rdm, sep=',')
        if 'Unnamed: 0' in rdm:
            del rdm['Unnamed: 0']

    categories = list(rdm.columns)
    y_categories = list(categories)

    if model is False and level == '2nd':

        ax = sns.heatmap(rdm, xticklabels=categories,
                         yticklabels=y_categories, cmap=cmap, vmin=-1,
                         vmax=1)
        ax.set_yticklabels(y_categories, rotation=0)
        ax.xaxis.tick_top()
        ax.set_xticklabels(categories, rotation=90)
        if comp is None:
            ax.collections[0].colorbar.set_label("correlations between RDMs")
        if comp == 'kendalltaua':
            ax.collections[0].colorbar.set_label("correlations between RDMs [kendall tau]")
        if comp == 'spearman':
            ax.collections[0].colorbar.set_label("correlations between RDMs [spearman]")
        if comp == 'pearson':
            ax.collections[0].colorbar.set_label("correlations between RDMs [pearson]")
        plt.tight_layout()



    if model is False and level is None:

        rdm = rdm.to_numpy()

        rdm_vec = sym_matrix_to_vec(rdm)
        rdm_vec = rankdata(rdm_vec)

        rdm_array = rdm_vec.reshape(-1, 1)
        rdm_array = minmax_scale(rdm_array, (0, 1))
        rdm_array = rdm_array.flatten()
        rdm_rank_scale = vec_to_sym_matrix(rdm_array)

        ax = sns.heatmap(rdm_rank_scale, xticklabels=categories, yticklabels=y_categories, cmap=cmap)
        ax.set_yticklabels(y_categories, rotation=0)
        ax.xaxis.tick_top()
        ax.set_xticklabels(categories, rotation=90)
        ax.collections[0].colorbar.set_label("pairwise similarities, rank transformed & scaled [0,1]")
        plt.tight_layout()

    if model is True:

        rdm = rdm.to_numpy()

        rdm_vec = sym_matrix_to_vec(rdm)

        rdm_array = rdm_vec.reshape(-1, 1)
        rdm_array = minmax_scale(rdm_array, (0, 1))
        rdm_array = rdm_array.flatten()
        rdm_scale = vec_to_sym_matrix(rdm_array)

        ax = sns.heatmap(rdm_scale, xticklabels=categories, yticklabels=y_categories, cmap=cmap)
        ax.set_yticklabels(y_categories, rotation=0)
        ax.xaxis.tick_top()
        ax.set_xticklabels(categories, rotation=90)
        ax.collections[0].colorbar.set_label("pairwise similarities, scaled [0,1]")
        plt.tight_layout()
Code example #19
def plot_conn_hist(matrix_file, modules, atlas=False, output_file="hist.png"):

    from matplotlib import pyplot as plt
    from matplotlib import gridspec as mgs
    import matplotlib.cm as cm
    from matplotlib.colors import ListedColormap
    import PUMI.utils.globals as glb
    import pandas as pd
    import numpy as np
    import nibabel as nb
    from nilearn.plotting import plot_img
    import os
    from nilearn.connectome import sym_matrix_to_vec

    # load matrix file
    mat = pd.read_csv(matrix_file, sep="\t")
    mat.set_index('Unnamed: 0', inplace=True)
    regnum = mat.shape[0]

    mat = mat.values

    histdata = []
    # create a list of modular connectivities
    for mod in pd.Series(modules).unique():
        idx = np.array(np.where([m == mod for m in modules])).flatten()
        submat = mat[np.ix_(idx, idx)]
        mat[np.ix_(idx, idx)] = None
        histdata.insert(0, np.array(sym_matrix_to_vec(submat)))

    histdata.insert(0, np.array([0])) # temporary hack to make nicer inter-modular colors
    histdata.insert(0, np.array(sym_matrix_to_vec(mat)))

    mycolors = ListedColormap(cm.get_cmap('Set1').colors[:7][::-1])
    modules = pd.Series(modules).values
    lut = pd.factorize(modules)[0] + 1

    legend = False
    if atlas:
        legend = True

    # Define nested GridSpec
    wratios = [100, 20]
    subplot = mgs.GridSpec(1, 1)[0]
    gs = mgs.GridSpecFromSubplotSpec(1, 1 + int(legend), subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    ax0 = plt.subplot(gs[0])
    # colorbars are very much hardcoded
    # TODO: fix colorbars (make it work for other atlases)
    cols = cm.get_cmap('Set1').colors[:9][::-1]
    # matplotlib 3.x removed the 'normed' keyword; 'density' is the replacement
    plt.hist(histdata, stacked=True, density=True, color=cols)

    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(
            5, 1, subplot_spec=gs[1], wspace=0.0, hspace=0.0)

        background_file = glb._FSLDIR_ + "/data/standard/MNI152_T1_3mm_brain.nii.gz" #TODO: works only for 3mm atlas
        background = nb.load(background_file)
        atlas = nb.load(atlas)

        nslices = background.shape[-1]
        #coords = np.linspace(int(0 * nslices), int(0.99 * nslices), 5).astype(np.uint8)
        coords = [-40, -20, 0, 20, 40]  # works in MNI space
        lut2 = np.array([0] + lut.tolist())

        # nibabel removed get_data()/get_affine(); use get_fdata() and .affine
        relabeled = lut2[np.asarray(atlas.get_fdata(), dtype=int)]
        atl = nb.Nifti1Image(relabeled, atlas.affine)
        for i, c in enumerate(coords):
            ax2 = plt.subplot(gslegend[i])
            plot_img(atl, bg_img=background, axes=ax2, display_mode='z',
                     annotate=False, cut_coords=[c], threshold=0.1, cmap=mycolors,
                     interpolation='nearest', vmin=1, vmax=7)

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        return os.getcwd() + '/' + output_file

    return 0