Example #1
def pair_dist(rand_pair,
              sub_files,
              sub_data=[],
              reg_var=[],
              len_time=235,
              data_field='dtseries'):
    """ Pair distance """
    sub_data = np.array(sub_data)
    if sub_data.size > 0:
        sub1_data = sub_data[:, :, rand_pair[0]]
        sub2_data = sub_data[:, :, rand_pair[1]]
    else:
        sub1_data = spio.loadmat(sub_files[rand_pair[0]])[data_field].T
        sub2_data = spio.loadmat(sub_files[rand_pair[1]])[data_field].T
        sub1_data, _, _ = normalizeData(sub1_data[:len_time, :])
        sub2_data, _, _ = normalizeData(sub2_data[:len_time, :])

    sub2_data, _ = brainSync(X=sub1_data, Y=sub2_data)
    fmri_diff = sp.sum((sub2_data - sub1_data)**2, axis=0)

    # Returns SQUARE of the distance
    if len(reg_var) > 0:
        regvar_diff = sp.square(reg_var[rand_pair[0]] - reg_var[rand_pair[1]])
        return fmri_diff, regvar_diff
    else:
        return fmri_diff
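
A minimal usage sketch for the helper above; the file names and regression variable are hypothetical placeholders, and spio, normalizeData, and brainSync are assumed to be in scope from the BrainSync/BFP imports of the surrounding module:

import numpy as np

# Hypothetical inputs: .mat files each holding a 'dtseries' matrix,
# plus one scalar measure per subject.
sub_files = ['sub01.mat', 'sub02.mat', 'sub03.mat']
reg_var = np.array([101.0, 95.0, 110.0])

# Per-vertex squared fMRI distance and squared difference of the measure
# for the subject pair (0, 2).
fmri_diff, regvar_diff = pair_dist((0, 2), sub_files, reg_var=reg_var)
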
Example #2
def pair_dist_two_groups(rand_pair,
                         sub_grp1_files,
                         sub_grp2_files,
                         sub_data1=[],
                         sub_data2=[],
                         len_time=235):

    """ Pair distance for two groups of subjects """
    sub_data1 = np.array(sub_data1)
    sub_data2 = np.array(sub_data2)
    if sub_data1.size > 0:
        sub1_data = sub_data1[:, :, rand_pair[0]]
    else:
        sub1_data = spio.loadmat(sub_grp1_files[rand_pair[0]])['dtseries'].T
        sub1_data, _, _ = normalizeData(sub1_data[:len_time, :])

    if sub_data2.size > 0:
        sub2_data = sub_data2[:, :, rand_pair[1]]
    else:
        sub2_data = spio.loadmat(sub_grp2_files[rand_pair[1]])['dtseries'].T
        sub2_data, _, _ = normalizeData(sub2_data[:len_time, :])

    sub2_data, _ = brainSync(X=sub1_data, Y=sub2_data)
    fmri_diff = sp.sum((sub2_data - sub1_data)**2, axis=0)

    # Returns SQUARE of the distance
    return fmri_diff
Example #3
def dist2atlas_reg(bfp_path, ref_atlas, sub_files, reg_var, len_time=235):
    """ Perform regression stats based on square distance to atlas """
    print('dist2atlas_reg: assumes the data is normalized')

    num_vert = ref_atlas.shape[1]
    num_sub = len(sub_files)

    # Take absolute value of difference from the mean
    # for the IQ measure
    reg_var = sp.absolute(reg_var - sp.mean(reg_var))

    diff = sp.zeros((num_vert, num_sub))

    # Compute distance to atlas
    for ind in tqdm(range(num_sub)):
        sub_data = spio.loadmat(sub_files[ind])['dtseries'].T
        sub_data, _, _ = normalizeData(sub_data[:len_time, :])
        Y2, _ = brainSync(X=ref_atlas, Y=sub_data)
        diff[:, ind] = sp.sum((Y2 - ref_atlas)**2, axis=0)

    corr_pval = sp.zeros(num_vert)
    for vrt in tqdm(range(num_vert)):
        _, corr_pval[vrt] = sp.stats.pearsonr(diff[vrt, :], reg_var)

    corr_pval[sp.isnan(corr_pval)] = .5

    lab = spio.loadmat(bfp_path + '/supp_data/USCBrain_grayord_labels.mat')
    labs = lab['labels'].squeeze()

    corr_pval_fdr = sp.zeros(num_vert)
    _, pv = fdrcorrection(corr_pval[labs > 0])
    corr_pval_fdr[labs > 0] = pv

    return corr_pval, corr_pval_fdr
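
The FDR step above can be exercised on its own; a minimal sketch, assuming fdrcorrection is statsmodels' Benjamini-Hochberg routine (statsmodels.stats.multitest.fdrcorrection), which matches the call signature used here:

import numpy as np
from statsmodels.stats.multitest import fdrcorrection

pvals = np.array([0.001, 0.02, 0.3, 0.04])
rejected, pvals_fdr = fdrcorrection(pvals)  # Benjamini-Hochberg at alpha=0.05
print(rejected, pvals_fdr)
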
Example #4
def sync2atlas(atlas, sub_data):
    print('Syncing to atlas; assumes the data is normalized')

    # Assume that the sub_data is already normalized
    syn_data = sp.zeros(sub_data.shape)
    for ind in tqdm(range(sub_data.shape[2])):
        syn_data[:, :, ind], _ = brainSync(X=atlas, Y=sub_data[:, :, ind])

    return syn_data
Example #5
def pairsdist_regression(bfp_path,
                         sub_files,
                         reg_var,
                         num_perm=1000,
                         num_pairs=0,
                         len_time=235):
    """ Perform regression stats based on square distance between random pairs """

    # Get the number of vertices from a file
    num_vert = spio.loadmat(sub_files[0])['dtseries'].shape[0]
    num_sub = len(sub_files)

    # Allocate memory for subject data
    sub_data = np.zeros(shape=(len_time, num_vert, num_sub))

    # Read and normalize the subject data
    print('Reading subjects')
    for subno, filename in enumerate(tqdm(sub_files)):
        data = spio.loadmat(filename)['dtseries'].T
        sub_data[:, :, subno], _, _ = normalizeData(data[:len_time, :])

    pairs = list(itertools.combinations(range(num_sub), r=2))

    if num_pairs > 0:
        rn = np.random.permutation(len(pairs))
        pairs = [pairs[i] for i in rn]
        pairs = pairs[:num_pairs]

    fmri_diff = sp.zeros((num_vert, len(pairs)))
    regvar_diff = sp.zeros(len(pairs))

    print('Computing pairwise differences')
    for pn, pair in enumerate(tqdm(pairs)):
        Y2, _ = brainSync(X=sub_data[:, :, pair[0]], Y=sub_data[:, :, pair[1]])
        fmri_diff[:, pn] = np.sum((Y2 - sub_data[:, :, pair[0]])**2, axis=0)
        regvar_diff[pn] = (reg_var[pair[0]] - reg_var[pair[1]])**2

    corr_pval = corr_perm_test(X=fmri_diff.T, Y=regvar_diff)

    #    corr_pval = sp.zeros(num_vert)
    #    for ind in tqdm(range(num_vert)):
    #        _, corr_pval[ind] = sp.stats.pearsonr(fmri_diff[ind, :], regvar_diff)
    #    corr_pval[sp.isnan(corr_pval)] = .5
    #

    labs = spio.loadmat(
        bfp_path +
        '/supp_data/USCBrain_grayordinate_labels.mat')['labels'].squeeze()
    labs[sp.isnan(labs)] = 0

    corr_pval[labs == 0] = 0.5

    corr_pval_fdr = 0.5 * sp.ones(num_vert)
    _, corr_pval_fdr[labs > 0] = fdrcorrection(corr_pval[labs > 0])

    return corr_pval, corr_pval_fdr
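
The pair-subsampling idiom used above is easy to verify in isolation; a self-contained sketch:

import itertools
import numpy as np

num_sub = 5
pairs = list(itertools.combinations(range(num_sub), r=2))  # all 10 unordered pairs
rn = np.random.permutation(len(pairs))
pairs = [pairs[i] for i in rn][:4]  # keep 4 random pairs without repetition
print(pairs)
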
Example #6
def lin_reg(bfp_path,
            ref_atlas,
            sub_files,
            reg_var,
            Vndim=235,
            Sndim=20,
            len_time=235):
    """ Perform regression stats based on distance to atlas """

    num_vert = ref_atlas.shape[1]
    num_sub = len(sub_files)
    a = spio.loadmat(bfp_path + '/supp_data/USCBrain_grayord_labels.mat')
    labs = a['labels'].squeeze()

    labs[sp.isnan(labs)] = 0
    print('Computing PCA basis function from the atlas')
    pca = PCA(n_components=Vndim)
    pca.fit(ref_atlas.T)

    reduced_data = sp.zeros((Vndim, num_vert, num_sub))
    for ind in tqdm(range(num_sub)):

        sub_data = spio.loadmat(sub_files[ind])['dtseries'].T
        sub_data, _, _ = normalizeData(sub_data[:len_time, :])
        Y2, _ = brainSync(X=ref_atlas, Y=sub_data)

        if Vndim == len_time:
            reduced_data[:, :, ind] = sub_data
        else:
            reduced_data[:, :, ind] = pca.transform(Y2.T).T

    pval_linreg = sp.zeros(num_vert)

    pca = PCA(n_components=Sndim)

    for vrt in tqdm(range(num_vert)):
        X = reduced_data[:, vrt, :]
        if Sndim != num_sub:
            pca.fit(X.T)
            X = pca.transform(X.T).T
        X = sm.add_constant(X.T)
        est = sm.OLS(reg_var, X)
        pval_linreg[vrt] = est.fit().f_pvalue

    print('Regression is done')

    pval_linreg[sp.isnan(pval_linreg)] = .5

    pval_linreg_fdr = sp.zeros(num_vert)
    _, pv = fdrcorrection(pval_linreg[labs > 0])
    pval_linreg_fdr[labs > 0] = pv

    return pval_linreg, pval_linreg_fdr
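
The per-vertex test above is an ordinary least-squares F-test; a self-contained sketch of the same statsmodels pattern on synthetic data:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))  # 50 subjects, 3 reduced features
y = X @ np.array([0.5, -0.2, 0.0]) + rng.normal(size=50)
est = sm.OLS(y, sm.add_constant(X)).fit()
print(est.f_pvalue)  # overall F-test p-value, as stored per vertex above
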
Example #7
def pair_dist(rand_pair, sub_files, reg_var, len_time=235):
    """ Pair distance """
    sub1_data = spio.loadmat(sub_files[rand_pair[0]])['dtseries'].T
    sub2_data = spio.loadmat(sub_files[rand_pair[1]])['dtseries'].T

    sub1_data, _, _ = normalizeData(sub1_data[:len_time, :])
    sub2_data, _, _ = normalizeData(sub2_data[:len_time, :])

    sub2_data, _ = brainSync(X=sub1_data, Y=sub2_data)
    fmri_diff = sp.sum((sub2_data - sub1_data)**2, axis=0)
    regvar_diff = sp.square(reg_var[rand_pair[0]] - reg_var[rand_pair[1]])

    return fmri_diff, regvar_diff
Example #8
def sub2ctrl_dist(sub_file, ctrl_files, len_time=235):
    """ Compare a subject to controls """

    sub_data = spio.loadmat(sub_file)['dtseries'].T
    sub_data, _, _ = normalizeData(sub_data[:len_time, :])

    num_vert = sub_data.shape[1]
    fmri_diff = sp.zeros((num_vert, len(ctrl_files)))

    for ind, fname in enumerate(tqdm(ctrl_files)):
        ctrl_data = spio.loadmat(fname)['dtseries'].T
        ctrl_data, _, _ = normalizeData(ctrl_data[:len_time, :])
        ctrl_data, _ = brainSync(X=sub_data, Y=ctrl_data)
        fmri_diff[:, ind] = sp.sum((sub_data - ctrl_data)**2, axis=0)

    return fmri_diff
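
A hypothetical call, with placeholder file names; the result holds one column of per-vertex squared distances for each control subject:

# Hypothetical usage; 'patient.mat' and the control files are placeholders.
fmri_diff = sub2ctrl_dist('patient.mat', ['ctrl01.mat', 'ctrl02.mat'])
print(fmri_diff.shape)  # (num_vert, 2)
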
Example #9
def randpairsdist_reg(bfp_path,
                      sub_files,
                      reg_var,
                      num_pairs=1000,
                      len_time=235):
    """ Perform regression stats based on square distance between random pairs """
    print('randpairsdist_reg: assumes the data is normalized')
    print('This function is deprecated!')

    # Get the number of vertices from a file
    num_vert = spio.loadmat(sub_files[0])['dtseries'].shape[0]

    # Generate random pairs
    rand_pairs = sp.random.choice(len(sub_files), (num_pairs, 2), replace=True)

    fmri_diff = sp.zeros((num_vert, num_pairs))
    regvar_diff = sp.zeros(num_pairs)

    print('Reading subjects')

    # Compute pairwise distances between random pairs
    for ind in tqdm(range(num_pairs)):
        sub1_data = spio.loadmat(sub_files[rand_pairs[ind, 0]])['dtseries'].T
        sub2_data = spio.loadmat(sub_files[rand_pairs[ind, 1]])['dtseries'].T

        sub1_data, _, _ = normalizeData(sub1_data[:len_time, :])
        sub2_data, _, _ = normalizeData(sub2_data[:len_time, :])

        sub2_data, _ = brainSync(X=sub1_data, Y=sub2_data)
        fmri_diff[:, ind] = sp.sum((sub2_data - sub1_data)**2, axis=0)
        regvar_diff[ind] = sp.square(reg_var[rand_pairs[ind, 0]] -
                                     reg_var[rand_pairs[ind, 1]])

    corr_pval = sp.zeros(num_vert)
    for ind in tqdm(range(num_vert)):
        _, corr_pval[ind] = sp.stats.pearsonr(fmri_diff[ind, :], regvar_diff)

    corr_pval[sp.isnan(corr_pval)] = .5

    labs = spio.loadmat(bfp_path + '/supp_data/USCBrain_grayord_labels.mat'
                        )['labels'].squeeze()

    corr_pval_fdr = sp.zeros(num_vert)
    _, corr_pval_fdr[labs > 0] = fdrcorrection(corr_pval[labs > 0])

    return corr_pval, corr_pval_fdr
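
The per-vertex correlation test used above, in isolation; a minimal sketch on synthetic data:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = 0.3 * x + rng.normal(size=100)
r, p = stats.pearsonr(x, y)  # Pearson correlation and two-sided p-value
print(r, p)
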
Example #10
def pair_dist_simulation(rand_pair,
                         sub_files,
                         sub_data=[],
                         reg_var=[],
                         len_time=235,
                         roi=[]):
    """ Pair distance """

    # normalize the clinical variable
    reg_var_norm, _, _ = normalizeData(reg_var)

    roi_ind, _ = np.where(roi)

    noise_data = (reg_var_norm - np.min(reg_var_norm)) * np.random.normal(
        size=(len(roi_ind), len_time, len(reg_var)))

    sub_data = np.array(sub_data)
    if sub_data.size > 0:
        sub1_data = sub_data[:, :, rand_pair[0]]
        sub2_data = sub_data[:, :, rand_pair[1]]
        sub1_data, _, _ = normalizeData(sub1_data[:len_time, :])
        sub2_data, _, _ = normalizeData(sub2_data[:len_time, :])
        sub1_data += noise_data[:, :, rand_pair[0]]
        sub2_data += noise_data[:, :, rand_pair[1]]
        sub1_data, _, _ = normalizeData(sub1_data[:len_time, :])
        sub2_data, _, _ = normalizeData(sub2_data[:len_time, :])
    else:
        sub1_data = spio.loadmat(sub_files[rand_pair[0]])['dtseries'].T
        sub2_data = spio.loadmat(sub_files[rand_pair[1]])['dtseries'].T
        sub1_data, _, _ = normalizeData(sub1_data[:len_time, :])
        sub2_data, _, _ = normalizeData(sub2_data[:len_time, :])
        sub1_data[:len_time, roi_ind] += noise_data[:, :, rand_pair[0]].T
        sub2_data[:len_time, roi_ind] += noise_data[:, :, rand_pair[1]].T
        sub1_data, _, _ = normalizeData(sub1_data[:len_time, :])
        sub2_data, _, _ = normalizeData(sub2_data[:len_time, :])

    sub2_data, _ = brainSync(X=sub1_data, Y=sub2_data)
    fmri_diff = sp.sum((sub2_data - sub1_data)**2, axis=0)

    # Returns SQUARE of the distance
    if len(reg_var) > 0:
        regvar_diff = sp.square(reg_var[rand_pair[0]] - reg_var[rand_pair[1]])
        return fmri_diff, regvar_diff
    else:
        return fmri_diff
Example #11
def load_bfp_dataT_dist2atlas(sub_fname, atlas_fname, LenTime, matchT):
    ''' sub_fname: list of filenames of .mat files containing a Time x Vertex
        matrix of each subject's preprocessed fMRI data.
        LenTime: number of timepoints in the data; this should be the same for
        all subjects.
        Outputs: a Vertex x Subject matrix of squared distances to the atlas,
        and the per-subject timepoint counts. '''
    count1 = 0
    subN = len(sub_fname)
    print('loading data for ' + str(subN) + ' subjects')
    pbar = tqdm(total=subN)
    numT = np.zeros(subN)

    atlas_data = spio.loadmat(atlas_fname)
    atlas = atlas_data['atlas_data']

    subTest_diff = np.zeros((atlas.shape[1], subN))

    for ind in range(subN):
        fname = sub_fname[ind]
        df = spio.loadmat(fname)
        data = df['dtseries'].T
        numT[ind] = data.shape[0]
        if int(data.shape[0]) != LenTime:
            if matchT:
                # zero-pad the time series to LenTime
                t = int(LenTime - numT[ind])
                v = data.shape[1]
                data = np.concatenate((data, np.zeros((t, v))))
            else:
                print(sub_fname[ind] +
                      ' does not have the correct number of timepoints')
        d, _, _ = normalizeData(data)
        syn_data, _ = brainSync(X=atlas, Y=d)
        subTest_diff[:, ind], _ = dist2atlas_sub(atlas, syn_data)

        count1 += 1
        pbar.update(1)
        if count1 == subN:
            break

    pbar.close()

    print('loaded data for ' + str(subN) + ' subjects')
    return subTest_diff, numT
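
The zero-padding applied when matchT is set can be checked on its own; a small sketch:

import numpy as np

LenTime = 235
data = np.ones((230, 4))  # 230 timepoints, 4 vertices
pad = np.zeros((LenTime - data.shape[0], data.shape[1]))
data = np.concatenate((data, pad))  # zero-padded in time to LenTime
print(data.shape)  # (235, 4)
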
Example #12
sub = lst[0]
data = scipy.io.loadmat(
    os.path.join(p_dir, sub, sub + '.rfMRI_REST2_LR.\
reduce3.ftdata.NLM_11N_hvar_25.mat'))
LR_flag = msk['LR_flag']
LR_flag = np.squeeze(LR_flag) != 0
data = data['ftdata_NLM']
temp = data[LR_flag, :]

d2 = temp.T

ind = 0
IntV = range(10, 1200, 10)
rms = sp.zeros(len(IntV))
for len1 in IntV:
    sub_data1, _, _ = normalizeData(d1[:len1, :])
    sub_data2, _, _ = normalizeData(d2[:len1, :])
    s = sp.std(sub_data2, axis=0)
    sub_data1 = sub_data1[:, s > 1e-2]
    sub_data2 = sub_data2[:, s > 1e-2]
    sub_data2_sync, Rot = brainSync(X=sub_data1, Y=sub_data2)
    rms[ind] = sp.linalg.norm(sub_data2_sync - sub_data1) / sp.sqrt(
        sp.linalg.norm(sub_data2_sync)**2 + sp.linalg.norm(sub_data1)**2)
    ind += 1
    print(len1, ':', rms[ind - 1])

plt.plot(IntV, rms)
plt.ylim(0.30, 0.7)
plt.savefig('sync_vs_len_same_sub2.pdf')
plt.show()
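
The quantity plotted above is a symmetric, scale-normalized RMS distance; written out as a standalone function (an interpretation of the expression inside the loop, not code from the original script):

import numpy as np

def normalized_rms(a, b):
    # ||a - b|| / sqrt(||a||^2 + ||b||^2): zero for identical data,
    # symmetric in its arguments, and invariant to a common rescaling.
    return np.linalg.norm(a - b) / np.sqrt(
        np.linalg.norm(a)**2 + np.linalg.norm(b)**2)
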
Example #13
dat = spio.loadmat('/big_disk/ajoshi/with_andrew/100307/100307.\
rfMRI_REST1_RL.reduce3.ftdata.NLM_11N_hvar_5.mat')
fmotor = dat['ftdata_NLM'].T
fmotor = fmotor[:284, :]
fmotor, _, _ = normalizeData(fmotor)

dat = spio.loadmat('/big_disk/ajoshi/with_andrew/100307/100307.\
rfMRI_REST1_LR.reduce3.ftdata.NLM_11N_hvar_5.mat')
frest = dat['ftdata_NLM'].T
frest = frest[:fmotor.shape[0], :]
frest, _, _ = normalizeData(frest)

diffbefore = fmotor - frest

fmotor, _ = brainSync(frest, fmotor)

diffafter = fmotor - frest

plt.imshow(sp.absolute(diffbefore), aspect='auto', clim=(0, 0.1))

plt.colorbar()
plt.savefig('dist_motor_before.pdf', dpi=300)
plt.show()
plt.figure()
plt.imshow(sp.absolute(diffafter), aspect='auto', clim=(0, .1))
plt.colorbar()
plt.savefig('dist_motor_after.pdf', dpi=300)
plt.show()
#diffafter = gaussian_filter(diffafter, [2, 0])
nV = len(dfs_right_sm.vertices)
Example #14
               elevation=180,
               roll=90,
               outfile='sub1to2_view1_pc.png',
               show=1)
view_patch_vtk(dfs_left_sm,
               azimuth=-90,
               elevation=-180,
               roll=-90,
               outfile='sub1to2_view2_pc.png',
               show=1)

#sm=smooth_patch(dfs_left,iter=1000)
#view_patch(sm)
#view_patch(dfs_left,rho)

sub_rot, R1 = brainSync(X=sub1, Y=sub2)
#sub_rot2, R2, C2 = rot_sub_data(sub1.T, sub2.T)
#sub_rot=sub_rot.T
rho = sp.dot(ref_mean_pc, sub_rot) / ref_mean_pc.shape[0]
#rho=rho[0,1:]
rho[~np.isfinite(rho)] = 0
#rho = smooth_surf_function(dfs_left_sm, rho)
dfs_left.attributes = rho
dfs_left_sm = patch_color_attrib(dfs_left_sm, rho, clim=[-1, 1])
view_patch_vtk(dfs_left_sm,
               azimuth=90,
               elevation=180,
               roll=90,
               outfile='sub1to2_view1_pc_rot.png',
               show=1)
view_patch_vtk(dfs_left_sm,
Example #15
pbar = tqdm(total=len(subj))
for i in range(len(subj)):
    print(str(n))
    n = n + 1
    fname = os.path.join(dirname, subj[i], 'func', subj[i] + ext)
    if os.path.isfile(fname):
        df = spio.loadmat(fname)
        data = df['dtseries'].T
        if int(data.shape[0]) == LenTime:
            sub_ID.append(subj[i])
            sub_fname.append(fname)
    pbar.update(1)
np.savetxt(out_dir + "/subjects.csv", sub_ID, delimiter=",", fmt='%s')
write_text_timestamp(
    flog, 'data for ' + str(len(sub_ID)) +
    ' subjects will be used for atlas creation')
sub_data = load_bfp_data(sub_fname, LenTime)
#%% run group sync
groupatlas_data, _, _, _ = groupBrainSync(sub_data)
spio.savemat(os.path.join(out_dir, 'group_atlas.mat'),
             {'groupatlas_data': groupatlas_data})
write_text_timestamp(flog, 'completed group sync')
#%% find representative subject
subRef_data, q = IDrefsub_BrainSync(sub_data)
write_text_timestamp(flog, 'representative subject is ' + str(sub_ID[q]))
#%% sync group atlas to representative subject
atlas_data, _ = brainSync(subRef_data, groupatlas_data)
spio.savemat(os.path.join(out_dir, 'atlas.mat'), {'atlas_data': atlas_data})
write_text_timestamp(
    flog,
    'group atlas synced to representative subject. group-MDS_atlas done.')
Example #16
    data = scipy.io.loadmat(
        os.path.join(
            p_dir, sub, sub + '.rfMRI_REST1_LR.\
reduce3.ftdata.NLM_11N_hvar_25.mat'))
    data = data['ftdata_NLM']
    sub1L, _, _ = normalizeData(data[~LR_flag, :].T)
    sub1R, _, _ = normalizeData(data[LR_flag, :].T)

    data = scipy.io.loadmat(
        os.path.join(
            p_dir, sub, sub + '.rfMRI_REST2_LR.\
reduce3.ftdata.NLM_11N_hvar_25.mat'))
    data = data['ftdata_NLM']
    sub2L, _, _ = normalizeData(data[~LR_flag, :].T)
    sub2R, _, _ = normalizeData(data[LR_flag, :].T)
    _, R = brainSync(X=sub1L, Y=sub2L)
    avgCorrL += sp.sum(sub1L * sp.dot(R, sub2L), axis=0)
    avgCorrR += sp.sum(sub1R * sp.dot(R, sub2R), axis=0)
    nSub += 1
    print(nSub, end=' ')

avgCorrL = avgCorrL / nSub
avgCorrR = avgCorrR / nSub

# plot correlations in right hemisphere
dfs_right_sm = patch_color_attrib(dfs_right_sm, avgCorrR, clim=[0, 1])
view_patch_vtk(dfs_right_sm,
               azimuth=-90,
               elevation=-180,
               roll=-90,
               outfile='corrLR_right1.png',
               show=1)
Example #17

    vrest_subs[:, :, ind1], _, _ = normalizeData(vrest)
    print(ind1, end=' ')

# %%
#  Build Null Distribution
nVert = vrest_subs.shape[1]
rho_null = sp.zeros([nsub, nVert])
for ind1 in range(nsub):
    t = 0
    for ind2 in range(nsub):
        if ind1 == ind2:
            continue
        vrest1 = vrest_subs[:, :, ind1]
        vrest2 = vrest_subs[:, :, ind2]
        vrest2, Rot = brainSync(X=vrest1, Y=vrest2)
        t += sp.sum(vrest1*vrest2, axis=0)

    rho_null[ind1, :] = t/(nsub-1)
    print('rho(%d)=%g' % (ind1, sp.mean(t)))

sp.savez('fcon1000_null_nsub50.npz', rho_null=rho_null, vrest_subs=vrest_subs)


# %%
# Read Candidate Subject to be tested against normals


rho_sub = sp.zeros([nsub, nVert])
for ind1 in range(nsub):
    vrest = vrest_subs[:, :, ind1]
Example #18

    print(count1)

#%% Compute pairwise distance
nSub = count1
sub_data = sub_data[:, :, :nSub]

print(nSub)
dist_all_orig = sp.zeros([nSub, nSub])
dist_all_rot = dist_all_orig.copy()
#sub_data_orig = sub_data.copy()

for ind1 in range(nSub):
    for ind2 in range(nSub):
        dist_all_orig[ind1, ind2] = sp.linalg.norm(sub_data[:, :, ind1] -
                                                   sub_data[:, :, ind2])
        sub_data_rot, _ = brainSync(
            X=sub_data[:, :, ind1].T, Y=sub_data[:, :, ind2].T)
        dist_all_rot[ind1, ind2] = sp.linalg.norm(sub_data[:, :, ind1] -
                                                  sub_data_rot.T)
        print(ind1, ind2, dist_all_rot[ind1, ind2])

q = sp.argmin(dist_all_rot.sum(1))
print('The representative subject is: %s ' % lst2[q])
m = MDS(n_components=2, dissimilarity='precomputed')
e = m.fit_transform(dist_all_rot)
print(e)
fig, ax = plt.subplots()
ax.scatter(e[:, 0], e[:, 1])
for i in range(e.shape[0]):
    ax.annotate(lst2[i][30:37], (e[i, 0], e[i, 1]))

ax.set_title('MDS Plot of the subjects')
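
The MDS embedding step above works for any symmetric distance matrix; a self-contained sketch:

import numpy as np
from sklearn.manifold import MDS

D = np.array([[0.0, 1.0, 2.0],
              [1.0, 0.0, 1.5],
              [2.0, 1.5, 0.0]])  # symmetric pairwise distances
e = MDS(n_components=2, dissimilarity='precomputed').fit_transform(D)
print(e.shape)  # (3, 2): one 2-D point per subject
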
Example #19
    d2, _, _ = normalizeData(temp.T)

#    d2 = temp

    sub_data1 = d1.T
    sub_data2 = d2.T

#    ind1 = s>1e-10

    dist_all_orig = sp.zeros(len(dfs_left_sm.vertices))
#    dist_all_rot = dist_all_orig.copy()
    sub_data_orig1 = sub_data1.copy()
    sub_data_orig2 = sub_data2.copy()

    dist_all_orig = sub_data_orig1 - sub_data_orig2
    sub_data2, _ = brainSync(X=sub_data1.T, Y=sub_data2.T)
    sub_data2 = sub_data2.T
    # rot_sub_data(ref=sub_data1, sub=sub_data2)
    dist_all_rot += sub_data1 - sub_data2
    print(ind, end=' ')

plt.figure()
plt.set_cmap('jet')
plt.imshow(sp.absolute(dist_all_orig), aspect='auto', clim=(0, 0.1))
plt.colorbar()
plt.savefig('dist_before_task.pdf', dpi=300)
plt.show()
plt.figure()

plt.set_cmap('jet')
plt.imshow(sp.absolute(dist_all_rot/40), aspect='auto', clim=(0, 0.1))
Example #20
    if count1 == 0:
        sub_data = sp.zeros((235, d.shape[1], len(normSub)))

    sub_data[:, :, count1] = d[:235, :]
    count1 += 1
    print(count1)
    if count1 == 50:
        break

#%% Create Average atlas by synchronizing everyones data to one subject
atlas = 0
q = 3
nSub = len(normSub)
for ind in range(nSub):
    Y2, _ = brainSync(X=sub_data[:, :, q], Y=sub_data[:, :, ind])
    atlas += Y2
atlas /= (nSub)
spio.savemat('ADHD_avg_atlas.mat', {'atlas': atlas})

#%% Compute PCA basis using atlas

pca = PCA(n_components=NDim)
pca.fit(atlas.T)
print(pca.explained_variance_ratio_)

#%% Read Normal Subjects
normSub = normSubOrig[50:135]
count1 = 0
for sub in normSub:
    fname = os.path.join(p_dir, sub + '_rest_bold.32k.GOrd.mat')
Example #21
p = PCA(n_components=NCMP)
D = p.fit_transform(X.T)

print("Explained Variance Fraction = %f" % p.explained_variance_ratio_.sum())

# D is the exemplar data
D, _, _ = normalizeData(D.T)

nT = Xtsk.shape[0]

Xnew = np.zeros(Xtsk.shape)
Cind = int((NCMP - 1) / 2 + 1)  # center index of the sliding window
for i in range(Xtsk.shape[0] - NCMP):
    xin = Xtsk[i:i + NCMP, :]
    xin, _, nrm = normalizeData(xin)
    dd, _ = brainSync(xin, D)
    dd = dd * nrm
    Xnew[Cind + i, :] = dd[Cind, :]
    print("%d" % i, end=',')

#a = nib.cifti2.Cifti2Image(Xnew, sub1.header, file_map=sub1.file_map)
#a.to_filename('outfile_task.nii')
#
#a = nib.cifti2.Cifti2Image(Xtsk-Xnew, sub1.header, file_map=sub1.file_map)
#a.to_filename('outfile_diff.nii')

#loading cifti files has indices garbled
#%%
fname1 = 'right_motor1.png'
fname2 = 'right_motor2.png'
Example #22
def load_all_data(studydir, epi_txt, test_epi_txt, nonepi_txt, test_nonepi_txt, atlas_labels):

    atlas = spio.loadmat(atlas_labels)

    gord_labels = atlas['labels'].squeeze()
    # print(len(gord_labels))
    # pdb.set_trace()
    label_ids = np.unique(gord_labels)  # unique label ids

    # remove WM label from connectivity analysis
    label_ids = np.setdiff1d(label_ids, (2000, 0))

    with open(epi_txt) as f:
        epiIds = f.readlines()

    with open(nonepi_txt) as f:
        nonepiIds = f.readlines()

    epiIds = list(map(lambda x: x.strip(), epiIds))
    nonepiIds = list(map(lambda x: x.strip(), nonepiIds))

    load_lesion_maps(epiIds, nonepiIds, gord_labels, label_ids)
    # random.shuffle(epiIds)
    # random.shuffle(nonepiIds)
    # print(len(epiIds), epiIds)
    epi_files = list()
    nonepi_files = list()

    for sub in epiIds:
        fname = os.path.join(studydir, sub, 'BFP', sub, 'func',
                             sub + '_rest_bold.32k.GOrd.mat')
        if os.path.isfile(fname):
            epi_files.append(fname)

    for sub in nonepiIds:
        fname = os.path.join(studydir, sub, 'BFP', sub, 'func',
                             sub + '_rest_bold.32k.GOrd.mat')
        if os.path.isfile(fname):
            nonepi_files.append(fname)

    epi_data = load_bfp_data(epi_files, 171)
    nonepi_data = load_bfp_data(nonepi_files, 171)

    # nsub = epi_data.shape[2]
    #==============================================================
    nsub = min(epi_data.shape[2], nonepi_data.shape[2])
    epiIds = epiIds[:nsub]
    nonepiIds = nonepiIds[:nsub]
    #===============================================================

    conn_mat = np.zeros((nsub, len(label_ids), len(label_ids)))
    cent_mat = np.zeros((nsub, len(label_ids)))
    input_feat = np.zeros((nsub, len(label_ids), epi_data.shape[0]))
    print(conn_mat.shape, input_feat.shape)
    print(epi_data.shape, nonepi_data.shape, gord_labels.shape)

    _, ref_sub = get_connectivity(nonepi_data[:, :, 0],
                            labels=gord_labels,
                            label_ids=label_ids)


    for subno in range(nsub): # num of subjects
        conn_mat[subno, :, :], time_series = get_connectivity(epi_data[:, :, subno],
                                                 labels=gord_labels,
                                                 label_ids=label_ids)

        #G = nx.convert_matrix.from_numpy_array(np.abs(conn_mat[subno, :, :]))
        #cent = nx.eigenvector_centrality(G, weight='weight')
        #cent_mat[subno, :] = np.array(list(cent.items()))[:,1]
        # print(ref_sub.shape, time_series.shape)
        input_feat[subno, :, :] = np.transpose(brainSync(ref_sub.T, time_series.T)[0])

    np.savez('PTE_graphs_gcn_brain.npz',
             conn_mat=conn_mat,
             features=input_feat, # 36x16x171
             label_ids=label_ids,
             cent_mat=cent_mat)
##============================================================================
    print("non_epi")
    # nsub = nonepi_data.shape[2]

    conn_mat = np.zeros((nsub, len(label_ids), len(label_ids)))
    cent_mat = np.zeros((nsub, len(label_ids)))
    input_feat = np.zeros((nsub, len(label_ids), nonepi_data.shape[0]))
    print(conn_mat.shape, input_feat.shape)
    # here we are using same number of training subjects for epi and nonepi.
    for subno in range(nsub):
        conn_mat[subno, :, :], time_series = get_connectivity(nonepi_data[:, :, subno],
                                                 labels=gord_labels,
                                                 label_ids=label_ids)
        #G = nx.convert_matrix.from_numpy_array(np.abs(conn_mat[subno, :, :]))
        #cent = nx.eigenvector_centrality(G, weight='weight')
       # cent_mat[subno, :] = np.array(list(cent.items()))[:,1]
        input_feat[subno, :, :] = np.transpose(brainSync(ref_sub.T, time_series.T)[0])

    np.savez('NONPTE_graphs_gcn_brain.npz',
             conn_mat=conn_mat, # n_subjects*16*16
             features=input_feat, # n_subjects * 16 x 171
             label_ids=label_ids,
             cent_mat=cent_mat)

    print('done')

Example #23

data = data['ftdata_NLM']
d1, _, _ = normalizeData(data[LR_flag, :].T)

data = scipy.io.loadmat(
    os.path.join(p_dir, sub, sub + '.rfMRI_REST2_LR.\
reduce3.ftdata.NLM_11N_hvar_25.mat'))
data = data['ftdata_NLM']
temp, _, _ = normalizeData(data[LR_flag, :].T)

null_corr = sp.zeros((len(dfs_left_sm.vertices), 1000))

for iter1 in sp.arange(1000):
    perm1 = np.random.permutation(temp.shape[1])
    d2 = temp[:, perm1]

    d2, R = brainSync(X=d1, Y=d2)
    null_corr[:, iter1] = sp.sum(d1 * d2, axis=0)
    print(iter1, end=' ')

d2, R = brainSync(X=d1, Y=temp)

scorr = sp.sum(d1 * d2, axis=0)

c = scorr[:, None] < null_corr

pval = sp.mean(c, axis=1)

dfs_left_sm = patch_color_attrib(dfs_left_sm, 1 - pval, clim=[0.95, 1])
view_patch_vtk(dfs_left_sm,
               azimuth=-90,
               elevation=-180,
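
The permutation test in the fragment above compares the observed post-sync correlation against a null built from column-permuted data; the p-value step on its own, with synthetic numbers:

import numpy as np

rng = np.random.default_rng(0)
null_corr = rng.normal(size=(5, 1000))  # null correlations, one row per vertex
scorr = np.array([2.5, 0.1, -1.0, 3.0, 0.0])  # observed correlations
pval = np.mean(scorr[:, None] < null_corr, axis=1)  # fraction of null above observed
print(pval)
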
Example #24
    sub = lst[ind]
    data = spio.loadmat(
        os.path.join(
            p_dir, sub, sub + '.rfMRI_REST1_LR.\
reduce3.ftdata.NLM_11N_hvar_25.mat'))
    LR_flag = msk['LR_flag']
    LR_flag = np.squeeze(LR_flag) != 0
    data = data['ftdata_NLM']
    #   temp = data[LR_flag, :]
    #    m = np.mean(temp, 1)
    #    temp = temp - m[:, None]
    #    s = np.std(temp, 1)+1e-16
    #    temp = temp/s[:, None]
    #    temp = temp[:, :d1.shape[0]]
    d2, _, _ = normalizeData(data.T)
    d2, _ = brainSync(dr, d2)
    meanData = meanData + d2
    print(ind, end=',')

# %% Do the PCA
np.savez('mean_data_filt.npz', meanData=meanData)
p = PCA(n_components=NCMP)
D = p.fit_transform(meanData.T).T

# %% Explained variance
_, s, _ = np.linalg.svd(np.dot(meanData, meanData.T))
plt.figure()
plt.plot(s[:50])
plt.title('sigma plot')

# %%
Example #25
def load_all_data(atlas_labels):

    atlas = spio.loadmat(atlas_labels)

    gord_labels = atlas['labels'].squeeze()

    label_ids = np.unique(gord_labels)  # unique label ids

    # remove WM label from connectivity analysis
    label_ids = np.setdiff1d(label_ids, (2000, 0))

    # with open(epi_txt) as f:
    #     epiIds = f.readlines()
    #
    # with open(nonepi_txt) as f:
    #     nonepiIds = f.readlines()
    #
    # epiIds = list(map(lambda x: x.strip(), epiIds))
    # nonepiIds = list(map(lambda x: x.strip(), nonepiIds))
    # # random.shuffle(epiIds)
    # # random.shuffle(nonepiIds)
    # # print(len(epiIds), epiIds)
    # epi_files = list()
    # nonepi_files = list()
    #
    # for sub in epiIds:
    #     fname = os.path.join(studydir, sub, 'BFP', sub, 'func',
    #                          sub + '_rest_bold.32k.GOrd.mat')
    #     if os.path.isfile(fname):
    #         epi_files.append(fname)
    #
    # for sub in nonepiIds:
    #     fname = os.path.join(studydir, sub, 'BFP', sub, 'func',
    #                          sub + '_rest_bold.32k.GOrd.mat')
    #     if os.path.isfile(fname):
    #         nonepi_files.append(fname)

    adhd_fnames, tdc_fnames, gt_labels = load_csv()
    adhd_data = load_bfp_data(adhd_fnames, 235)
    tdc_data = load_bfp_data(tdc_fnames, 235)

    nsub = adhd_data.shape[2]

    conn_mat = np.zeros((nsub, len(label_ids), len(label_ids)))
    parcorr_mat = np.zeros((nsub, len(label_ids), len(label_ids)))
    cent_mat = np.zeros((nsub, len(label_ids)))
    input_feat = np.zeros((nsub, len(label_ids), adhd_data.shape[0]))
    print(conn_mat.shape, input_feat.shape)
    print(adhd_data.shape, tdc_data.shape, gord_labels.shape)

    _, _, ref_sub = get_connectivity(tdc_data[:, :, 0],
                                     labels=gord_labels,
                                     label_ids=label_ids)

    for subno in range(nsub):  # num of subjects
        conn_mat[subno, :, :], parcorr_mat[
            subno, :, :], time_series = get_connectivity(adhd_data[:, :,
                                                                   subno],
                                                         labels=gord_labels,
                                                         label_ids=label_ids)

        #G = nx.convert_matrix.from_numpy_array(np.abs(conn_mat[subno, :, :]))
        #cent = nx.eigenvector_centrality(G, weight='weight')
        #cent_mat[subno, :] = np.array(list(cent.items()))[:,1]
        # print(ref_sub.shape, time_series.shape)
        input_feat[subno, :, :] = np.transpose(
            brainSync(ref_sub.T, time_series.T)[0])

    np.savez(
        '../ADHD_parPearson_BCI-DNI.npz',
        conn_mat=conn_mat,
        partial_mat=parcorr_mat,
        features=input_feat,  # 36x16x171
        label_ids=label_ids,
        cent_mat=cent_mat)

    ##============================================================================
    print("healthy subjects")
    nsub = tdc_data.shape[2]

    conn_mat = np.zeros((nsub, len(label_ids), len(label_ids)))
    parcorr_mat = np.zeros((nsub, len(label_ids), len(label_ids)))
    cent_mat = np.zeros((nsub, len(label_ids)))
    input_feat = np.zeros((nsub, len(label_ids), tdc_data.shape[0]))
    print(conn_mat.shape, input_feat.shape)
    # here we are using same number of training subjects for epi and nonepi.
    for subno in range(nsub):
        conn_mat[subno, :, :], parcorr_mat[
            subno, :, :], time_series = get_connectivity(tdc_data[:, :, subno],
                                                         labels=gord_labels,
                                                         label_ids=label_ids)
        #G = nx.convert_matrix.from_numpy_array(np.abs(conn_mat[subno, :, :]))
        #cent = nx.eigenvector_centrality(G, weight='weight')
        # cent_mat[subno, :] = np.array(list(cent.items()))[:,1]
        input_feat[subno, :, :] = np.transpose(
            brainSync(ref_sub.T, time_series.T)[0])

    np.savez(
        '../TDC_parPearson_BCI-DNI.npz',
        conn_mat=conn_mat,  # n_subjects*16*16
        partial_mat=parcorr_mat,
        features=input_feat,  # n_subjects * 16 x 171
        label_ids=label_ids,
        cent_mat=cent_mat)

    print('done')