Example #1
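# Imports and a pytest parametrization are assumed for the test below; the
# exact dtype list is illustrative rather than taken from the original source.
import nibabel
import numpy as np
import pytest

from nilearn.image import new_img_like
from nilearn.input_data import NiftiLabelsMasker


@pytest.mark.parametrize("target_dtype", [np.int16, np.int32, np.int64])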
def test_img_to_signals_labels_non_float_type(target_dtype):
    fake_fmri_data = np.random.RandomState(0).rand(10, 10, 10, 10) > 0.5
    fake_affine = np.eye(4, 4).astype(np.float64)
    fake_fmri_img_orig = nibabel.Nifti1Image(
        fake_fmri_data.astype(np.float64),
        fake_affine,
    )
    fake_fmri_img_target_dtype = new_img_like(
        fake_fmri_img_orig, fake_fmri_data.astype(target_dtype))
    fake_mask_data = np.ones((10, 10, 10), dtype=np.uint8)
    fake_mask = nibabel.Nifti1Image(fake_mask_data, fake_affine)

    masker = NiftiLabelsMasker(fake_mask)
    masker.fit()
    timeseries_int = masker.transform(fake_fmri_img_target_dtype)
    timeseries_float = masker.transform(fake_fmri_img_orig)
    assert np.sum(timeseries_int) != 0
    assert np.allclose(timeseries_int, timeseries_float)
##########################################################################
# Fitting the mask and generating a report
masker.fit(func_filename)

# We can again generate a report, but this time, the provided functional
# image is displayed with the ROI of the atlas.
# The report also contains a summary table giving the region sizes in mm3
report = masker.generate_report()
report
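
# The report can also be saved to disk for later inspection (the file name is
# illustrative):
report.save_as_html('labels_masker_report.html')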

###########################################################################
# Process the data with the NiftiLabelsMasker
#
# In order to extract the signals, we need to call transform on the
# functional data
signals = masker.transform(func_filename)
# signals is a 2D matrix, (n_time_points x n_regions)
signals.shape

###########################################################################
# Plot the signals
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(15, 5))
ax = fig.add_subplot(111)
for label_idx in range(3):
    ax.plot(signals[:, label_idx],
            linewidth=2,
            label=atlas.labels[label_idx + 1])  # 0 is background
ax.legend(loc=2)
ax.set_title("Signals for first 3 regions")
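
# Show the figure when running this snippet as a standalone script.
plt.show()
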
def roi_average(roi_paths,
                rois_parent_folder,
                tasks,
                df,
                selected_contrasts=None,
                subject_specific_rois=False,
                roi_masks=None):
    """
    Function to compute the average and std of z-scores for a set of voxels
    inside of subject-specific, functional Regions-of-Interest (ROIs)
    in a set of contrast z-maps.
    """
    # For every ROI, ...
    roi_names = []
    all_rois_contrast_avgs = []
    all_contrast_names = []
    for r, roi_path in enumerate(roi_paths, start=1):
        # ROI name
        roi_name = re.match('.*' + rois_parent_folder + '/(.*).nii.gz',
                            roi_path).groups()[0]
        roi_names.append(roi_name)
        # For every task, ...
        roi_contrast_avgs = []
        contrast_names = []
        for t, task in enumerate(tasks):
            # Select only the entries in the data frame that correspond to
            # the ffx z-maps
            task_df = df[df.task == task][df.acquisition == 'ffx']
            contrasts = task_df.contrast.unique()
            if selected_contrasts is not None:
                contrasts = np.intersect1d(selected_contrasts, contrasts)
            # For every contrast, ...
            for contrast in contrasts:
                flatten_list = []
                for s, subject in enumerate(SUBJECTS):
                    img_paths = []
                    # Create the subject-specific ROI mask
                    if subject_specific_rois:
                        if roi_masks is None:
                            raise ValueError('roi_masks not defined!')
                        else:
                            roi = math_img('img == %d' % r, img=roi_masks[s])
                            print('Extracting subject-specific ' + \
                                  '"%s" as ROI for %s from the contrast "%s".'
                                  %(roi_name, subject, contrast))
                    else:
                        roi = roi_path
                        print('Extracting general ' + \
                              '"%s" as ROI for %s from the contrast "%s".'
                              %(roi_name, subject, contrast))
                    masker = NiftiLabelsMasker(labels_img=roi)
                    masker.fit()
                    # Path of the contrast z-map for the current participant
                    img_path = task_df[task_df.contrast == contrast]\
                                      [task_df.subject == subject]\
                                      .path.values[-1]
                    img_paths.append(img_path)
                    print(img_paths)
                    # For each participant, extract data from z-map
                    # according to the subject-specific ROI mask and
                    # average straight off the values of
                    # the corresponding voxels
                    mask_data = masker.transform(img_paths)
                    flatten_list.extend(mask_data.tolist()[0])
                roi_contrast_avgs.append(flatten_list)
                # Labels of the contrasts
                contrast_names.append(LABELS[contrast][1].values[0] + ' vs. ' +
                                      LABELS[contrast][0].values[0])
        # Append for all ROIs
        all_rois_contrast_avgs.append(roi_contrast_avgs)
        all_contrast_names.append(contrast_names)
    return all_rois_contrast_avgs, all_contrast_names, roi_names
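
# Hypothetical call (the ROI folder, task list and dataframe below are
# placeholders, not taken from the original source):
# avgs, contrast_names, roi_names = roi_average(
#     roi_paths=sorted(glob.glob('rois/*.nii.gz')),
#     rois_parent_folder='rois',
#     tasks=['task-language'],
#     df=df)
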
from load_confounds import Params24
from nilearn.input_data import NiftiLabelsMasker
import glob
import numpy as np

####################################################################

configfiles = glob.glob('/home/sana4471/projects/rrg-pbellec/sana4471/movie_decoding_sa/data/friends/derivatives/fmriprep-20.1.0/fmriprep/sub-01/**/*preproc_bold.nii.gz', recursive=True)
configfiles=sorted(configfiles)

print('#################### The number of movie segments:')
print(len(configfiles)) #24*2+ 17*2

#####################################################################

fMRI_T = []

for path in configfiles:
    print(path)
    masker = NiftiLabelsMasker(labels_img='MIST_444.nii.gz', standardize=True,
                               detrend=False, smoothing_fwhm=8).fit()
    Data_fmri = masker.transform(path, confounds=Params24().load(path))
    print(Data_fmri.shape)
    fMRI_T.append(Data_fmri)

np.save('Sanajoon_fMRI2.npy', fMRI_T)
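# Note: if the segments differ in length, recent NumPy versions require an
# explicit object array (np.array(fMRI_T, dtype=object)), and reloading the
# saved file then needs np.load(..., allow_pickle=True).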
print('##################### Done!')

Example #6
###############################################################################
##### Optionally, calculate CT and GMD of each DKT region and output csv. #####
###############################################################################

# If either CT or GMD image was provided, fit DKT labels.
if ct_path or gmd_path:
    # Fit DKT labels
    masker = NiftiLabelsMasker(dkt_img)
    masker.fit()

# If CT image was provided, also output csv of CT values by DKT region.
if ct_path:
    # Load cortical thickness image.
    ct_img = nib.load(ct_path)
    # Get cortical thickness values by DKT region.
    ct_values = masker.transform(ct_img)
    # Make values list for output df (subjectId, sessionId, + CT values list).
    ct_values = sub_ses + ct_values.tolist()[0]    
    # Combine columns and values into CT dataframe.
    ct_df = pd.DataFrame(data=[ct_values], columns=columns)
    # Output CT dataframe to csv file.
    ct_df.to_csv(f"{outDir}/{sub}_{ses}_CorticalThickness.csv", index=False)

# If GMD image was provided, also output csv of GMD values by DKT region.
if gmd_path: 
    # Load GMD image.
    gmd_img = nib.load(gmd_path)
    # Get GMD values by DKT region.
    gmd_values = masker.transform(gmd_img)
    # Make values list for output df (subjectId, sessionId, + gmd values list).
    gmd_values = sub_ses + gmd_values.tolist()[0]
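    # Assumed continuation, mirroring the CT block above (the output file name
    # is illustrative).
    # Combine columns and values into GMD dataframe.
    gmd_df = pd.DataFrame(data=[gmd_values], columns=columns)
    # Output GMD dataframe to csv file.
    gmd_df.to_csv(f"{outDir}/{sub}_{ses}_GreyMatterDensity.csv", index=False)
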
# here it is assumed that your input lesion image is available at
# '/tmp/lesion.nii.gz'; change it to any path you like
import matplotlib.pyplot as plt

from nilearn.datasets import fetch_atlas_yeo_2011
from nilearn.input_data import NiftiLabelsMasker
from nilearn.plotting import plot_img
from nilearn.regions import connected_label_regions

lesion = '/tmp/lesion.nii.gz'

atlas = fetch_atlas_yeo_2011()
thick17 = atlas['thick_17']
# optionally display the atlas
# plot_roi(thick17)

# separate yeo17 atlas into connected components:
# 122 connected components overall
separate_thick17 = connected_label_regions(thick17)

# display the lesion on top of the atlas
display = plot_img(lesion)
display.add_overlay(thick17, cmap=plt.cm.hsv)

# compute the mean of lesion proportion per network
masker = NiftiLabelsMasker(labels_img=thick17).fit()
network_lesion = masker.transform(lesion)

# redo that on a per-region basis
display = plot_img(lesion)
display.add_overlay(separate_thick17, cmap=plt.cm.hsv)

# compute the mean of lesion proportion per region
separate_masker = NiftiLabelsMasker(labels_img=separate_thick17).fit()
region_lesion = separate_masker.transform(lesion)

plt.show()
Example #10
# save for future usage
label_atlas_nii.to_filename(roiLabel)
#load saved labels
# label_atlas_nii= nib.load(roiLabel)

masker = NiftiLabelsMasker(labels_img=label_atlas_nii,
                           standardize=True,
                           memory='nilearn_cache',
                           verbose=0)
masker.fit()

corr_mat_vect_list = []
ind_list = []

for i_rs_img, rs_img in enumerate(rs_niis):
    print('%i/%i: %s' % (i_rs_img + 1, len(rs_niis), rs_img))
    rs_reg_ts = masker.transform(rs_img)
    corr_mat = np.corrcoef(rs_reg_ts.T)
    triu_inds = np.triu_indices(corr_mat.shape[0], 1)
    corr_mat_vect = corr_mat[triu_inds]
    # save for later
    corr_mat_vect_list.append(corr_mat_vect)
corr_mat_vect_array = np.array(corr_mat_vect_list)
print(corr_mat_vect_array.shape)

# replace NaN correlations (from constant time series) with 1
corr_mat_vect_array[np.isnan(corr_mat_vect_array)] = 1

if (nMasks**2 - nMasks) / 2 == corr_mat_vect_array.shape[1]:
    print(corr_mat_vect_array.shape)
    np.save(crosscorr, corr_mat_vect_array)
else:
    # assumed guard; the original else branch is not shown in this excerpt
    raise ValueError('Unexpected number of region-pair correlations: %s'
                     % str(corr_mat_vect_array.shape))

def mean_nonzero(data, a):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        temp_sum = np.sum(data != 0, axis=a)
        # temp_sum[temp_sum == 0] = 1
        output = np.sum(data, axis=a) / temp_sum
        output[np.isnan(output)] = 0
    return output


ave_loadings_flat = mean_nonzero(rest_loading, 1)
ave_loadings_mat = np.zeros((14, 14))
ave_loadings_mat[idx] = ave_loadings_flat
ave_loadings_mat = ave_loadings_mat + ave_loadings_mat.T
ave_loadings = mean_nonzero(ave_loadings_mat, 1)

ts_len = masker.transform(rs_niis[0]).shape[0]
for i_rs_img, rs_img in enumerate(rs_niis):
    par = '_'.join(rs_img.split(os.sep)[-1].split('_')[0:2])
    outputDIR = resultdir + '_new' + os.sep + par
    if not os.path.exists(outputDIR):
        os.makedirs(outputDIR)
    print('%i/%i: %s' % (i_rs_img + 1, len(rs_niis), par))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=DeprecationWarning)
        rs_reg_ts = masker.transform(rs_img)
    # weighted TS of the average
    cur_all_weightedTS = mean_nonzero(rs_reg_ts * ave_loadings, 1)
    cur_all_txtfn = outputDIR + os.sep + '%s_weightedTS_%s_allcomps.txt' % (
        par, analysis_name)
    np.savetxt(cur_all_txtfn, cur_all_weightedTS, fmt='%f')
    for i_comp in range(rest_loading.shape[1]):
        cur_loadings = rest_loading[
            np.where(rest_loading[:, i_comp] != 0), i_comp].flatten()
Example #12

###################################
### Preprocessing per subject #####
###################################

# extract TS per ROI, preprocess and compute correlation matrix
# loop through 86 subjects

print("Starting the ROI time series extraction")
FS = []
for i_nii, nii_path in enumerate(df.fMRI_path.values):
	# each participant has a 4D nii of shape (91, 109, 91, 121)
	print("Currently doing sub", i_nii + 1, "/86")
	# EXTRACT TIME SERIES PER roi
	cur_FS = masker.transform(nii_path)
	# standardize the time series
	cur_FS = StandardScaler().fit_transform(cur_FS)
	# shape (121, 100)
	# deconfounding
	# upload df with motion parameter values
	# confounds = pd.read_excel('C:\\sexualorientproject\\MC_Parameter\\second\\prefiltered_func_data_mcf_{}.xlsx'.format(i_nii+1))
	confounds = pd.read_excel('prefiltered_func_data_mcf_{}.xlsx'.format(i_nii+1))
	confounds = confounds[confounds.columns[1:]].values
	assert confounds.shape == (121, 24)
	# deconfounding
	cur_FS = clean(signals=cur_FS, confounds=confounds, detrend=False)
	# shape (121, 100)
	FS.append(cur_FS)

# np.save("C:\\sexualorientproject\\fmri_FS_ss", FS)
Example #13
    n_rois = len(np.unique(rroi_img.get_data())) - 1
    # !fslview dbg_roi_labelimg.nii.gz

    roi_masker = NiftiLabelsMasker(labels_img=rroi_img,
                                   background_label=0,
                                   standardize=True,
                                   smoothing_fwhm=0,
                                   detrend=True,
                                   low_pass=None,
                                   high_pass=None,
                                   memory='nilearn_cache',
                                   verbose=0)
    roi_masker.fit()

    # parse the RS images according to the ROIs
    sub_FS = roi_masker.transform(rs_4D)

    # assert sub_FS.shape == (len(cur_rs_imgs), len(roi_names))
    # dump_path = op.join(rs_base, 'roi_dump_' + ROI_DIR)
    # np.save(dump_path, arr=sub_FS)

    # vectorize
    cross_corr = np.corrcoef(sub_FS.T)
    tril_inds = np.tril_indices_from(cross_corr, k=-1)
    cc_ravel = cross_corr[tril_inds]

    # cross_corrs.append(cc_ravel)
    cur_FS[i_rs] = cc_ravel

    # plt.matshow(cross_corr, cmap=plt.cm.RdBu)
    # plt.title(rs_base)
Example #14
    masker.fit()

    allsj_ls = []
    for sj in os.listdir(func_dir):
        sj_dir = os.path.join(func_dir, sj)
        sj_ls = []
        for f in sorted(os.listdir(sj_dir)):
            # read pmod
            sj_num = int(f.split('_')[0].split('-')[1])
            run_num = int(f.split('_')[2].split('-')[1])
            df_pmod = resample_pmod(df, sj_num, run_num)

            # read roi
            func_file = os.path.join(sj_dir, f)
            #         print(func_file)
            mask_data = masker.transform(func_file)
            mask_mean = np.mean(mask_data, 1)

            # make df
            df_len = min([len(mask_mean), len(df_pmod)])
            df_tmp = pd.DataFrame({
                'roi_mean': mask_mean[:df_len],
                'pmod_mean': df_pmod[:df_len],
                'subject': [sj_num] * df_len,
                'session': [run_num] * df_len
            })
            sj_ls.append(df_tmp)
        df_sj = pd.concat(sj_ls)
        allsj_ls.append(df_sj)
    allsj_df = pd.concat(allsj_ls)
    fname = mask_img.split('.')[0].split('/')[1] + '.csv'
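    # Assumed continuation: write the combined dataframe using the file name
    # built above.
    allsj_df.to_csv(fname, index=False)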
Example #15
                           memory_level=1,
                           verbose=0,
                           detrend=True,
                           standardize=True,
                           low_pass=0.1,
                           high_pass=0.01,
                           t_r=2.5,
                           resampling_target='labels')
masker.fit()

for suj in range(6):
    result_scores[suj] = {}
    y, session = data_behaviour(suj)
    fmri_filename = haxby.func[suj]
    haxby_mni = resample_img(fmri_filename, template.affine, template.shape)
    fmri = masker.transform(haxby_mni)
    rest_mask = y == b'rest'
    condition_mask = np.logical_or(y == b'cat', y == b'face')
    rest = fmri[rest_mask]
    cond = fmri[condition_mask]
    session_label = session[condition_mask]
    cv = LeaveOneLabelOut(session_label / 2)
    y = y[condition_mask]
    y = (y == b'cat')

    w_name = 'D:/haxby2001/rest/' + str(suj) + '_w_kalofolias.mat'

    for graph_name, param in sorted(graphsname.items()):
        if 'method' not in param:
            param['method'] = False
        if 'spars' not in param:
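            # assumed default, by analogy with the 'method' key above
            param['spars'] = False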
Example #16
    r_atlas_nii = resample_img(
        img=atlas_nii,
        target_affine=mask_file.get_affine(),
        target_shape=mask_file.shape,
        interpolation='nearest'
    )
    r_atlas_nii.to_filename('debug_ratlas.nii.gz')

    from nilearn.input_data import NiftiLabelsMasker
    nlm = NiftiLabelsMasker(
        labels_img=r_atlas_nii, mask_img=mask_file,
        standardize=True, detrend=True)

    nlm.fit()
    FS_regpool = nlm.transform(all_sub_rs_maps)
    np.save('%i_regs_timeseries' % sub_id, FS_regpool)

    # compute network sparse inverse covariance
    from sklearn.covariance import GraphLassoCV
    from nilearn.image import index_img
    from nilearn import plotting

    try:
        gsc_nets = GraphLassoCV(verbose=2, alphas=20)
        gsc_nets.fit(FS_regpool)

        np.save('%i_regs_cov' % sub_id, gsc_nets.covariance_)
        np.save('%i_regs_prec' % sub_id, gsc_nets.precision_)
    except Exception:
        # the sparse inverse covariance fit can fail to converge for some
        # subjects; skip them rather than aborting the whole run
        pass
Example #17
def reduce(dataset, output_dir=None, direct=False, source='hcp_rs_concat'):
    """Create a reduced version of a given dataset.
        Unmask must be called beforehand"""
    memory = Memory(cachedir=get_cache_dirs()[0], verbose=2)
    print('Fetch data')
    this_dataset_dir = join(get_output_dir(output_dir), 'unmasked', dataset)
    masker, X = get_raw_contrast_data(this_dataset_dir)
    print('Retrieve components')
    if source == 'craddock':
        components = fetch_craddock_parcellation().parcellate400
        niimgs = masker.inverse_transform(X.values)
        label_masker = NiftiLabelsMasker(labels_img=components,
                                         smoothing_fwhm=0,
                                         mask_img=masker.mask_img_).fit()
        # components = label_masker.inverse_transform(np.eye(400))
        print('Transform and fit data')
        Xt = label_masker.transform(niimgs)
    else:
        if source == 'msdl':
            components = [fetch_atlas_msdl()['maps']]
        else:
            data = fetch_atlas_modl()
            if source == 'hcp_rs':
                components_imgs = [data.nips2017_components256]
            elif source == 'hcp_rs_concat':
                components_imgs = [
                    data.nips2017_components16, data.nips2017_components64,
                    data.nips2017_components256
                ]
            elif source == 'hcp_336':
                components_imgs = [data.nips2017_components336]
            elif source == 'hcp_new':
                components_imgs = [
                    data.positive_new_components16,
                    data.positive_new_components64,
                    data.positive_new_components128
                ]
            elif source == 'hcp_new_big':
                components_imgs = [
                    data.positive_new_components16,
                    data.positive_new_components64,
                    data.positive_new_components512
                ]
            elif source == 'hcp_rs_positive_concat':
                components_imgs = [
                    data.positive_components16, data.positive_components64,
                    data.positive_components512
                ]
            elif source == 'hcp_new_208':
                components_imgs = [data.positive_new_components208]

            components = masker.transform(components_imgs)
        print('Transform and fit data')
        proj, proj_inv, _ = memory.cache(make_projection_matrix)(
            components, scale_bases=True)
        if direct:
            proj = proj_inv.T
        Xt = X.dot(proj)
    Xt = pd.DataFrame(data=Xt, index=X.index)
    this_source = source
    if direct:
        this_source += '_direct'
    this_output_dir = join(get_output_dir(output_dir), 'reduced', this_source,
                           dataset)
    if not os.path.exists(this_output_dir):
        os.makedirs(this_output_dir)
    print(join(this_output_dir, 'Xt.pkl'))
    Xt.to_pickle(join(this_output_dir, 'Xt.pkl'))
    dump(masker, join(this_output_dir, 'masker.pkl'))
    np.save(join(output_dir, 'components'), components)
# Prepare masker & Correct affine
template = load_mni152_template()
basc = datasets.fetch_atlas_basc_multiscale_2015(version='sym')['scale444']
orig_filename = 'F:/sim/template/restbaseline.nii.gz'
orig_img = image.load_img(orig_filename)
brainmask = load_mni152_brain_mask()
mem = Memory('nilearn_cache')
masker = NiftiLabelsMasker(labels_img=basc, mask_img=brainmask,
                           memory=mem, memory_level=1, verbose=0,
                           detrend=False, standardize=False,
                           high_pass=0.01, t_r=2,
                           resampling_target='labels')
masker.fit()

# Prep Classification
   
for s in np.arange(84, 89):  # or range(suj) to process all subjects
    for n in range(meas):
        sim_filename = 'F:/sim/sim_' + str(s + 1) + '_' + str(n + 1) + '.nii.gz'
        if os.path.exists(sim_filename):
            sim_img = image.load_img(sim_filename)
            sim = sim_img.get_data()
            data_sim = Nifti1Image(sim, orig_img.affine)
            roi = masker.transform(data_sim)
            save_name = 'F:/sim/mni/roi_mni_' + str(s + 1) + '_' + str(n + 1) + '.npz'
            np.savez_compressed(save_name, roi=roi)
            rest = roi[rest_mask]
            sio.savemat('F:/sim/rest/sim_' + str(s + 1) + '_' + str(n + 1) + '_rest.mat',
                        {'rest': rest})

        
ratlas_nii = resample_img(
  atlas.maps, target_affine=tmp_nii.affine, interpolation='nearest')
# ratlas_nii.to_filename('debug_ratlas.nii.gz')

# extracting MRI data
FS = []
for i_nii, nii_path in enumerate(df_RLS.data_path.values):
  print(nii_path)
  nii = nib.load(nii_path)
  cur_ratlas_nii = resample_img(
    atlas.maps, target_affine=nii.affine, interpolation='nearest')
  nii_fake4D = nib.Nifti1Image(
    nii.get_data()[:, :, :, None], affine=nii.affine)
  masker = NiftiLabelsMasker(labels_img=ratlas_nii)
  masker.fit()
  cur_FS = masker.transform(nii_fake4D)
  FS.append(cur_FS)
FS = np.array(FS).squeeze()
assert len(df_RLS.data_path.values) == len(FS)
ssFS = StandardScaler().fit_transform(FS)

np.random.seed(42)

# data brain
pca_brain = PCA(n_components=10).fit(ssFS)
Y_ss_brain_pca = pca_brain.transform(ssFS)


#############################
####### CCA Analysis ########
#############################
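# Minimal CCA sketch (assumed continuation). `Y_behavior` below is random
# placeholder data standing in for the behavioural measures, which are not
# part of this excerpt.
from sklearn.cross_decomposition import CCA

Y_behavior = np.random.RandomState(42).rand(ssFS.shape[0], 5)
cca = CCA(n_components=2)
brain_scores, behavior_scores = cca.fit_transform(Y_ss_brain_pca, Y_behavior)
# correlation between the first pair of canonical variates
print(np.corrcoef(brain_scores[:, 0], behavior_scores[:, 0])[0, 1])
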
Example #20
# Fit the masker so that data can later be extracted according to the
# parameters given above
masker.fit()
# Preparing for data extraction: setting the number of conditions, sizes,
# etc. from the haxby dataset
condition_names = haxby_labels.unique()
n_cond_img = fmri_data[..., haxby_labels == 'house'].shape[-1]
n_conds = len(condition_names)

X1, X2 = np.zeros((n_cond_img, n_conds)), np.zeros((n_cond_img, n_conds))
# Gather the data for each condition and apply the masker's transform() to
# the condition maps. The transformer extracts, from each condition map, the
# signal of the target regions defined by the labels image.
for i, cond in enumerate(condition_names):
    cond_maps = new_img_like(
        fmri_img, fmri_data[..., haxby_labels == cond][..., :n_cond_img])
    mask_data = masker.transform(cond_maps)
    X1[:, i], X2[:, i] = mask_data[:, 0], mask_data[:, 1]
condition_names[np.where(condition_names == 'scrambledpix')] = 'scrambled'

##############################################################################
# save the ROI 'atlas' to a Nifti file
new_img_like(fmri_img, labels).to_filename('mask_atlas.nii.gz')

##############################################################################
# Plot the average in the different condition names
import matplotlib.pyplot as plt

plt.figure(figsize=(15, 7))
for i in np.arange(2):
    plt.subplot(1, 2, i + 1)
    plt.boxplot(X1 if i == 0 else X2)
Example #21
                                            pipeline='cpac',
                                            band_pass_filtering=True,
                                            global_signal_regression=True,
                                            derivatives=['func_preproc'],
                                            quality_checked=True,
                                            DX_GROUP=1,
                                            SITE_ID=S)

    print(len(abidedata_func_normal['func_preproc']))
    print(len(abidedata_func_autism['func_preproc']))

    ## Extraction of time series for the normal group
    abide_ts = []
    abide_ts_normal = []
    for i in range(len(abidedata_func_normal['func_preproc'])):
        ts_nor = glassermasker.transform(
            imgs=abidedata_func_normal['func_preproc'][i])
        ts_normal = ts_nor[:, ind_reord]
        abide_ts_normal.append(ts_normal)
        abide_ts.append(ts_normal)

    ## Extraction of time series for the autism group

    abide_ts_autism = []
    for i in range(len(abidedata_func_autism['func_preproc'])):
        ts_aut = glassermasker.transform(
            imgs=abidedata_func_autism['func_preproc'][i])
        ts_autism = ts_aut[:, ind_reord]
        abide_ts_autism.append(ts_autism)
        abide_ts.append(ts_autism)

    print(len(abide_ts))
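
    # A possible next step (not part of the original excerpt): summarise the
    # extracted time series as subject-level correlation matrices.
    from nilearn.connectome import ConnectivityMeasure

    correlation_measure = ConnectivityMeasure(kind='correlation')
    correlation_matrices = correlation_measure.fit_transform(abide_ts)
    print(correlation_matrices.shape)  # (n_subjects, n_regions, n_regions)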