Example #1
def generate_fmri_data_for_subject(subject):
    """
	Input : Take as input each fmri file. One file = One block
	Load all fmri data and apply a global mask mak on it. The global mask is computed using the mask from each fmri run (block). 
	Applying a global mask for a subject uniformize the data. 
	Output: Output fmri_runs for a subject, corrected using a global mask
	"""
    fmri_filenames = sorted(
        glob.glob(
            os.path.join(paths.rootpath, "fmri-data/en", "sub-%03d" % subject,
                         "func", "resample*.nii")))

    masks_filenames = sorted(
        glob.glob(
            os.path.join(paths.path2Data, "en/fmri_data/masks",
                         "sub_{}".format(subject), "resample*.pkl")))
    masks = []
    for file in masks_filenames:
        with open(file, 'rb') as f:
            mask = pickle.load(f)
            masks.append(mask)

    global_mask = math_img('img>0.5', img=mean_img(masks))
    masker = MultiNiftiMasker(global_mask, detrend=True, standardize=True)
    masker.fit()
    fmri_runs = [masker.transform(f) for f in tqdm(fmri_filenames)]
    print(fmri_runs[0].shape)

    return fmri_runs
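
A minimal usage sketch (the subject number is hypothetical and assumes the paths module is configured as above):

fmri_runs = generate_fmri_data_for_subject(57)
print(len(fmri_runs))       # number of runs (blocks)
print(fmri_runs[0].shape)   # (n_scans, n_voxels) of the first run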
Example #2
def create_raw_contrast_data(imgs,
                             mask,
                             raw_dir,
                             memory=Memory(cachedir=None),
                             n_jobs=1,
                             batch_size=100):
    if not os.path.exists(raw_dir):
        os.makedirs(raw_dir)

    # Selection of contrasts
    masker = MultiNiftiMasker(smoothing_fwhm=0,
                              mask_img=mask,
                              memory=memory,
                              memory_level=1,
                              n_jobs=n_jobs).fit()
    mask_img_file = os.path.join(raw_dir, 'mask_img.nii.gz')
    masker.mask_img_.to_filename(mask_img_file)

    batches = gen_batches(len(imgs), batch_size)

    data = np.empty((len(imgs), masker.mask_img_.get_data().sum()),
                    dtype=np.float32)
    for i, batch in enumerate(batches):
        print('Batch %i' % i)
        data[batch] = masker.transform(imgs['z_map'].values[batch])
    imgs = pd.DataFrame(data=data, index=imgs.index, dtype=np.float32)
    imgs.to_pickle(join(raw_dir, 'imgs.pkl'))
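
Because the masked maps are pickled alongside the mask image, they can be mapped back to brain space later. A sketch of that round trip (the reload step is an assumption, not part of the snippet):

# Reload the pickled contrasts and rebuild a Nifti image for the first row
import pandas as pd
from nilearn.input_data import NiftiMasker

imgs = pd.read_pickle(os.path.join(raw_dir, 'imgs.pkl'))
masker = NiftiMasker(mask_img=os.path.join(raw_dir, 'mask_img.nii.gz')).fit()
first_map = masker.inverse_transform(imgs.iloc[0].values)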
Example #3
def compute_rec():
    mask_img = fetch_mask()
    masker = MultiNiftiMasker(mask_img=mask_img).fit()
    atlas = fetch_atlas_modl()
    components_imgs = [
        atlas.positive_new_components16, atlas.positive_new_components64,
        atlas.positive_new_components512
    ]
    components = masker.transform(components_imgs)
    proj, proj_inv, rec = make_projection_matrix(components, scale_bases=True)
    dump(rec, join(get_output_dir(), 'benchmark', 'rec.pkl'))
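
Assuming dump above is joblib's, the saved projection matrix reloads the same way (a sketch; get_output_dir and the file layout come from the snippet):

from os.path import join
from joblib import load

rec = load(join(get_output_dir(), 'benchmark', 'rec.pkl'))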
Example #4
def generate_fmri_data_for_subject(subject, current_ROI):
	"""
	Input : Take as input each fmri file. One file = One block
	Load all fmri data and apply a global mask mak on it. The global mask is computed using the mask from each fmri run (block). 
	Applying a global mask for a subject uniformize the data. 
	Output: Output fmri_runs for a subject, corrected using a global mask
	"""

	# Get all paths for fmri data
	fmri_filenames = sorted(glob.glob(os.path.join(paths.rootpath, 
												"fmri-data/en",
												"sub-%03d" % subject, 
												"func", 
												"resampled*.nii")))
	
	# Process for All brain
	if current_ROI == -1:
		# Get paths for masks
		masks_filenames = sorted(glob.glob(os.path.join(paths.path2Data,
												"en/fmri_data/masks",
												"sub_{}".format(subject),  
												"resample*.pkl")))
		masks = []
		for file in masks_filenames:
			with open(file, 'rb') as f:
				mask = pickle.load(f)
				masks.append(mask)

		# Compute a global mask for the subject so the data are uniform across runs
		global_mask = math_img('img>0.5', img=mean_img(masks))
		masker = MultiNiftiMasker(global_mask, detrend=True, standardize=True)
		masker.fit()

		# Apply the mask to each fmri run (block)
		fmri_runs = [masker.transform(f) for f in tqdm(fmri_filenames)]
	
	# Process for a specific ROI
	else:
		# get paths of ROIs masks
		masks_ROIs_filenames = sorted(glob.glob(os.path.join(paths.path2Data, 
												"en/ROIs_masks/",
												"*.nii")))
		# Choose the mask 
		ROI_mask = masks_ROIs_filenames[current_ROI]
		ROI_mask = NiftiMasker(ROI_mask, detrend=True, standardize=True)
		ROI_mask.fit()

		# Apply the mask to each fmri run (block)
		fmri_runs = [ROI_mask.transform(f) for f in fmri_filenames]
		
	return fmri_runs
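
Hypothetical calls covering both branches (the subject id and ROI index are illustrative; paths must be configured as above):

whole_brain_runs = generate_fmri_data_for_subject(57, current_ROI=-1)  # global mask
roi_runs = generate_fmri_data_for_subject(57, current_ROI=0)           # first ROI mask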
Example #5
############################################################################
# Then we prepare and mask the data
# ----------------------------------
import numpy as np
from nilearn.input_data import MultiNiftiMasker

sys.stderr.write("Preprocessing data...")
t0 = time.time()

# Load and mask fMRI data
masker = MultiNiftiMasker(mask_img=miyawaki_dataset.mask,
                          detrend=True,
                          standardize=False)
masker.fit()
X_train = masker.transform(X_random_filenames)
X_test = masker.transform(X_figure_filenames)

# We load the visual stimuli from csv files
y_shape = (10, 10)  # 10x10 binary stimulus frames (as defined in the other snippets)
y_train = []
for y in y_random_filenames:
    y_train.append(
        np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
                   (-1, ) + y_shape,
                   order='F'))

y_test = []
for y in y_figure_filenames:
    y_test.append(
        np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
                   (-1, ) + y_shape,
                   order='F'))
Example #6
y_shape = (10, 10)

sys.stderr.write(" Done (%.2fs).\n" % (time.time() - t0))

### Preprocess and mask #######################################################
import numpy as np
from nilearn.input_data import MultiNiftiMasker

sys.stderr.write("Preprocessing data...")
t0 = time.time()

# Load and mask fMRI data
masker = MultiNiftiMasker(mask_img=miyawaki_dataset.mask, detrend=True,
                          standardize=False)
masker.fit()
X_train = masker.transform(X_random_filenames)
X_test = masker.transform(X_figure_filenames)

# Load visual stimuli from csv files
y_train = []
for y in y_random_filenames:
    y_train.append(np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
                              (-1,) + y_shape, order='F'))

y_test = []
for y in y_figure_filenames:
    y_test.append(np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
                             (-1,) + y_shape, order='F'))

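# Shift the two series against each other by 2 volumes so that each stimulus
# frame is paired with the BOLD signal it evokes about 2 TRs later
# (the usual hemodynamic-delay correction in the Miyawaki examples)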
X_train = np.vstack([x[2:] for x in X_train])
y_train = np.vstack([y[:-2] for y in y_train]).astype(float)
Example #7
# training data starts after the first 12 files
fmri_random_runs_filenames = dataset.func[12:]
stimuli_random_runs_filenames = dataset.label[12:]

##############################################################################
# We can use :func:`nilearn.input_data.MultiNiftiMasker` to load the fMRI
# data, clean and mask it.

import numpy as np
from nilearn.input_data import MultiNiftiMasker

masker = MultiNiftiMasker(mask_img=dataset.mask, detrend=True,
                          standardize=True)
masker.fit()
fmri_data = masker.transform(fmri_random_runs_filenames)

# shape of the binary (i.e. black and white values) image in pixels
stimulus_shape = (10, 10)

# We load the visual stimuli from csv files
stimuli = []
for stimulus_run in stimuli_random_runs_filenames:
    stimuli.append(np.reshape(np.loadtxt(stimulus_run,
                              dtype=np.int, delimiter=','),
                              (-1,) + stimulus_shape, order='F'))

##############################################################################
# Let's take a look at some of these binary images:

import pylab as plt
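
The excerpt stops right after importing pylab; a minimal sketch of the visualization it leads into (the layout is illustrative, not from the original):

for i in range(6):
    plt.subplot(1, 6, i + 1)
    plt.imshow(stimuli[0][i], cmap='gray', interpolation='nearest')
    plt.axis('off')
plt.show()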
Example #8
if source == 'craddock':
    components = fetch_craddock_parcellation().parcellate400
    data = np.ones_like(check_niimg(components).get_data())
    mask = new_img_like(components, data)
    label_masker = NiftiLabelsMasker(labels_img=components,
                                     smoothing_fwhm=0,
                                     mask_img=mask).fit()
    maps_img = label_masker.inverse_transform(maps)
else:
    mask = fetch_mask()
    masker = MultiNiftiMasker(mask_img=mask).fit()

    if source == 'msdl':
        components = fetch_atlas_msdl()['maps']
        components = masker.transform(components)
    elif source in ['hcp_rs', 'hcp_rs_concat', 'hcp_rs_positive']:
        data = fetch_atlas_modl()
        if source == 'hcp_rs':
            components_imgs = [data.nips2017_components64]
        elif source == 'hcp_rs_concat':
            components_imgs = [
                data.nips2017_components16, data.nips2017_components64,
                data.nips2017_components256
            ]
        else:
            components_imgs = [
                data.positive_components16, data.positive_components64,
                data.positive_components512
            ]
        components = masker.transform(components_imgs)
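
masker.transform on a list of images returns one (n_components, n_voxels) array per atlas image; stacking them gives a single components matrix that can be mapped back to brain space if needed (this round trip is an assumption, not part of the snippet):

components = np.vstack(components)                     # stack the atlases
components_img = masker.inverse_transform(components)  # back to a 4D image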
Example #9
sys.stderr.write(" Done (%.2fs).\n" % (time.time() - t0))

### Preprocess and mask #######################################################
import numpy as np
from nilearn.input_data import MultiNiftiMasker

sys.stderr.write("Preprocessing data...")
t0 = time.time()

# Load and mask fMRI data
masker = MultiNiftiMasker(mask_img=dataset.mask,
                          detrend=True,
                          standardize=False)
masker.fit()
X_train = masker.transform(X_random)
X_test = masker.transform(X_figure)

# Load visual stimuli from csv files
y_train = []
for y in y_random:
    y_train.append(
        np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
                   (-1, ) + y_shape,
                   order='F'))

y_test = []
for y in y_figure:
    y_test.append(
        np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
                   (-1, ) + y_shape,
                   order='F'))
Example #10
save_path = 'path for saving the results'
runs = 10 # Select the number of runs

############################ Loading Data #####################################
miyawaki_dataset = fetch_miyawaki2008()

Y_random_filenames = miyawaki_dataset.func[12:]
Y_figure_filenames = miyawaki_dataset.func[:12]
X_random_filenames = miyawaki_dataset.label[12:]
X_figure_filenames = miyawaki_dataset.label[:12]
X_shape = (10, 10)

masker = MultiNiftiMasker(mask_img=miyawaki_dataset.mask, detrend=True,
                          standardize=False)
masker.fit()
fmri_random = masker.transform(Y_random_filenames)
fmri_figure = masker.transform(Y_figure_filenames)

pattern_random = [] 
for x in X_random_filenames:
    pattern_random.append(np.reshape(np.loadtxt(x, dtype=np.int, delimiter=','),
                              (-1,) + X_shape, order='F'))

pattern_figure = []
for x in X_figure_filenames:
    pattern_figure.append(np.reshape(np.loadtxt(x, dtype=np.int, delimiter=','),
                             (-1,) + X_shape, order='F'))
                             
fmri_random = np.vstack([y[2:] for y in fmri_random])
pattern_random = np.vstack([x[:-2] for x in pattern_random]).astype(float)
fmri_figure = np.vstack([y[2:] for y in fmri_figure])
pattern_figure = np.vstack([x[:-2] for x in pattern_figure]).astype(float)
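# After the 2-volume shift, stimulus and fMRI rows pair one-to-one;
# an illustrative sanity check (not in the original snippet):
assert fmri_random.shape[0] == pattern_random.shape[0]
assert fmri_figure.shape[0] == pattern_figure.shape[0]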
Example #11
                                                       len(func_filenames)))

# Build an EPI-based mask because we have no anatomical data
if not os.path.isfile(MASKFILE):
    target_img = nibabel.load(func_filenames[0])
    mask = (target_img.get_data()[..., 0] != 0).astype(int)
    mask_img = nibabel.Nifti1Image(mask, target_img.affine)
    nibabel.save(mask_img, MASKFILE)
else:
    mask_img = nibabel.load(MASKFILE)

# Mask and preproc EPI data
masker = MultiNiftiMasker(mask_img=mask_img, standardize=True)
masker.fit()
if not os.path.isfile(DATAFILE):
    y = np.concatenate(masker.transform(func_filenames), axis=0)
    print(y.shape)
    np.save(DATAFILE, y)
else:
    y = np.load(DATAFILE)
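
# Map the masked matrix back into volumes for the model: inverse_transform
# returns a 4D (x, y, z, t) image; moving time to the front and adding a
# channel axis yields an array of shape (n_volumes, 1, x, y, z)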
iterator = masker.inverse_transform(y).get_fdata()
iterator = iterator.transpose((3, 0, 1, 2))
iterator = np.expand_dims(iterator, axis=1)
print(iterator.shape)

# Data iterator
manager = DataManager.from_numpy(train_inputs=iterator,
                                 batch_size=BATCH_SIZE,
                                 add_input=True)

# Create model
Example #12
# training data starts after the first 12 files
fmri_random_runs_filenames = dataset.func[12:]
stimuli_random_runs_filenames = dataset.label[12:]

##############################################################################
# We can use :func:`nilearn.input_data.MultiNiftiMasker` to load the fMRI
# data, clean and mask it.

import numpy as np
from nilearn.input_data import MultiNiftiMasker

masker = MultiNiftiMasker(mask_img=dataset.mask,
                          detrend=True,
                          standardize=True)
masker.fit()
fmri_data = masker.transform(fmri_random_runs_filenames)

# shape of the binary (i.e. black and white values) image in pixels
stimulus_shape = (10, 10)

# We load the visual stimuli from csv files
stimuli = []
for stimulus_run in stimuli_random_runs_filenames:
    stimuli.append(
        np.reshape(np.loadtxt(stimulus_run, dtype=np.int, delimiter=','),
                   (-1, ) + stimulus_shape,
                   order='F'))

##############################################################################
# Let's take a look at some of these binary images:
Example #13
y_shape = (10, 10)

sys.stderr.write(" Done (%.2fs).\n" % (time.time() - t0))

### Preprocess and mask #######################################################
import numpy as np
from nilearn.input_data import MultiNiftiMasker

sys.stderr.write("Preprocessing data...")
t0 = time.time()

# Load and mask fMRI data
masker = MultiNiftiMasker(mask_img=dataset.mask, detrend=True,
                          standardize=False)
masker.fit()
X_train = masker.transform(X_random)
X_test = masker.transform(X_figure)

# Load visual stimuli from csv files
y_train = []
for y in y_random:
    y_train.append(np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
                              (-1,) + y_shape, order='F'))

y_test = []
for y in y_figure:
    y_test.append(np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
                             (-1,) + y_shape, order='F'))

X_train = np.vstack([x[2:] for x in X_train])
y_train = np.vstack([y[:-2] for y in y_train]).astype(float)
Example #14
if not os.path.exists('canica.pdf'):
    cc = nibabel.load('canica.nii.gz')

    c = cc.get_data()[:, :, :, 16]
    vmax = np.max(np.abs(c[:, :, 37]))
    c = np.ma.masked_array(
        c, np.logical_not(masker.mask_img_.get_data().astype(bool)))
    pl.imshow(np.rot90(c[:, :, 37]),
              interpolation='nearest',
              vmax=vmax,
              vmin=-vmax,
              cmap='jet')
    pl.savefig('canica.pdf')
    pl.savefig('canica.eps')

smooth_masked = masker.transform(dataset.func)

### Melodic ICA ############################################################
# To get MELODIC results, please use my melodic branch of nilearn

if not os.path.exists('melodic.nii.gz'):
    from nilearn.decomposition.melodic import MelodicICA
    smoothed_func = masker.inverse_transform(smooth_masked)
    melodic = MelodicICA(mask=masker.mask_img_, approach='concat')
    t0 = time.time()
    melodic.fit(smoothed_func)
    print('Melodic: %f' % (time.time() - t0))
    melodic_components = melodic.maps_img_
    nibabel.save(melodic_components, 'melodic.nii.gz')

if not os.path.exists('melodic.pdf'):
Example #15
    canica_components = masker.inverse_transform(canica.components_)
    nibabel.save(canica_components, 'canica.nii.gz')

if not os.path.exists('canica.pdf'):
    cc = nibabel.load('canica.nii.gz')

    c = cc.get_data()[:, :, :, 16]
    vmax = np.max(np.abs(c[:, :, 37]))
    c = np.ma.masked_array(c,
            np.logical_not(masker.mask_img_.get_data().astype(bool)))
    pl.imshow(np.rot90(c[:, :, 37]),
            interpolation='nearest', vmax=vmax, vmin=-vmax, cmap='jet')
    pl.savefig('canica.pdf')
    pl.savefig('canica.eps')

smooth_masked = masker.transform(dataset.func)

### Melodic ICA ############################################################
# To get MELODIC results, please use my melodic branch of nilearn

if not os.path.exists('melodic.nii.gz'):
    from nilearn.decomposition.melodic import MelodicICA
    smoothed_func = masker.inverse_transform(smooth_masked)
    melodic = MelodicICA(mask=masker.mask_img_, approach='concat')
    t0 = time.time()
    melodic.fit(smoothed_func)
    print('Melodic: %f' % (time.time() - t0))
    melodic_components = melodic.maps_img_
    nibabel.save(melodic_components, 'melodic.nii.gz')

if not os.path.exists('melodic.pdf'):
Example #16
            dim = int(dim)
        indices.append((atlas, dim, sub, ses))
        filenames.append(join(write_dir, filename))
records = pd.DataFrame(index=pd.MultiIndex.from_tuples(indices,
                                                       names=[
                                                           'atlas',
                                                           'dimension',
                                                           'subject',
                                                           'session',
                                                       ]),
                       data=filenames,
                       columns=['filename'])
records.sort_index(inplace=True)

masker = MultiNiftiMasker(mask_img=mask_gm, n_jobs=1).fit()
r2s = masker.transform(records['filename'].values)
r2s = np.concatenate(r2s, axis=0)
r2s[r2s < 0] = 0
r2s = pd.DataFrame(data=r2s, index=records.index)
r2s.sort_index(inplace=True)
ref = r2s.loc['none', 'none'].values.ravel()[:, None]

imgs = [masker.inverse_transform(r2) for _, r2 in r2s.iterrows()]
records['nifti'] = imgs

subject, session = 4, 3
imgs_to_plot = records['nifti'].loc[pd.IndexSlice[:, :, subject, session]]
cut_coords = -55, -38, 2
vmax = r2s.loc[pd.IndexSlice[:, :, subject, session], :].values.max()

fig, axes = plt.subplots(len(imgs_to_plot),