def nilearn_smoothing(in_files, fwhm, out_prefix):
    """
    Using nilearn function to do smoothing, it seems better than SPM
    Args:
        in_files:
        fwhm:
        out_prefix:

    Returns:

    """
    from nilearn.image import smooth_img
    import os

    file_name = os.path.basename(in_files)
    if fwhm == 0:
        smoothed_data = smooth_img(in_files, None)
    else:
        smoothed_data = smooth_img(in_files, fwhm)
    smoothed_data.to_filename(out_prefix + file_name)

    smoothed_files = os.path.abspath(
        os.path.join(os.getcwd(), out_prefix + file_name))

    return smoothed_files
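A minimal usage sketch for the helper above; the input path is a placeholder, not a real dataset. As the function does, the smoothed copy is written to the current working directory with the given prefix and its absolute path is returned.

# Hypothetical call: smooth a functional run with a 6 mm kernel and an 's' prefix.
smoothed_path = nilearn_smoothing('/data/sub-01/func/sub-01_task-rest_bold.nii.gz',
                                  fwhm=6, out_prefix='s')
print(smoothed_path)  # <cwd>/ssub-01_task-rest_bold.nii.gz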
Example 2
    def transform(self, X, y=None, **kwargs):

        if isinstance(X, list) and len(X) == 1:
            smoothed_img = smooth_img(X[0], fwhm=self.fwhm)
        elif isinstance(X, str):
            smoothed_img = smooth_img(X, fwhm=self.fwhm)
        else:
            smoothed_img = smooth_img(X, fwhm=self.fwhm)

        if not self.output_img:
            if isinstance(smoothed_img, list):
                smoothed_img = np.asarray([img.dataobj for img in smoothed_img])
            else:
                return smoothed_img.dataobj
        return smoothed_img
Example 3
 def create_dataset(self, start_time, end_time, delay, fmri_end, eeg_path,
                    fmri_path):
     vector_exclude = [
         'EOG', 'ECG', 'CW1', 'CW2', 'CW3', 'CW4', 'CW5', 'CW6', 'Status'
     ]
     raw = mne.io.read_raw_edf(eeg_path, exclude=vector_exclude)
     eeg = raw.get_data()
     eeg = mne.filter.filter_data(eeg, sfreq=1000, l_freq=5, h_freq=100)
     eeg_flip = np.fliplr(eeg)
     fmri_im = image.smooth_img(fmri_path, fwhm=6)
     fmri = get_masked_fmri(fmri_im, "sub")
     start = start_time
     end = start_time + self.segment_length
     x_list = []
     y_list = []
     x_fl_list = []
     while end < eeg.shape[1] and end <= fmri_end and end < end_time:
         signal = eeg[..., start:end]
         signal_flip = eeg_flip[..., start:end]
         x = eeg_transform(signal)
         y = fmri_transform(end, fmri, delay, fmri_end)
         x1 = eeg_transform(signal_flip)
         x_list.append(x)
         y_list.append(y)
         x_fl_list.append(x1)
         start += self.step
         end += self.step
     x_list = np.array(x_list)
     x_fl_list = np.array(x_fl_list)
     y_list = np.array(y_list)
     return x_list, y_list, x_fl_list
Example 4
    def __init__(self, root_dir, file_path, list_IDs, labels, format, transform=None):
        # event_file could be added when real data is available.
        # add this argument later: tsv_file
        """
        Args:
            file_path (string): Path to the (fmri) file (nifti or npy).
            root_dir (string): Directory with all the images.
            list_IDs (sequence of int): Indices of the frames to keep.
            labels: Labels associated with the selected frames.
            format (string): 'nifti' to load a NIfTI file, anything else to
                load a .npy array.
            transform (callable, optional): Optional transform to be applied
                on a sample, i.e. flattening and subsampling.
        """

        self.root_dir = root_dir
        self.file_path = file_path
        self.labels = labels
        self.path = os.path.join(self.root_dir, self.file_path)

        if format == 'nifti':
            self.subject_frames = smooth_img(self.path, fwhm=None).get_fdata()
            self.subject_frames = self.subject_frames[:, :, :, list_IDs]

        else:

            images = np.load(self.path, encoding='bytes')
            images = images[list_IDs, :, :, :]
            images = np.reshape(images, (images.shape[1], images.shape[2], images.shape[3], images.shape[0]))
            self.subject_frames = images

        self.transform = transform
Example 5
    def test_multi_subject_smoothing(self):
        # nilearn
        from nilearn.image import smooth_img

        nilearn_smoothed_img = smooth_img(self.X[0:3], fwhm=[3, 3, 3])
        nilearn_smoothed_array = nilearn_smoothed_img[1].dataobj

        # photon
        smoother = PipelineElement("SmoothImages", hyperparameters={}, fwhm=3)
        photon_smoothed_array, _, _ = smoother.transform(self.X[0:3])

        branch = NeuroBranch("NeuroBranch", output_img=True)
        branch += smoother
        photon_smoothed_img, _, _ = branch.transform(self.X[0:3])

        # assert
        self.assertIsInstance(photon_smoothed_array, np.ndarray)
        self.assertIsInstance(photon_smoothed_img[0], Nifti1Image)

        self.assertTrue(
            np.array_equal(photon_smoothed_array[1], nilearn_smoothed_array)
        )
        self.assertTrue(
            np.array_equal(
                photon_smoothed_img[1].dataobj, nilearn_smoothed_img[1].dataobj
            )
        )
Example 6
def p_map(task, run, p_values_3d, threshold=0.05):
    """
    Generate three thresholded p-value maps.

    Parameters
    ----------
    task: int
        Task number
    run: int
        Run number
    p_values_3d: 3D array of p-values.
    threshold: The cutoff value to determine significant voxels.

    Returns
    -------
    threshold p-value images
    """
    fmri_img = image.smooth_img('../../../data/sub001/BOLD/' + 'task00' +
                                str(task) + '_run00' + str(run) +
                                '/filtered_func_data_mni.nii.gz',
                                fwhm=6)

    mean_img = image.mean_img(fmri_img)

    log_p_values = -np.log10(p_values_3d)
    log_p_values[np.isnan(log_p_values)] = 0.
    log_p_values[log_p_values > 10.] = 10.
    log_p_values[log_p_values < -np.log10(threshold)] = 0
    plot_stat_map(nib.Nifti1Image(log_p_values, fmri_img.get_affine()),
                  mean_img,
                  title="Thresholded p-values",
                  annotate=False,
                  colorbar=True)
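As a standalone illustration of the thresholding arithmetic used above, here is the same -log10 transform applied to a small synthetic array of p-values (the values are made up for demonstration):

import numpy as np

p = np.array([0.5, 0.04, 1e-12, np.nan])
log_p = -np.log10(p)
log_p[np.isnan(log_p)] = 0.           # undefined p-values are dropped
log_p[log_p > 10.] = 10.              # cap the scale at 10 for display
log_p[log_p < -np.log10(0.05)] = 0    # keep only voxels significant at threshold=0.05
print(log_p)                          # [ 0.          1.39794001 10.          0.        ]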
 def smoothImages(self, images, threshold):
     smoothedImages = []
     for i in range(len(images)):
         smoothedImage = smooth_img(images[i], threshold)
         smoothedImages.append(smoothedImage)
         print("Image Smoothened")
     return smoothedImages
Example 8
def preprocess(num, subj, subj_dir, subj_warp_dir, force_warp=False):
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    template_path = os.path.join(DATA_DIR, 'templates', 'grpbold7Tp1', 'brain.nii.gz')
    warp_path = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'in_grpbold7Tp1', 'subj2tmpl_warp.nii.gz')

    output_path = os.path.join(subj_warp_dir, 'run00%i.nii.gz' % num)

    if force_warp or not os.path.exists(output_path):
        print 'Warping image #%i...' % num
        subprocess.call(['fsl5.0-applywarp', '-i', bold_path, '-o', output_path, '-r', template_path, '-w', warp_path, '-d', 'float'])
    else:
        print 'Reusing cached warp image #%i' % num

    print 'Loading image #%i...' % num
    bold = load(output_path)

    masker = NiftiMasker(load(MASK_FILE))
    # masker = niftimasker(load(MASK_FILE), detrend=true, smoothing_fwhm=4.0,
    #                     high_pass=0.01, t_r=2.0, standardize=true)
    masker.fit()
    print 'Removing confounds from image #%i...' % num
    data = masker.transform(bold, confounds(num, subj))
    print 'Detrending image #%i...' % num
    filtered = np.float32(savgol_filter(data, 61, 5, axis=0))
    img = masker.inverse_transform(data-filtered)
    print 'Smoothing image #%i...' % num
    img = image.smooth_img(img, 4.0)
    print 'Saving image #%i...' % num
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
    print 'Finished with image #%i' % num
Example 9
    def preprocess(self, imgs):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None or self.smoothing_fwhm is not None:
            if self.resampling is not None and not os.path.exists(path_first_smoothed):
                if not os.path.exists(path_first_resampled):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = [os.path.join(os.path.dirname(img), resample_prefix + os.path.basename(img)) if isinstance(img, str)
                            else os.path.join(os.path.dirname(img.get_filename()), resample_prefix + os.path.basename(img.get_filename())) for img in imgs]
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = [os.path.join(os.path.dirname(img), smooth_prefix + resample_prefix + os.path.basename(img)) if isinstance(img, str)
                            else os.path.join(os.path.dirname(img.get_filename()), smooth_prefix + resample_prefix + os.path.basename(img.get_filename())) for img in imgs]
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return imgs
Example 10
    def transform(self, imgs, confounds=None):
        """

        Parameters
        ----------
        imgs: list of Niimg-like objects
        """
        self._check_fitted()

        if self.smoothing_fwhm:
            imgs = smooth_img(imgs, self.smoothing_fwhm)

        imgs = [_utils.check_niimg_3d(img) for img in imgs]

        for i, roi in enumerate(self.mask_img_):
            masker = NiftiMasker(mask_img=roi)
            x = masker.fit_transform(imgs)
            if self.extract_funcs is not None:
                x = np.array([FDICT[f][0](x, **FDICT[f][1]) for f in self.extract_funcs])
            if i == 0:
                X = x
            else:
                X = np.concatenate((X, x), axis=0)

        return X.swapaxes(0, 1)
Example 11
def load_fmri(filename, maskname='', sigma=3):
    """
    Reads 4D fMRI data, smooths it with a 3D Gaussian kernel
    (note: the `sigma` argument is passed to nilearn as an FWHM in mm),
    and applies a mask to the data; if no mask is provided, a binary
    background mask is computed.
    Returns the fMRI data matrix (Time x Voxels).
    """
    img = nib.load(filename)
    print(img.shape)
    rep_time = img.header.get_zooms()[-1]
    img = image.smooth_img(img, sigma)
    if maskname != '':
        img_mask = nib.load(maskname)
    else:
        print('Mask not provided. Calculating mask ...')
        img_mask = masking.compute_background_mask(img)
    img = masking.apply_mask(img, img_mask)
    print('Mask applied!')
    print('Detrending data!')
    img = signal.clean(img,
                       detrend=True,
                       high_pass=0.01,
                       standardize=False,
                       t_r=rep_time)
    return img
Example 12
def transformation(filename):
    from nilearn import plotting
    from nilearn import datasets
    import scipy.ndimage as ndimage
    img = nib.load(filename)
    print(img.get_fdata().shape)
    flip_x = np.flip(img.get_fdata(), 0)
    flip_y = np.flip(img.get_fdata(), 1)
    flip_z = np.flip(img.get_fdata(), 2)
    flip_img_x = nib.Nifti1Image(flip_x, affine=np.eye(4))
    flip_img_y = nib.Nifti1Image(flip_y, affine=np.eye(4))
    flip_img_z = nib.Nifti1Image(flip_z, affine=np.eye(4))
    gaussian_img = nib.Nifti1Image(ndimage.gaussian_filter(img.get_fdata(),
                                                           sigma=(5, 5, 0),
                                                           order=0),
                                   affine=np.eye(4))
    from nilearn import image
    smooth_img = image.smooth_img(img, [50.1, 10.0, 0.2])
    nib.save(flip_img_x,
             os.path.join('images', 'flip_x.' + filename.split('/')[-1]))
    nib.save(flip_img_y,
             os.path.join('images', 'flip_y.' + filename.split('/')[-1]))
    nib.save(flip_img_z,
             os.path.join('images', 'flip_z.' + filename.split('/')[-1]))
    nib.save(smooth_img,
             os.path.join('images', 'smooth.' + filename.split('/')[-1]))
    print(type(smooth_img))
    print(type(gaussian_img))
    nib.save(gaussian_img,
             os.path.join('images', 'gaussian.' + filename.split('/')[-1]))
Example 13
    def transform(self, imgs, confounds=None):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None and self.smoothing_fwhm is not None:
            if self.resampling is not None:
                if not os.path.exists(path_first_resampled) and not os.path.exists(path_first_smoothed):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = []
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = []
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return self.masker.transform(imgs)
Example 14
def random_blur(image, mean, std):
    """
    mean: mean fwhm in millimeters.
    std: standard deviation of fwhm in millimeters.
    """
    return smooth_img(image,
                      fwhm=np.abs(np.random.normal(mean, std, 3)).tolist())
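A hypothetical call of the augmentation helper above, on a synthetic volume; it assumes numpy, nibabel and nilearn's smooth_img are imported as in the original module, and the mean/std values are examples only.

import numpy as np
import nibabel as nib

rng = np.random.RandomState(0)
vol = nib.Nifti1Image(rng.standard_normal((16, 16, 16)), affine=np.eye(4))
blurred = random_blur(vol, mean=4.0, std=1.0)  # per-axis FWHM drawn from |N(4, 1)|
print(blurred.shape)                           # (16, 16, 16)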
Example 15
def __main__():
    volume = image.index_img("E:\\Users\\Niall\\Documents\\Computer Science\\FinalYearProject\\data\\ds105_raw\\ds105\\sub001\\BOLD\\task001_run001\\bold.nii.gz", 0)
    smoothed_img = image.smooth_img(volume, fwhm=5)

    # print("Read the images");

    plotting.plot_glass_brain(volume, title='plot_glass_brain',
    black_bg=True, display_mode='xz')
    plotting.plot_glass_brain(volume, title='plot_glass_brain',
    black_bg=False, display_mode='xz')

    plt.show()

    # print("Finished");



    # generate some numbers
    t = np.linspace(1, 10, 2000)  # 2000 points between 1 and 10
    t

    #plot the graph
    plt.plot(t, np.cos(t))
    plt.ylabel('Subject Response')
    plt.show()
def smooth(img):
    # we need to preserve the original header because the smoothing function
    # does not keep the TR intact
    nimg = smooth_img(img, fwhm=2.0)
    return nb.Nifti1Image(nimg.get_data(),
                          img.get_affine(),
                          header=img.get_header())
Example 17
    def test_002(self):
        from roistats import collect
        from nilearn import plotting as nip
        from nilearn import image
        from nilearn import datasets
        from roistats import atlases, plotting
        import random
        import numpy as np
        import pandas as pd

        atlas = datasets.load_mni152_brain_mask()
        atlas_fp = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr50-2mm')['maps']
        try:
            labels = atlases.labels('HarvardOxford-Cortical.xml')
        except Exception:
            from nilearn import datasets

            name = 'cort-maxprob-thr50-2mm'
            labels = datasets.fetch_atlas_harvard_oxford(name)['labels']

            # Store them in a dict
            labels = dict(enumerate(labels))
        print(atlas_fp)

        images = []
        for i in range(0, 50):
            d = np.random.rand(*atlas.dataobj.shape) * atlas.dataobj
            im = image.new_img_like(atlas, d)
            im = image.smooth_img(im, 5)
            _, f = tempfile.mkstemp(suffix='.nii.gz')
            im.to_filename(f)
            images.append(f)

        for each in images[:3]:
            nip.plot_roi(atlas_fp, bg_img=each)

        df = collect.roistats_from_maps(images, atlas_fp, subjects=None)
        df = df.rename(columns=labels)

        df['group'] = df.index
        df['age'] = df.apply(lambda row: random.random()*5+50, axis=1)
        df['group'] = df.apply(lambda row: row['group'] < len(df)/2, axis=1)
        df['index'] = df.index

        plotting.boxplot('Temporal Pole', df, covariates=['age'], by='group')

        _ = plotting.lmplot('Temporal Pole', 'age', df, covariates=[],
                            hue='group', palette='default')

        cov = df[['age', 'group']]
        melt = pd.melt(df, id_vars='index', value_vars=labels.values(),
                       var_name='region').set_index('index')
        melt = melt.join(cov)
        plotting.hist(melt, by='group', region_colname='region',
                      covariates=['age'])

        print('Removing images')
        import os
        for e in images:
            os.unlink(e)
Example 18
def smooth(img):
    # we need to preserve the original header because the smoothing function
    # does not keep the TR intact
    nimg = smooth_img(img, fwhm=2.0)
    return nb.Nifti1Image(nimg.get_data(),
                          img.get_affine(),
                          header=img.get_header())
Example 19
    def test_single_subject_smoothing(self):

        # nilearn
        nilearn_smoothed_img = smooth_img(self.X[0], fwhm=[3, 3, 3])
        nilearn_smoothed_array = nilearn_smoothed_img.dataobj

        # photon
        smoother = PipelineElement('SmoothImages',
                                   hyperparameters={},
                                   fwhm=3,
                                   batch_size=1)
        photon_smoothed_array, _, _ = smoother.transform(self.X[0])

        branch = NeuroBranch('NeuroBranch', output_img=True)
        branch += smoother
        photon_smoothed_img, _, _ = branch.transform(self.X[0])

        # assert
        self.assertIsInstance(photon_smoothed_array, np.ndarray)
        self.assertIsInstance(photon_smoothed_img, Nifti1Image)

        self.assertTrue(
            np.array_equal(photon_smoothed_array, nilearn_smoothed_array))
        self.assertTrue(
            np.array_equal(photon_smoothed_img.dataobj,
                           nilearn_smoothed_img.dataobj))
def p_map(task, run, p_values_3d, threshold=0.05):
    """
    Generate three thresholded p-value maps.

    Parameters
    ----------
    task: int
        Task number
    run: int
        Run number
    p_values_3d: 3D array of p-values.
    threshold: The cutoff value to determine significant voxels.

    Returns
    -------
    threshold p-value images
    """
    fmri_img = image.smooth_img('../../../data/sub001/BOLD/' + 'task00' +
                                str(task) + '_run00' + str(run) +
                                '/filtered_func_data_mni.nii.gz',
                                fwhm=6)

    mean_img = image.mean_img(fmri_img)

    log_p_values = -np.log10(p_values_3d)
    log_p_values[np.isnan(log_p_values)] = 0.
    log_p_values[log_p_values > 10.] = 10.
    log_p_values[log_p_values < -np.log10(threshold)] = 0
    plot_stat_map(nib.Nifti1Image(log_p_values, fmri_img.get_affine()),
                  mean_img, title="Thresholded p-values",
                  annotate=False, colorbar=True)
Example 21
    def transform(self, imgs, confounds=None):
        """

        Parameters
        ----------
        imgs: list of Niimg-like objects
        """
        self._check_fitted()

        if self.smoothing_fwhm:
            imgs = smooth_img(imgs, self.smoothing_fwhm)

        imgs = [_utils.check_niimg_3d(img) for img in imgs]

        for i, roi in enumerate(self.mask_img_):
            masker = NiftiMasker(mask_img=roi)
            x = masker.fit_transform(imgs)
            if self.extract_funcs is not None:
                x = np.array([
                    FDICT[f][0](x, **FDICT[f][1]) for f in self.extract_funcs
                ])
            if i == 0:
                X = x
            else:
                X = np.concatenate((X, x), axis=0)

        return X.swapaxes(0, 1)
Example 22
def gaussian_coord_smoothing(coords,
                             mask_img=None,
                             target_affine=None,
                             fwhm=9.0):
    masker = get_masker(mask_img, target_affine)
    peaks_img = coords_to_peaks_img(coords, mask_img=masker.mask_img_)
    img = image.smooth_img(peaks_img, fwhm=fwhm)
    return masker.inverse_transform(masker.transform(img).squeeze())
def get_avmovie_data(sub, run):

    """Get a clean nifti image for one subject and one avmovie run"""

    src = NIFTI_SRC.format(sub=sub, run=run)
    print('Reading {}'.format(src))
    nft = image.smooth_img(image.clean_img(src), data.SMOOTHING)
    print('Done')
    return nft
Example 24
def smooth_img(in_file, fwhm, out_file=None):
    """ Use nilearn.image.smooth_img.
    Returns
    -------
    out_file: str
        The absolute path to the output file.
    """
    import nilearn.image as niimg
    return niimg.smooth_img(in_file, fwhm=fwhm)
Example 25
def smooth_img(in_file, fwhm, out_file=None):
    """ Use nilearn.image.smooth_img.
    Returns
    -------
    out_file: str
        The absolute path to the output file.
    """
    import nilearn.image as niimg
    return niimg.smooth_img(in_file, fwhm=fwhm)
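A hypothetical call of the wrapper above (the path is a placeholder). It simply forwards to nilearn and returns the smoothed image in memory, so writing a file is left to the caller:

smoothed = smooth_img('/data/sub-01/anat/sub-01_T1w.nii.gz', fwhm=8)
smoothed.to_filename('ssub-01_T1w.nii.gz')  # save explicitly if a file is needed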
Example 26
    def _run_interface(self, runtime):

        from nilearn import image
        import nibabel as nib
        import numpy as np

        smoothed_img = image.smooth_img(self.inputs.in_file, self.inputs.fwhm)
        nib.save(nib.Nifti1Image(smoothed_img.get_fdata(), nib.load(self.inputs.in_file).affine), 'smoothed.nii.gz')

        return runtime
Example 27
def smoothNi(PATH_GZ, fwhm):
    from nilearn import image
    import os

    F_smooth = image.smooth_img(PATH_GZ, fwhm=fwhm)
    OutFile = 's' + PATH_GZ[PATH_GZ.rfind('/') + 1:]
    F_smooth.to_filename(OutFile)

    out_file = os.path.abspath(OutFile)
    return out_file
Example 28
 def create_smoothed_image(self,
                           fwhm=4,
                           in_prefix='mri/mwp1{}.nii',
                           out_prefix='mri_smoothed/{}.nii'):
     for person in self.persons:
         path = os.path.join(self.file_dir,
                             in_prefix.format(person.filename))
         origin_nii = nib.load(path)
         nii = image.smooth_img(origin_nii, fwhm)
         self.save_nii(person, nii, nii_prefix=out_prefix)
Example 29
    def transform(self, imgs, confounds=None):
        if self.smoothing_fwhm is not None or self.target_affine is not None:
            if self.smoothing_fwhm is not None:
                imgs = smooth_img(imgs, self.smoothing_fwhm)
            if self.target_affine is not None:
                imgs = resample_img(imgs, target_affine=self.target_affine)
        else:
            imgs = [check_niimg_3d(img) for img in imgs] if isinstance(imgs, list) else check_niimg_3d(imgs)

        return imgs
Example 30
def dump_comps(masker,
               compressor,
               components,
               threshold=2,
               fwhm=None,
               perc=None):
    from scipy.stats import zscore
    from nilearn.plotting import plot_stat_map
    from nilearn.image import smooth_img
    from scipy.stats import scoreatpercentile

    if isinstance(compressor, basestring):
        comp_name = compressor
    else:
        comp_name = compressor.__str__().split('(')[0]

    for i_c, comp in enumerate(components):
        path_mask = op.join(WRITE_DIR,
                            '%s_%i-%i' % (comp_name, n_comp, i_c + 1))
        nii_raw = masker.inverse_transform(comp)
        nii_raw.to_filename(path_mask + '.nii.gz')

        comp_z = zscore(comp)

        if perc is not None:
            cur_thresh = scoreatpercentile(np.abs(comp_z), per=perc)
            path_mask += '_perc%i' % perc
            print('Applying percentile %.2f (threshold: %.2f)' %
                  (perc, cur_thresh))
        else:
            cur_thresh = threshold
            path_mask += '_thr%.2f' % cur_thresh
            print('Applying threshold: %.2f' % cur_thresh)

        nii_z = masker.inverse_transform(comp_z)
        gz_path = path_mask + '_zmap.nii.gz'
        nii_z.to_filename(gz_path)
        plot_stat_map(gz_path,
                      bg_img='colin.nii',
                      threshold=cur_thresh,
                      cut_coords=(0, -2, 0),
                      draw_cross=False,
                      output_file=path_mask + 'zmap.png')

        # optional: do smoothing
        if fwhm is not None:
            nii_z_fwhm = smooth_img(nii_z, fwhm=fwhm)
            gz_mm_path = path_mask + '_zmap_%imm.nii.gz' % fwhm
            nii_z_fwhm.to_filename(gz_mm_path)
            plot_stat_map(nii_z_fwhm,
                          bg_img='colin.nii',
                          threshold=cur_thresh,
                          cut_coords=(0, -2, 0),
                          draw_cross=False,
                          output_file=path_mask + ('zmap_%imm.png' % fwhm))
Example 31
    def transform(self, imgs, confounds=None):
        if self.smoothing_fwhm is not None or self.target_affine is not None:
            if self.smoothing_fwhm is not None:
                imgs = smooth_img(imgs, self.smoothing_fwhm)
            if self.target_affine is not None:
                imgs = resample_img(imgs, target_affine=self.target_affine)
        else:
            imgs = [check_niimg_3d(img) for img in imgs] if isinstance(
                imgs, list) else check_niimg_3d(imgs)

        return imgs
Example 32
def subject_data(sub):

    sessions = np.zeros(360)
    sessions[:90] = 1
    sessions[90:180] = 2
    sessions[180:270] = 3
    sessions[270:] = 4
    return image.smooth_img(
        image.clean_img(image.concat_imgs(
            src.format(sub=sub) for src in NIFTI_SRC),
                        sessions=sessions), SMOOTHING)
def smooth_concat_files(concat_files, fwhm=4.4, verbose=False, rerun=True):
    filenames = []
    for filey in concat_files:
        if verbose: print("Smoothing %s" % filey)
        smoothed = image.smooth_img(filey, fwhm)
        smooth_name = filey.replace('concat',
                                    'concatsmoothed-fwhm%s' % str(fwhm))
        if rerun or not (path.exists(smooth_name)):
            smoothed.to_filename(smooth_name)
        filenames.append(smooth_name)
    return filenames
Example 34
def make_roi_mask(atlas,
                  betas_example,
                  roi_id,
                  fwhm=None,
                  interpolation='nearest'):
    '''
    Extract ROI-specific mask from Atlas in T1w space and sample it down to have
    the dimensions of the to-be-masked image of beta coefficients

    Args:
        atlas (Nifti1Image):
            Atlas with voxel values indicative of ROI identity
        betas_example (Nifti1Image):
            Example image of GLM coefficients which will be masked later (only needed for shape and affine)
        roi_id (int):
            ROI-specific integer used in "atlas"
        fwhm (float OR np.ndarray OR None):
            FWHM in mm (along each axis, axis-specific OR skip smoothing)
        interpolation (str):
            Interpolation method by nilearn.image.resample_img (consult nilearn documentation for other options)
    
    Returns:
        mask_nifti_r (Nifti1Image):
            resampled ROI-specific mask image (target dimensions)
        mask_nifti_s (Nifti1Image):
            smoothed ROI-specific mask image (original dimensions)
        mask_nifti (Nifti1Image):
            original binary ROI-specific mask image

    '''

    # Extract atlas and target nii data
    atlas_array = atlas.get_fdata()
    atlas_affine = atlas.affine.copy()
    betas_array = betas_example.get_fdata()
    betas_affine = betas_example.affine.copy()

    # Extract ROI-specific mask and resample it
    mask = (atlas_array == roi_id).astype(float)
    mask_nifti = nifti1.Nifti1Image(mask, atlas_affine)

    # Gaussian smoothing of mask
    if fwhm is not None:
        mask_nifti_s = image.smooth_img(mask_nifti, fwhm)
    else:
        mask_nifti_s = mask_nifti

    # Resample mask
    mask_nifti_r = image.resample_img(mask_nifti_s,
                                      target_affine=betas_affine,
                                      target_shape=betas_array.shape,
                                      interpolation=interpolation,
                                      copy=True)
    return mask_nifti_r, mask_nifti_s, mask_nifti
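A small self-contained sketch of how make_roi_mask might be called, using synthetic atlas and beta images on different grids; all names and values are illustrative, and nibabel's nifti1 plus nilearn's image module are assumed to be imported as in the original code.

import numpy as np
from nibabel import nifti1

atlas_data = np.zeros((10, 10, 10))
atlas_data[3:6, 3:6, 3:6] = 17                     # pretend ROI with id 17
atlas_img = nifti1.Nifti1Image(atlas_data, np.eye(4))

# beta image on a coarser 2 mm grid
betas_img = nifti1.Nifti1Image(np.random.rand(5, 5, 5), np.diag([2., 2., 2., 1.]))

mask_r, mask_s, mask_orig = make_roi_mask(atlas_img, betas_img, roi_id=17, fwhm=4)
print(mask_r.shape)                                # (5, 5, 5), the beta image's grid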
Example 35
    def preprocess(self, imgs):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0],
                                           str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(
            os.path.dirname(path_first),
            resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(
            os.path.dirname(path_first),
            smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None or self.smoothing_fwhm is not None:
            if self.resampling is not None and not os.path.exists(
                    path_first_smoothed):
                if not os.path.exists(path_first_resampled):
                    imgs = resample_img(imgs,
                                        target_affine=np.diag(self.resampling *
                                                              np.ones(3)))
                else:
                    imgs = [
                        os.path.join(os.path.dirname(img), resample_prefix +
                                     os.path.basename(img))
                        if isinstance(img, str) else os.path.join(
                            os.path.dirname(
                                img.get_filename()), resample_prefix +
                            os.path.basename(img.get_filename()))
                        for img in imgs
                    ]
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = [
                        os.path.join(
                            os.path.dirname(img), smooth_prefix +
                            resample_prefix + os.path.basename(img))
                        if isinstance(img, str) else os.path.join(
                            os.path.dirname(img.get_filename()),
                            smooth_prefix + resample_prefix +
                            os.path.basename(img.get_filename()))
                        for img in imgs
                    ]
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return imgs
Example 36
def make_roi_mask(seed, ref_affine, fwhm=5):
    """ """
    if len(seed) == 3:
        seed = np.hstack((seed, 1))

    pos = np.dot(np.linalg.inv(ref_affine), seed).astype(np.int)[:3]
    seed_mask = np.zeros(ref_shape)
    seed_mask[pos[0], pos[1], pos[2]] = 1.
    seed_img = nib.Nifti1Image(seed_mask, ref_affine)
    seed_img = smooth_img(seed_img, fwhm)
    seed_img = math_img('img > 1.e-6', img=seed_img)
    return seed_img
Example 37
def nii2jpg(inFile=None,
            outFile=None,
            cutCoords=(3, 3, 3),
            displayMode='ortho'):
    epiImage = image.mean_img(inFile)
    epiImage = image.smooth_img(epiImage, 'fast')
    plotting.plot_epi(epi_img=epiImage,
                      cut_coords=cutCoords,
                      output_file=outFile,
                      display_mode=displayMode,
                      annotate=False,
                      draw_cross=False)
def compute_mean_epi(fmri_filenames, output_pathway):
    fmri_img = smooth_img(fmri_filenames, fwhm=5)
    mean_epi = mean_img(fmri_img)

    # Save
    mean_epi.to_filename(os.path.join(output_pathway, 'mean_epi.nii.gz'))

    # Plot
    plotting.plot_epi(mean_epi, title='Smoothed mean EPI',
                      output_file=os.path.join(output_pathway, 'mean_epi.png'))

    return mean_epi
Example 39
def coords_to_img(coords, fwhm=9.0):
    mask = load_mni152_brain_mask()
    masker = NiftiMasker(mask).fit()
    voxels = np.asarray(
        image.coord_transform(*coords.T, np.linalg.pinv(mask.affine)),
        dtype=int,
    ).T
    peaks = np.zeros(mask.shape)
    np.add.at(peaks, tuple(voxels.T), 1.0)
    peaks_img = image.new_img_like(mask, peaks)
    img = image.smooth_img(peaks_img, fwhm=fwhm)
    img = masker.inverse_transform(masker.transform(img).squeeze())
    return img
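A hypothetical call of coords_to_img, assuming numpy, nilearn's image module, NiftiMasker and load_mni152_brain_mask are imported as in the original module; the MNI coordinates are arbitrary examples.

import numpy as np

coords = np.array([[-42.0, -58.0, 48.0],   # arbitrary MNI coordinates (mm)
                   [44.0, -54.0, 50.0]])
activation_img = coords_to_img(coords, fwhm=9.0)
print(activation_img.shape)                # a 3D image on the MNI152 brain-mask grid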
def save_fake_map(orig_inside_mask, map_nii, row, mask_nii, t_quantiles, t_values, i, j):
    np.random.seed(j)
    new_data = np.random.choice(orig_inside_mask, size=map_nii.shape, replace=True)

    shuffled_nii = nb.Nifti1Image(new_data, map_nii.affine, map_nii.header)
    smoothed_nii = smooth_img(shuffled_nii, np.array([row.FWHMx_mm, row.FWHMy_mm, row.FWHMz_mm]))
    new_data = smoothed_nii.get_data()
    new_data[mask_nii.get_data() != 1] = np.nan
    new_inside_mask = stats.zscore(new_data[mask_nii.get_data() == 1])

    new_data[mask_nii.get_data() == 1] = new_inside_mask #hist_match(new_inside_mask, t_quantiles, t_values)

    masked_nii = nb.Nifti1Image(new_data, map_nii.affine, map_nii.header)

    masked_nii.to_filename(data_location +"/images/fake_maps/%04d/%04d.nii.gz"%(i, j))
Example 41
def dump_comps(masker, compressor, components, threshold=2, fwhm=None,
               perc=None):
    from scipy.stats import zscore
    from nilearn.plotting import plot_stat_map
    from nilearn.image import smooth_img
    from scipy.stats import scoreatpercentile

    if isinstance(compressor, basestring):
        comp_name = compressor
    else:
        comp_name = compressor.__str__().split('(')[0]

    for i_c, comp in enumerate(components):
        path_mask = op.join(WRITE_DIR, '%s_%i-%i' % (comp_name,
                                                     n_comp, i_c + 1))
        nii_raw = masker.inverse_transform(comp)
        nii_raw.to_filename(path_mask + '.nii.gz')
        
        comp_z = zscore(comp)
        
        if perc is not None:
            cur_thresh = scoreatpercentile(np.abs(comp_z), per=perc)
            path_mask += '_perc%i' % perc
            print('Applying percentile %.2f (threshold: %.2f)' % (perc, cur_thresh))
        else:
            cur_thresh = threshold
            path_mask += '_thr%.2f' % cur_thresh
            print('Applying threshold: %.2f' % cur_thresh)

        nii_z = masker.inverse_transform(comp_z)
        gz_path = path_mask + '_zmap.nii.gz'
        nii_z.to_filename(gz_path)
        plot_stat_map(gz_path, bg_img='colin.nii', threshold=cur_thresh,
                      cut_coords=(0, -2, 0), draw_cross=False,
                      output_file=path_mask + 'zmap.png')
                      
        # optional: do smoothing
        if fwhm is not None:
            nii_z_fwhm = smooth_img(nii_z, fwhm=fwhm)
            plot_stat_map(nii_z_fwhm, bg_img='colin.nii', threshold=cur_thresh,
                          cut_coords=(0, -2, 0), draw_cross=False,
                          output_file=path_mask +
                          ('zmap_%imm.png' % fwhm))
Example 42
def make_new_noise(masker):
    # CONST
    n_random_niis = 1000
    n_random_foci = 200, 300  # random amount of foci per study
    fwhm_range = (4, 20)

    print "Inventing NOISE images from Gaussians..."
    # generate some noise niftis
    inds = np.where(masker.mask_img_.get_data())  # grid of locations
    x_inds, y_inds, z_inds = inds  # unpack tuple
    list_niis = []
    for inii in xrange(n_random_niis):
        # if inii % 24 == 0:
        #     print "%i/%i" % (inii + 1, n_random_niis)
        nfoci = np.random.randint(*n_random_foci)
        cur_img = np.zeros(masker.mask_img_.shape)
        for ifocus in xrange(nfoci):
            # find random point within mask
            i = np.random.randint(0, len(x_inds) - 1)
            x, y, z = x_inds[i], y_inds[i], z_inds[i]
            # put a dot there
            if np.random.randint(0, 2):
                cur_img[x, y, z] = 150
            else:
                cur_img[x, y, z] = -150

        # smooth current image of random foci
        cur_fwhm = np.random.randint(*fwhm_range)
        new_nii = smooth_img(
            nib.Nifti1Image(cur_img, masker.mask_img_.get_affine(),
                header=masker.mask_img_.get_header()), cur_fwhm)

        list_niis.append(new_nii)

    X_rest = masker.transform(list_niis)

    return np.vstack((X_rest, X_rest, X_rest, X_rest,
                      X_rest, X_rest, X_rest, X_rest))
Example 43
from nilearn import datasets, plotting, image

data = datasets.fetch_adhd()

mean_func = image.mean_img(data.func[0])

for smoothing in range(0, 25, 5):
    plotting.plot_epi(image.smooth_img(mean_func, smoothing),
                      title="Smoothing %imm" % smoothing)

As we vary the smoothing FWHM, note how we decrease the amount of noise,
but also lose spatial details. In general, the best amount of smoothing
for a given analysis depends on the spatial extent of the effects that
are expected.

"""

from nilearn import datasets, plotting, image

data = datasets.fetch_development_fmri(n_subjects=1)

# Print basic information on the dataset
print('First subject functional nifti image (4D) is located at: %s' %
      data.func[0])

first_epi_file = data.func[0]

# First, compute the mean image from the 4D series of images
mean_func = image.mean_img(first_epi_file)

# Then we smooth, with a varying amount of smoothing, from none to 20mm
# by increments of 5mm
for smoothing in range(0, 25, 5):
    smoothed_img = image.smooth_img(mean_func, smoothing)
    plotting.plot_epi(smoothed_img,
                      title="Smoothing %imm" % smoothing)


plotting.show()
Example 45
subject_IDs = [str(i).zfill(3) for i in range(1, 17)]
IDs = list(zip([run_ID for _ in range(16) for run_ID in run_IDs],
               [subject_ID for _ in range(3) for subject_ID in subject_IDs]))
IDs.sort()

# Do this for all subjects/runs:
for ID in IDs:
    run, subject = ID


    # Extract necessary data
    sub = ds005(subject, run)
    data = sub.filtered.data
    affine = sub.filtered.affine
    img = sub.filtered.img
    smooth_img = image.smooth_img(img, 6) # smoothed image


    # Save paths for input and output
    path_result = "results/run%s/visualization/sub%s/" % ID
    file_path = "results/run%s/glm/sub%s/" % ID

    try:
        os.makedirs(path_result)
    except OSError:
        if not os.path.isdir(path_result):
            raise
    
    t_map_gain_file = file_path + "t_stat_gain.nii.gz"
    t_map_loss_file = file_path + "t_stat_loss.nii.gz"
    t_map_dist_file = file_path + "t_stat_dist2indiff.nii.gz"
Example 46
n_samples, n_features = images_masked.shape
print n_samples, "subjects, ", n_features, "features"

### Perform massively univariate analysis with permuted OLS ###################
print "Massively univariate model"
neg_log_pvals, all_scores, _ = permuted_ols(
    cdr, images_masked, age,  # + intercept as a covariate by default
    n_perm=1000,
    n_jobs=-1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals).get_data()[..., 0]

### Show results
print "Plotting results"
# background anat
mean_anat = smooth_img(images[0], FWHM).get_data()
for img in images[1:]:
    mean_anat += smooth_img(img, FWHM).get_data()
mean_anat /= float(len(images))
ref_img = nibabel.load(images[0])
picked_slice = 36
vmin = -np.log10(0.1)  # 10% corrected
plt.figure(figsize=(5, 4))
p_ma = np.ma.masked_less(neg_log_pvals_unmasked, vmin)
plt.imshow(np.rot90(mean_anat[..., picked_slice]),
           interpolation='nearest', cmap=plt.cm.gray, vmin=0., vmax=1.)
im = plt.imshow(np.rot90(p_ma[..., picked_slice]),
                interpolation='nearest', cmap=plt.cm.autumn,
                vmin=vmin, vmax=np.amax(neg_log_pvals_unmasked))
plt.axis('off')
plt.colorbar(im)
Example 47
# Import the os module, for file manipulation
import os

#########################################################################
# Let us use a Nifti file that is shipped with nilearn
from nilearn.datasets import data
anat_filename = os.path.join(os.path.dirname(data.__file__),
                             'avg152T1_brain.nii.gz')
print('anat_filename: %s' % anat_filename)

#########################################################################
# Using simple image nilearn functions
from nilearn import image
# functions containing 'img' can take either a filename or an image as input
smooth_anat_img = image.smooth_img(anat_filename, 3)

# While we are giving a file name as input, the object that is returned
# is a 'nibabel' object. It has data, and an affine
anat_data = smooth_anat_img.get_data()
print('anat_data has shape: %s' % str(anat_data.shape))
anat_affine = smooth_anat_img.get_affine()
print('anat_affine:\n%s' % anat_affine)

# Finally, it can be passed to a nilearn function
smooth_anat_img = image.smooth_img(smooth_anat_img, 3)

#########################################################################
# Visualization
from nilearn import plotting
cut_coords = (0, 0, 0)
Example 48
from nilearn.input_data import MultiNiftiMasker
from scipy.stats import norm
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
from nilearn.plotting import plot_glass_brain
from scipy.stats import ttest_1samp
import sys
from nibabel import load
from nilearn.image import smooth_img

models = sys.argv[1:]

mask = 'brainmask_group_template.nii.gz'

valid_subjs = [1,2,5,6,7,8,9,11,12,14,15,16,17,18,19]

scores_bsc = np.arctanh(apply_mask(smooth_img(sorted(glob.glob('MaThe/avg_maps/model_{}_*whole*'.format(models[0]))), 3.0), mask_img=mask)).astype('float32')
scores_mfs = np.arctanh(apply_mask(smooth_img(sorted(glob.glob('MaThe/avg_maps/model_{}_*whole*'.format(models[1]))), 3.0), mask_img=mask)).astype('float32')
scores_bsc[scores_bsc<0] = 0
scores_mfs[scores_mfs<0] = 0
diff_scores = scores_bsc - scores_mfs
mean_diff = diff_scores.mean(axis=0)
tscores, p_values = ttest_1samp(diff_scores, 0, axis=0)
p_values[np.isnan(p_values)] = 1
which_ones = p_values > 0.05
mean_diff[which_ones] = 0
tscores[which_ones] = 0
display = fsf.plot_diff_avg_whole(mean_diff, 0.001)
display.savefig('mean_unthresh_diff_model_whole_3mm_{}.svg'.format('_'.join(models)))
display.savefig('mean_unthresh_diff_model_whole_3mm_{}.png'.format('_'.join(models)))
fsf.save_map_avg_whole(mean_diff, threshold=None, model='3mm_diff_'+'_'.join(models))
display = fsf.plot_diff_avg_whole(tscores, 0.001)
# Block Design using original data
start = np.loadtxt('../../../data/convo/' + f1 + '_conv001.txt')[4:]
end = np.loadtxt('../../../data/convo/' + f1 + '_conv004.txt')[4:]
convo = np.loadtxt('../../../data/convo/' + f1 + '_conv005.txt')[4:]
# Building design X matrix
design = np.ones((len(convo), 4))
design[:, 1] = start
design[:, 2] = end
design[:, 3] = convo
X = design
beta, errors, RSS, df = linear_modeling.beta_est(dat, X)

pval = normal_assumption.sw(errors)

# smoothing
fmri_img = image.smooth_img('../../../data/sub001/BOLD/' + f1 + '/filtered_func_data_mni.nii.gz', fwhm=6)
mean_img = image.mean_img(fmri_img)
# Thresholding
p_val = np.ones(vol_shape + (pval.shape[1],))
p_val[in_brain_mask, :] = pval

log_p_values = -np.log10(p_val[..., 0])
log_p_values[np.isnan(log_p_values)] = 0.
log_p_values[log_p_values > 10.] = 10.
log_p_values[log_p_values < -np.log10(0.05/137)] = 0
plot_stat_map(nib.Nifti1Image(log_p_values, img.get_affine()),
              mean_img, title='SW Test p-values for Block Design, original', 
              annotate=False,colorbar=True)
plt.savefig("../../../data/GLS/block_normality_test.png")
#--------------------------------------------------------------------------
# Block Design using smoothed data
Example 50
    mask_img = load_mni152_brain_mask()
    masker = NiftiMasker(
        mask_img=mask_img, memory='nilearn_cache', memory_level=1)
    masker = masker.fit()

    # Images may fail to be transformed, and are of different shapes,
    # so we need to transform one-by-one and keep track of failures.
    X = []
    is_usable = np.ones((len(images),), dtype=bool)

    for index, image_path in enumerate(images):
        # load image and remove nan and inf values.
        # applying smooth_img to an image with fwhm=None simply cleans up
        # non-finite values but otherwise doesn't modify the image.
        image = smooth_img(image_path, fwhm=None)
        try:
            X.append(masker.transform(image))
        except Exception as e:
            meta = nv_data['images_meta'][index]
            print("Failed to mask/reshape image: id: {0}; "
                  "name: '{1}'; collection: {2}; error: {3}".format(
                      meta.get('id'), meta.get('name'),
                      meta.get('collection_id'), e))
            is_usable[index] = False

# Now reshape list into 2D matrix, and remove failed images from terms
X = np.vstack(X)
term_weights = term_weights[is_usable, :]

Example 51
# MNI152_FILE_PATH is nothing more than a string with a path pointing to
# a nifti image. You can replace it with a string pointing to a file on
# your disk. Note that it should be a 3D volume, and not a 4D volume.

#########################################################################
# Simple image manipulation: smoothing
# -------------------------------------
#
# Let's use an image-smoothing function from nilearn:
# :func:`nilearn.image.smooth_img`
#
# Functions containing 'img' can take either a filename or an image as input.
#
# Here we give as inputs the image filename and the smoothing value in mm
from nilearn import image
smooth_anat_img = image.smooth_img(MNI152_FILE_PATH, fwhm=3)

# While we are giving a file name as input, the function returns
# an in-memory object:
print(smooth_anat_img)

#########################################################################
# This is an in-memory object. We can pass it to a nilearn function, for
# instance to look at it
plotting.plot_img(smooth_anat_img)

#########################################################################
# We could also pass it to the smoothing function
more_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)
plotting.plot_img(more_smooth_anat_img)
Example 52
from nilearn.masking import apply_mask
from nilearn.input_data import MultiNiftiMasker
from scipy.stats import norm
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
from nilearn.plotting import plot_glass_brain
from nilearn.image import smooth_img
from scipy.stats import ttest_1samp
import sys
from nibabel import load

model = sys.argv[1]

mask = 'brainmask_group_template.nii.gz'

valid_subjs = [1,2,5,6,7,8,9,11,12,14,15,16,17,18,19]

scores = apply_mask(smooth_img(sorted(glob.glob('MaThe/avg_maps/model_depcor_{}_*'.format(model))), 5.0), mask_img=mask).astype('float32')
mean_scores = scores.mean(axis=0)
tscores, p_values = ttest_1samp(scores, 0, axis=0)
p_values[np.isnan(p_values)] = 1
which_ones = p_values > 0.05
mean_scores[which_ones] = 0
tscores[which_ones] = 0
display = fsf.plot_diff_avg_whole(mean_scores, 0.001)
display.savefig('mean_5mm_unthresh_depcor_model_whole_{}.svg'.format(model))
display.savefig('mean_5mm_unthresh_depcor_model_whole_{}.png'.format(model))
fsf.save_map_avg_whole(mean_scores, threshold=None, model='5mm_depcor_'+model)
display = fsf.plot_diff_avg_whole(tscores, 0.001)
display.savefig('ttest_5mm_unthresh_depcor_model_whole_{}.svg'.format(model))
display.savefig('ttest_5mm_unthresh_depcor_model_whole_{}.png'.format(model))
import numpy as np
import featurespace_fun as fsf
import matplotlib.pyplot as plt
from nilearn.masking import apply_mask
from nilearn.image import smooth_img
from scipy.stats import norm
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
from scipy.stats import ttest_1samp
import sys
from scipy.stats.mstats import trimmed_mean_ci
from scipy.stats import ttest_1samp, trim_mean

models = sys.argv[1:]

#models = ['logBSC_H200_ds_conv', 'logMFS_ds']

mask = 'brainmask_group_template.nii.gz'

scores_bsc = np.arctanh(apply_mask(smooth_img(glob.glob('MaThe/avg_maps/model_{}_*whole*'.format(models[0])), fwhm=3.0), mask_img=mask))
scores_mfs = np.arctanh(apply_mask(smooth_img(glob.glob('MaThe/avg_maps/model_{}_*whole*'.format(models[1])), fwhm=3.0), mask_img=mask))
diff_scores = scores_bsc - scores_mfs
mean_diff = trim_mean(diff_scores, 0.08, axis=0)
trim_mean_ci = trimmed_mean_ci(diff_scores, (0.08, 0.08), axis=0)
which_ones = np.logical_not(np.logical_or(trim_mean_ci[0,:] > 0, trim_mean_ci[1,:] < 0))
mean_diff[which_ones] = 0

display = fsf.plot_diff_avg_whole(mean_diff, 0.001)
display.savefig('mean_diff_smoothed_trim_model_{}.svg'.format('_'.join(models)))
display.savefig('mean_diff_smoothed_trim_model_{}.png'.format('_'.join(models)))
fsf.save_map_avg_whole(mean_diff, threshold=None, model='diff_smooth_trim_'+'_'.join(models))
Example 54
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.colorbar(im, cax=cax)
plt.show()

# Visualizing p values for the middle slice in gray
fig, axes = plt.subplots(nrows=3, ncols=4)
for i, ax in zip(range(0,design_mat_filter.shape[1] - 1,1), axes.flat):
    im = ax.imshow(p_val[:, :, 45, i], cmap = 'gray')
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.colorbar(im, cax=cax)
plt.show()



fmri_img = image.smooth_img('../../../data/sub001/BOLD/task001_run001/filtered_func_data_mni.nii.gz', fwhm=6)
mean_img = image.mean_img(fmri_img)
# Thresholding
p_val = np.ones(vol_shape + (p_mat.shape[0],))
p_val[in_brain_mask, :] = p_mat.T

log_p_values = -np.log10(p_val[..., 6])
log_p_values[np.isnan(log_p_values)] = 0.
log_p_values[log_p_values > 10.] = 10.
log_p_values[log_p_values < -np.log10(0.05/137)] = 0
plot_stat_map(nib.Nifti1Image(log_p_values, img.get_affine()),
              mean_img, title='Thresholded p-values after Filtering (beta 6)', annotate=False,
              colorbar=True)

################### Noise from our model
# voxel time course
Example 55
print('anat_filename: %s' % anat_filename)

# Using nibabel.load to load existing Nifti image #############################
import nibabel
anat_img = nibabel.load(anat_filename)

# Accessing image data and affine #############################################
anat_data = anat_img.get_data()
print('anat_data has shape: %s' % str(anat_data.shape))
anat_affine = anat_img.get_affine()
print('anat_affine:\n%s' % anat_affine)

# Using image in nilearn functions ############################################
from nilearn import image
# functions containing 'img' can take either a filename or an image as input
smooth_anat_img = image.smooth_img(anat_filename, 6)
smooth_anat_img = image.smooth_img(anat_img, 6)


# Visualization ###############################################################
from nilearn import plotting
cut_coords = (0, 0, 0)
plotting.plot_anat(anat_filename, cut_coords=cut_coords,
                   title='Anatomy image')
plotting.plot_anat(smooth_anat_img,
                   cut_coords=cut_coords,
                   title='Smoothed anatomy image')

# Saving image to file ########################################################
smooth_anat_img.to_filename('smooth_anat_img.nii.gz')
Example 56
# When using methods that are not robust to noise, it is useful to apply a
# spatial filtering kernel on the data. Such data smoothing is usually applied
# using a Gaussian function with 4mm to 12mm full-width at half-maximum (this
# is where the FWHM comes from). The function :func:`nilearn.image.smooth_img`
# accounts for potential anisotropy in the image affine (i.e., non-identical
# voxel sizes along the three dimensions). Like most nilearn functions,
# smooth_img can also take file names as input parameters.

# Smooth the data using image processing module from nilearn
from nilearn import image

# Functional data
fmri_filename = haxby_dataset.func[0]
# smoothing: the first argument is the functional data filename and the second
# is the smoothing value (FWHM in mm). The output is returned as a Nifti image.
fmri_img = image.smooth_img(fmri_filename, fwhm=6)

# Visualize the mean of the smoothed EPI image using plotting function
# `plot_epi`
from nilearn.plotting import plot_epi

# First, compute the voxel-wise mean of the smoothed EPI image (first argument)
# using the image processing module `image`
mean_img = image.mean_img(fmri_img)
# Second, we visualize the mean image with coordinates positioned manually
plot_epi(mean_img, title='Smoothed mean EPI', cut_coords=cut_coords)

##############################################################################
# Given the smoothed functional data stored in variable 'fmri_img', we then
# select two features of interest with face and house experimental conditions.
# The method we will be using is a simple Student's t-test. The below section
Example 57
def do_subject_glm(subject_id):
    subject_output_dir = os.path.join(output_dir, subject_id)

    # make design matrices
    design_matrices = []
    func = []
    anat = os.path.join(subject_output_dir, "anatomy", "whighres001_brain.nii")
    for run_path in sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/task*"))):
        run_id = os.path.basename(run_path)
        run_func = glob.glob(os.path.join(subject_output_dir, "BOLD", run_id,
                                          "wrbold*.nii"))
        assert len(run_func) == 1
        run_func = run_func[0]
        run_onset_paths = sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/%s/*" % run_id)))
        onsets = map(np.loadtxt, run_onset_paths)
        conditions = np.hstack(
            [[condition_keys["cond%03i" % (c + 1)]] * len(onsets[c])
             for c in range(len(run_onset_paths))])
        onsets = np.vstack((onsets))
        onsets *= tr
        run_func = nibabel.load(run_func)
        func.append(run_func)
        n_scans = run_func.shape[-1]
        onset, duration, modulation = onsets.T

        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        paradigm = pd.DataFrame(dict(name=conditions, onset=onset,
                                     duration=duration, modulation=modulation))
        design_matrix = make_design_matrix(frametimes, paradigm,
                                           hrf_model=hrf_model,
                                           drift_model=drift_model,
                                           period_cut=hfcut)
        design_matrices.append(design_matrix)
    n_runs = len(func)

    # specify contrasts
    _, _, names = check_design_matrix(design_matrix)
    n_columns = len(names)
    contrast_matrix = np.eye(n_columns)
    contrasts = {}
    for c in range(len(condition_keys)):
        contrasts[names[2 * c]] = contrast_matrix[2 * c]
    contrasts["avg"] = np.mean(contrasts.values(), axis=0)

    # more interesting contrasts
    contrasts_ = {}
    for contrast, val in contrasts.items():
        if not contrast == "avg":
            contrasts_["%s_minus_avg" % contrast] = val - contrasts["avg"]
    contrasts = contrasts_

    # fit GLM
    from nilearn.image import smooth_img
    func = smooth_img(func, fwhm=8.)
    print 'Fitting a GLM (this takes time)...'
    fmri_glm = FMRILinearModel(func, [check_design_matrix(design_matrix)[1]
                                      for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_output_dir, "mask.nii")
    print "Saving mask image to %s ..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrast maps
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * n_runs, con_id=contrast_id, output_z=True,
            output_stat=True, output_effects=True, output_variance=True)
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # # generate stats report
    # stats_report_filename = os.path.join(subject_output_dir, "reports",
    #                                      "report_stats.html")
    # generate_subject_stats_report(
    #     stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat=anat,
    #     threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr,
    #     subject_id="sub001", n_scans=n_scans, hfcut=hfcut,
    #     paradigm=paradigm, frametimes=frametimes,
    #     drift_model=drift_model, hrf_model=hrf_model)
    # ProgressReport().finish_dir(subject_output_dir)

    return dict(subject_id=subject_id, mask=mask_path,
                effects_maps=effects_maps, z_maps=z_maps, contrasts=contrasts)
Example 58
brain_mask = sys.argv[2]
gm_mask = sys.argv[3]
max_lag = int(sys.argv[4])
out_dir = sys.argv[5]
out_prefix = sys.argv[6]
smooth_kernel = sys.argv[7]

lags = range(-max_lag, max_lag+1)

brain_mask_img = nib.load(brain_mask)
brain_mask_data = brain_mask_img.get_data().astype(bool)

gm_mask_img = nib.load(gm_mask)
gm_mask_data = gm_mask_img.get_data().astype(bool)

epi_img = nib.load(epi)
epi_img_resampled = image.resample_img(epi_img, target_affine=brain_mask_img.get_affine(), target_shape=brain_mask_img.shape)
epi_data_resampled = epi_img_resampled.get_data()

lag_map_data = gen_lag_map(epi_data_resampled, brain_mask_data, gm_mask_data, lags)

lag_map_image = masking.unmask(lag_map_data, brain_mask_img)

lag_map_image_smoothed = image.smooth_img(lag_map_image, float(smooth_kernel))

nib.save(lag_map_image, os.path.join(out_dir, out_prefix+'_lag{}.nii.gz'.format(str(max_lag))))
nib.save(lag_map_image_smoothed, os.path.join(out_dir, out_prefix+'_lag{}_smoothed{}.nii.gz'.format(str(max_lag),smooth_kernel)))



Example 59
        Conv3D(16, kernel_size=3, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3DTranspose(16, kernel_size=3, strides=2, activation='relu',
                        padding="same"),
        BatchNormalization(),
        Conv3D(1, kernel_size=3, activation=None, padding="same"),
    ], name="decoder")
    autoencoder = Sequential([encoder, decoder], name="autoencoder")
    return encoder, decoder, autoencoder



if __name__ == "__main__":
    data = datasets.fetch_haxby(subjects=(2,))
    fmri_filename = data.func[0]
    smoothed_img = image.smooth_img(fmri_filename, 2)
    
    smoothed_data = smoothed_img.get_data().transpose(3, 0, 1, 2)
    #mean = smoothed_data.mean(axis=0)
    #smoothed_data -= mean
    #scale = smoothed_data.std(axis=0) + 1e-6
    scale = smoothed_data.std()  # global scale
    smoothed_data /= scale
    smoothed_data = smoothed_data[:, :, :, :, None]
    input_shape = smoothed_data.shape[1:]
    smoothed_data_train = smoothed_data[:1200]
    smoothed_data_test = smoothed_data[1200:]
    
    encoder, decoder, autoencoder = make_models(input_shape=input_shape)
    autoencoder.compile(optimizer=Adam(lr=0.001), loss="mse")
haxby_labels = np.genfromtxt(haxby_files.session_target[0], skip_header=1,
                          usecols=[0], dtype=basestring)

### Visualization function ####################################################

import matplotlib.pyplot as plt
from nilearn.plotting import plot_epi, plot_stat_map, plot_roi
from nilearn.input_data import NiftiLabelsMasker

plt.close('all')

### Find voxels of interest ###################################################

# Smooth the data
from nilearn import image
fmri_img = image.smooth_img(haxby_files.func[0], fwhm=6)

# Plot the mean image
fig_id = plt.subplot(2, 1, 1)
mean_img = image.mean_img(fmri_img)
plot_epi(mean_img, title='Smoothed mean EPI', cut_coords=(coronal, sagittal,
    axial), axes=fig_id)

# Run a T-test for face and houses
from scipy import stats
fmri_data = fmri_img.get_data()
_, p_values = stats.ttest_ind(fmri_data[..., haxby_labels == 'face'],
                              fmri_data[..., haxby_labels == 'house'],
                              axis=-1)

# Use a log scale for p-values