def _clean(
        self,
        img: Nifti1Image,
        confounds: npt.ArrayLike,
        clean_settings: Optional[dict] = None,
    ) -> Nifti1Image:
        """
        Perform standard Nilearn signals.clean
        """

        if len(img.shape) != 4:
            logging.error("Image is not a time-series image!")
            raise TypeError

        try:
            t_r = img.header["pixdim"][4]
        except IndexError:
            raise

        if not clean_settings:
            clean_settings = self.clean_settings

        return nimg.clean_img(img,
                              confounds=confounds,
                              t_r=t_r,
                              **clean_settings)
def map_load_fmri_image_3d(dp, target_name):
    """
    Load one fMRI run, regress out confounds, and return CNN-ready
    arrays (one 3D volume per sample) plus integer-encoded labels.

    Parameters
    ----------
    dp : sequence
        (fmri_file, confound_file, label_trials); ``label_trials`` is a
        pandas Series of per-volume trial labels (``.loc`` is used on it).
    target_name : sequence of str
        Trial types to keep, e.g. ['hand', 'foot', 'tongue'].

    Returns
    -------
    tuple of np.ndarray
        (volumes, labels) with time as the leading axis of ``volumes``.
    """
    fmri_file = dp[0]
    confound_file = dp[1]
    label_trials = dp[2]

    ###remove confound effects
    # BUG FIX: the original read the module-level names
    # ``confound_files[0]`` / ``fmri_files[0]`` instead of the items
    # unpacked from ``dp``, so every call processed the same run.
    confound = np.loadtxt(confound_file)
    fmri_data_clean = image.clean_img(fmri_file,
                                      detrend=True,
                                      standardize=True,
                                      confounds=confound)

    ##pre-select task types
    trial_mask = pd.Series(label_trials).isin(
        target_name)  ##['hand', 'foot','tongue']
    # NOTE(review): get_data() is deprecated in nibabel; get_fdata()
    # would change the dtype, so it is kept as-is here.
    fmri_data_cnn = image.index_img(fmri_data_clean,
                                    np.where(trial_mask)[0]).get_data()
    ###use each slice along z-axis as one sample
    label_data_trial = np.array(label_trials.loc[trial_mask])
    le = preprocessing.LabelEncoder()
    le.fit(target_name)
    label_data_cnn = le.transform(
        label_data_trial
    )  ##np_utils.to_categorical(): convert label vector to matrix

    img_rows, img_cols, img_deps = fmri_data_cnn.shape[:-1]
    # Move time to the leading axis: one 3D volume per sample.
    fmri_data_cnn_test = np.transpose(fmri_data_cnn, (3, 0, 1, 2))
    label_data_cnn_test = label_data_cnn.flatten()
    print(fmri_file, fmri_data_cnn_test.shape, label_data_cnn_test.shape)

    return fmri_data_cnn_test, label_data_cnn_test
Exemplo n.º 3
0
    def remove_confounds(nii,
                         confounds,
                         t_r=2.0,
                         confound_names=None,
                         lp=None):
        """
        Regress confounds out of a NIfTI time series.

        Parameters
        ----------
        nii : str
            Path to the 4D functional image.
        confounds : str
            Path to a tab-separated confounds table (fMRIPrep style).
        t_r : float
            Repetition time in seconds (default 2.0).
        confound_names : list of str, optional
            Columns to regress out; defaults to CompCor components plus
            any column containing 'X', 'Y' or 'Z'.
        lp : float, optional
            Low-pass cutoff in Hz; filtering is skipped when falsy.

        Returns
        -------
        str
            Path of the saved residual image (cwd/resid.nii.gz).
        """
        import nibabel as nib
        import pandas as pd
        import os
        from nilearn.image import clean_img
        img = nib.load(nii)
        confounds_pd = pd.read_csv(confounds, sep="\t")
        if confound_names is None:
            confound_names = [
                col for col in confounds_pd.columns
                if 'CompCor' in col or 'X' in col or 'Y' in col or 'Z' in col
            ]
        # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0;
        # select the columns and convert with to_numpy() instead.
        confounds_np = confounds_pd[confound_names].to_numpy()
        kwargs = {'imgs': img, 'confounds': confounds_np, 't_r': t_r}
        if lp:
            kwargs['low_pass'] = lp
        cleaned_img = clean_img(**kwargs)
        working_dir = os.getcwd()
        resid_nii = os.path.join(working_dir, 'resid.nii.gz')
        nib.save(cleaned_img, resid_nii)

        return resid_nii
def get_avmovie_data(sub, run):
    """Get a clean nifti image for one subject and one avmovie run"""
    path = NIFTI_SRC.format(sub=sub, run=run)
    print('Reading {}'.format(path))
    # Clean first, then apply the project-wide smoothing kernel.
    cleaned = image.clean_img(path)
    smoothed = image.smooth_img(cleaned, data.SMOOTHING)
    print('Done')
    return smoothed
Exemplo n.º 5
0
    def _temporal_filter(bold, lp, hp):
        """Band-pass filter a BOLD image and save it in the cwd.

        Returns the path of the filtered image (bold_tfilt.nii.gz).
        """
        import os

        import nibabel as nib
        from nilearn.image import clean_img

        # NOTE(review): no t_r is passed, so nilearn's default TR is
        # used for the filter — confirm that matches the acquisition.
        filtered = clean_img(bold, low_pass=lp, high_pass=hp)
        out_file = os.path.join(os.getcwd(), 'bold_tfilt.nii.gz')
        nib.save(filtered, out_file)
        return out_file
Exemplo n.º 6
0
def subject_data(sub):
    """Concatenate, clean (per 90-volume session) and smooth one subject."""
    # Four sessions of 90 volumes each: 1..4 repeated, as floats,
    # identical to filling a zeros(360) array slice by slice.
    sessions = np.repeat([1.0, 2.0, 3.0, 4.0], 90)
    concatenated = image.concat_imgs(
        src.format(sub=sub) for src in NIFTI_SRC)
    cleaned = image.clean_img(concatenated, sessions=sessions)
    return image.smooth_img(cleaned, SMOOTHING)
Exemplo n.º 7
0
def run_clean_img(filename, t_r, low_pass, high_pass):
    """Detrend and band-pass filter a run; save and return the new path."""
    import os
    import nibabel as nib
    from nilearn.image import clean_img

    bandpassed = clean_img(filename,
                           detrend=True,
                           t_r=t_r,
                           low_pass=low_pass,
                           high_pass=high_pass)

    out_fname = os.path.abspath('filtered_func.nii.gz')
    nib.save(bandpassed, out_fname)
    print('bandpassed run saved to %s' % out_fname)

    return out_fname
Exemplo n.º 8
0
    def _run_interface(self, runtime):
        """Denoise a preprocessed fMRI image with the selected pipeline.

        Loads the fMRIPrep output (or its ICA-AROMA variant), reads the
        prepared confounds table, optionally smooths, regresses the
        confounds out with band-pass filtering, and writes the denoised
        image under ``self.inputs.output_dir``.
        """

        smoothing = self.inputs.smoothing
        pipeline_name = self.inputs.pipeline['name']
        pipeline_aroma = self.inputs.pipeline['aroma']
        img = nb.load(self.inputs.fmri_prep)
        if pipeline_aroma:
            # AROMA pipelines operate on the AROMA-cleaned image instead.
            if not self.inputs.fmri_prep_aroma:
                raise ValueError("No ICA-AROMA files found")

            img_aroma = nb.load(self.inputs.fmri_prep_aroma)
            img = img_aroma

        # Handle possibility of null pipeline: an empty confounds file
        # means "no confound regression" (conf stays None).
        try:
            conf = pd.read_csv(
                self.inputs.conf_prep,
                delimiter='\t',
                #low_memory=False,
                #engine='python'
            )
            conf = conf.values
        except pd.errors.EmptyDataError:
            conf = None

        # Determine proper TR (needed for the temporal filter).
        task = self.inputs.entities['task']
        if task in self.inputs.tr_dict:
            tr = self.inputs.tr_dict[task]
        else:
            raise KeyError(f'{task} TR not found in tr_dict')

        # NOTE(review): smoothing is skipped for AROMA pipelines —
        # presumably the AROMA input is already smoothed; confirm.
        if smoothing and not pipeline_aroma:
            img = smooth_img(img, fwhm=6)

        denoised_img = clean_img(img,
                                 confounds=conf,
                                 high_pass=self.inputs.high_pass,
                                 low_pass=self.inputs.low_pass,
                                 t_r=tr)  # TODO: Add masking: mask_img

        _, base, _ = split_filename(self.inputs.fmri_prep)
        denoised_file = f'{self.inputs.output_dir}/{base}_denoised_pipeline-{pipeline_name}.nii.gz'

        nb.save(denoised_img, denoised_file)

        self._results['fmri_denoised'] = denoised_file

        return runtime
Exemplo n.º 9
0
    def _run_interface(self, runtime):
        """Regress the prepared confounds out of the preprocessed image
        and record the denoised file path in the interface results."""
        fmri_path = self.inputs.fmri_prep
        img = nb.load(fmri_path)
        confounds = pd.read_csv(self.inputs.conf_prep, delimiter='\t').values

        denoised = clean_img(img, confounds=confounds)

        _, base, _ = split_filename(fmri_path)
        out_file = f'{self.inputs.output_dir}/{base}_denoised.nii'
        nb.save(denoised, out_file)
        self._results['fmri_denoised'] = out_file

        return runtime
    def _transform(self, img: Nifti1Image, confounds: pd.DataFrame,
                   fd_thres: float) -> Nifti1Image:
        """Censor high-motion frames, filter, then denoise a run.

        Frames whose framewise displacement exceeds ``fd_thres`` are
        censored; the image and the confound design matrix are filtered
        with the same operation so the final regression is consistent.
        """

        # NIfTI stores the repetition time in pixdim[4].
        t_r = img.header["pixdim"][4]
        mask_frames, censor_frames = self._get_censor_mask(
            confounds["framewise_displacement"], fd_thres)

        # Step: 1, 2, 3 — censor/filter the image and, identically,
        # the (transposed) design matrix built from the confounds.
        c_img = self._censor_and_filter(img, mask_frames, censor_frames, t_r)
        confounds = self._generate_design(confounds).T
        c_confounds = self._censor_and_filter(confounds, mask_frames,
                                              censor_frames, t_r)

        # Step: 4, 5 — regress the filtered confounds out of the
        # retained (non-censored) frames only.
        return nimg.clean_img(
            _get_vol_index(c_img, mask_frames),
            confounds=c_confounds[:, mask_frames].T,
            detrend=self._detrend,
            standardize=self._standardize,
        )
Exemplo n.º 11
0
    def _run_interface(self, runtime):
        """Denoise the validated fMRIPrep image and save it under a
        pipeline-specific BIDS-style filename.

        Refuses to overwrite an existing output, which would indicate
        the same input was denoised twice.
        """

        fmri_file = self._validate_fmri_prep_files()
        entities = parse_file_entities(fmri_file)
        self._validate_filtering(entities['task'])
        self._load_confouds()
        entities = parse_file_entities(self._fmri_file)
        fmri_denoised = clean_img(nb.load(self._fmri_file),
                                  confounds=self._confounds,
                                  **self._filtering_kwargs)

        entities['pipeline'] = self.inputs.pipeline['name']
        fmri_denoised_fname = join(
            self.inputs.output_dir,
            build_path(entities, self.fmri_denoised_pattern, False))
        # BUG FIX: this guard used ``assert``, which is silently
        # stripped when Python runs with -O; raise explicitly instead.
        if exists(fmri_denoised_fname):
            raise FileExistsError(
                f"Denoising is run twice at {self._fmri_file} "
                f"with result {fmri_denoised_fname}")
        nb.save(fmri_denoised, fmri_denoised_fname)
        self._results['fmri_denoised'] = fmri_denoised_fname

        return runtime
Exemplo n.º 12
0
    def denoise(self):
        """Regress confounds out of every preprocessed task run and
        save the denoised image next to the original."""

        for task in tasks:
            # Session-1 acquisitions for subjects 105/106 used a
            # longer repetition time.
            if tasks[task]['ses'] == 1 and self.subj.num in [105, 106]:
                tr = 2.23
            else:
                tr = 2

            # Skip this run for subject 107 (reason not recorded here).
            if task == 'localizer_run-02' and self.subj.num == 107:
                continue

            bold_in = os.path.join(
                self.subj.func,
                '%s_ses-%s_task-%s_space-%s_desc-preproc_bold.nii.gz'
                % (self.subj.fsub, tasks[task]['ses'], task, self.space))
            bold_out = os.path.join(
                self.subj.func,
                '%s_ses-%s_task-%s_space-%s_desc-preproc_denoised_bold.nii.gz'
                % (self.subj.fsub, tasks[task]['ses'], task, self.space))
            confounds = os.path.join(self.subj.model_dir, task,
                                     'confounds.txt')

            denoised = clean_img(nib.load(bold_in), detrend=False,
                                 standardize=True, confounds=confounds,
                                 low_pass=None, high_pass=None, t_r=tr,
                                 ensure_finite=True, mask_img=None)

            nib.save(denoised, bold_out)
    def _transform(self, img: Nifti1Image, confounds: pd.DataFrame,
                   fd_thres: float) -> Nifti1Image:
        """Censor, denoise, interpolate, band-pass filter, re-censor.

        High-motion frames (framewise displacement above ``fd_thres``)
        are dropped before confound regression, interpolated back so
        the temporal filter sees a regular grid, then dropped again.
        """

        # NIfTI stores the repetition time in pixdim[4].
        t_r = img.header["pixdim"][4]
        mask_frames, censor_frames = self._get_censor_mask(
            confounds["framewise_displacement"], fd_thres)

        # Step: 1, 2 — regress the censored design matrix out of the
        # retained frames only.
        confounds = self._generate_design(confounds)[mask_frames, :]
        clean_img = self._clean(_get_vol_index(img, mask_frames), confounds)

        # Step: 3 — fill the censored frames by interpolation so the
        # band-pass filter operates on evenly sampled data.
        clean_img = _interpolate_frames(clean_img, mask_frames, censor_frames,
                                        t_r)

        # Step: 4 — temporal band-pass filter.
        out_img = nimg.clean_img(clean_img,
                                 low_pass=self._low_pass,
                                 high_pass=self._high_pass,
                                 t_r=t_r)

        # Step: 5 — drop the interpolated (censored) frames again.
        return _get_vol_index(out_img, mask_frames)
Exemplo n.º 14
0
def main(subject, session, bids_folder='/data2/ds-risk'):
    """Compute and plot RETROICOR R2 maps for one subject/session.

    For every run, regresses the RETROICOR physiological regressors
    (cardiac, respiratory, interaction) out of the preprocessed BOLD
    series, writes per-modality R2 images, and saves a stat-map figure.

    Parameters
    ----------
    subject, session : str
        BIDS subject and session labels (without prefixes).
    bids_folder : str
        Root of the BIDS dataset containing a ``derivatives`` folder.
    """

    derivatives = op.join(bids_folder, 'derivatives')

    # Build the 18 RETROICOR regressor columns: a sin/cos pair per
    # Fourier order, with 3 cardiac, 4 respiratory and 2 interaction orders.
    columns = []
    for n, modality in zip([3, 4, 2], ['cardiac', 'respiratory', 'interaction']):
        for order in range(1, n + 1):
            columns += [(modality, order, 'sin'), (modality, order, 'cos')]

    columns = pd.MultiIndex.from_tuples(columns, names=['modality', 'order', 'type'])

    # Prepare the T1w underlay; fall back to the ses-7t1 layout when the
    # session-agnostic anat folder does not exist.
    t1w = op.join(derivatives, 'fmriprep', f'sub-{subject}', 'anat', f'sub-{subject}_desc-preproc_T1w.nii.gz')

    if not op.exists(t1w):
        print(f'{t1w} does not exist!')
        t1w = op.join(derivatives, 'fmriprep', f'sub-{subject}', 'ses-7t1', 'anat', f'sub-{subject}_ses-7t1_desc-preproc_T1w.nii.gz')

    t1w_mask = op.join(derivatives, 'fmriprep', f'sub-{subject}', 'anat', f'sub-{subject}_desc-brain_mask.nii.gz')
    if not op.exists(t1w_mask):
        t1w_mask = op.join(derivatives, 'fmriprep', f'sub-{subject}', 'ses-7t1', 'anat', f'sub-{subject}_ses-7t1_desc-brain_mask.nii.gz')

    # Skull-strip and clip intensities for a cleaner underlay.
    t1w = image.math_img('t1w*mask', t1w=t1w, mask=t1w_mask)
    t1w = image.math_img('np.clip(t1w, 0, np.percentile(t1w, 95))', t1w=t1w)

    # Get session info (run count and task name depend on session type).
    if session[-1] == '1':
        runs = range(1, 5)
        task = 'mapper'
    elif session[-1] == '2':
        runs = range(1, 9)
        task = 'task'
    else:
        # BUG FIX: previously an unmatched session fell through
        # silently, leaving ``runs``/``task`` undefined and raising a
        # NameError further down.
        raise ValueError(
            f"Unexpected session {session!r}: expected it to end in '1' or '2'")

    # Make figure folder
    figure_dir = op.join(derivatives, 'physioplots', f'sub-{subject}', f'ses-{session}', 'func')
    if not op.exists(figure_dir):
        os.makedirs(figure_dir)

    # Loop over runs
    for run in runs:
        confounds = pd.read_csv(op.join(derivatives, 'physiotoolbox', f'sub-{subject}', f'ses-{session}', 'func',
                                 f'sub-{subject}_ses-{session}_task-{task}_run-{run}_desc-retroicor_timeseries.tsv'),
                               usecols=range(18), names=columns, sep='\t')

        im = image.load_img(op.join(derivatives, 'fmriprep', f'sub-{subject}', f'ses-{session}', 'func',
                                 f'sub-{subject}_ses-{session}_task-{task}_run-{run}_space-T1w_desc-preproc_bold.nii.gz'))

        # Standardize the raw series once, then regress each confound
        # (sub)set out of copies so the R2 maps share one baseline.
        im = image.clean_img(im,  detrend=False, standardize=True)
        clean_im_all = image.clean_img(im,  confounds=confounds.values, detrend=False, standardize=False)
        clean_im_resp = image.clean_img(im,  confounds=confounds['respiratory'].values, detrend=False, standardize=False)
        clean_im_cardiac = image.clean_img(im,  confounds=confounds['cardiac'].values, detrend=False, standardize=False)
        clean_im_interaction = image.clean_img(im,  confounds=confounds['interaction'].values, detrend=False, standardize=False)

        # Variance explained per voxel: R2 = 1 - var(residual)/var(raw).
        r2_all = image.math_img('1 - (np.var(clean_im, -1) / np.var(im, -1))', im=im, clean_im=clean_im_all)
        r2_resp = image.math_img('1 - (np.var(clean_im, -1) / np.var(im, -1))', im=im, clean_im=clean_im_resp)
        r2_cardiac = image.math_img('1 - (np.var(clean_im, -1) / np.var(im, -1))', im=im, clean_im=clean_im_cardiac)
        r2_interaction = image.math_img('1 - (np.var(clean_im, -1) / np.var(im, -1))', im=im, clean_im=clean_im_interaction)

        r2_all.to_filename(op.join(derivatives, 'physiotoolbox', f'sub-{subject}', f'ses-{session}', 'func',
                                  f'sub-{subject}_ses-{session}_task-{task}_run-{run}_space-T1w_desc-r2all_bold.nii.gz'))
        r2_resp.to_filename(op.join(derivatives, 'physiotoolbox', f'sub-{subject}', f'ses-{session}', 'func',
                                  f'sub-{subject}_ses-{session}_task-{task}_run-{run}_space-T1w_desc-r2resp_bold.nii.gz'))
        r2_cardiac.to_filename(op.join(derivatives, 'physiotoolbox', f'sub-{subject}', f'ses-{session}', 'func',
                                  f'sub-{subject}_ses-{session}_task-{task}_run-{run}_space-T1w_desc-r2cardiac_bold.nii.gz'))

        r2_interaction.to_filename(op.join(derivatives, 'physiotoolbox', f'sub-{subject}', f'ses-{session}', 'func',
                                  f'sub-{subject}_ses-{session}_task-{task}_run-{run}_space-T1w_desc-r2interaction_bold.nii.gz'))

        # Plot the three modality maps as stacked rows of one figure.
        # (The unused ``n_slices`` local was removed.)
        slices = 8

        plotting.plot_stat_map(r2_cardiac, t1w, display_mode='z', threshold=0.2, figure=run, axes=(0, .66, 1, .33),
                vmax=.8,
                              cut_coords=slices)

        plotting.plot_stat_map(r2_resp, t1w, display_mode='z', threshold=0.2, cmap='viridis', figure=run,
                              axes=(0., 0.33, 1, .33),
                              vmax=.8,
                            cut_coords=slices)

        plotting.plot_stat_map(r2_interaction, t1w, display_mode='z', threshold=0.125, cmap='Blues', figure=run,
                              axes=(0., 0.0, 1, .33),
                              vmax=.8,
                               cut_coords=slices)

        plt.gcf().set_size_inches((12, 6))

        plt.savefig(op.join(derivatives, 'physioplots', f'sub-{subject}', f'ses-{session}', 'func',
                                  f'sub-{subject}_ses-{session}_task-{task}_run-{run}_space-T1w_desc-r2.png'),
                   )
Exemplo n.º 15
0
def main(subject, session, bids_folder, smoothed=False, concatenate=False, space='T1w'):
    """Fit a Gaussian PRF encoding model on cleaned mapper runs.

    Per run: load volume data, (optionally) smooth, convert to percent
    signal change, regress out fMRIPrep + RETROICOR + response-HRF
    confounds, and save the cleaned run. The runs are then averaged
    within a conjunction brain mask and a PRF model is fitted (grid
    search followed by gradient refinement); parameter maps are written
    to the encoding-model target directory.
    """

    target_dir = 'encoding_model'

    if smoothed:
        # BUG FIX: this previously appended to the undefined name
        # ``encoding_model`` (NameError); the suffix belongs on
        # ``target_dir``, which selects the output folder below.
        target_dir += '.smoothed'

    target_dir = get_target_dir(subject, session, bids_folder, target_dir)

    # Create confounds
    fmriprep_confounds = get_fmriprep_confounds(subject, session, bids_folder)
    retroicor_confounds = get_retroicor_confounds(subject, session, bids_folder)
    response_hrf = get_mapper_response_hrf(subject, session, bids_folder)

    confounds = pd.concat((fmriprep_confounds, retroicor_confounds,
        response_hrf), axis=1)

    paradigm = get_mapper_paradigm(subject, session, bids_folder)

    images = []

    psc_dir = op.join(bids_folder, 'derivatives', 'psc', f'sub-{subject}', f'ses-{session}', 'func')

    if not op.exists(psc_dir):
        os.makedirs(psc_dir)

    masks = []

    runs = get_runs(subject, session)

    for run in runs:
        print(f'cleaning run {run}')
        d = get_volume_data(subject, session, run, bids_folder, space=space)

        if smoothed:
            d = image.smooth_img(d, 5.0)

        # Percent signal change normalization.
        d = psc(d)

        d_cleaned = image.clean_img(d, confounds=confounds.loc[run].values, standardize=False, detrend=False, ensure_finite=True)

        d_cleaned.to_filename(op.join(psc_dir, f'sub-{subject}_ses-{session}_task-mapper_run-{run}_desc-psc_bold.nii.gz'))

        images.append(d_cleaned)
        masks.append(get_brain_mask(subject, session, run, bids_folder))

    # NOTE(review): special case — presumably the last run's mask for
    # sub-13/ses-3t1 is on a different grid; confirm.
    if ((subject == '13') & (session == '3t1')):
        masks[-1] = image.resample_to_img(masks[-1], masks[0], 'nearest')

    # Conjunction mask: voxels present in any run's mask.
    # BUG FIX: np.bool was removed in NumPy 1.24; the builtin ``bool``
    # is the correct dtype alias in the math_img formula.
    conjunct_mask = image.math_img('mask.sum(-1).astype(bool)', mask=image.concat_imgs(masks))

    masker = NiftiMasker(conjunct_mask)

    data = [pd.DataFrame(masker.fit_transform(im), index=paradigm.index)  for im in images]
    data = pd.concat(data, keys=runs, names=['run'])
    data.columns.name = 'voxel'

    # Average the cleaned runs timepoint-by-timepoint.
    mean_data = data.groupby('time').mean()

    mean_image = masker.inverse_transform(mean_data)
    mean_target_dir = get_target_dir(subject, session, bids_folder, 'mean_clean_volumes')
    mean_image.to_filename(op.join(mean_target_dir, f'sub-{subject}_ses-{session}_task-mapper_desc-meanedcleaned_bold.nii.gz'))

    hrf_model = SPMHRFModel(tr=get_tr(subject, session), time_length=20)
    model = GaussianPRFWithHRF(hrf_model=hrf_model)

    # # SET UP GRID (log-spaced PRF centers and widths)
    mus = np.log(np.linspace(5, 80, 40, dtype=np.float32))
    sds = np.log(np.linspace(2, 30, 40, dtype=np.float32))
    amplitudes = np.array([1.], dtype=np.float32)
    baselines = np.array([0], dtype=np.float32)

    optimizer = ParameterFitter(model, mean_data, paradigm)

    grid_parameters = optimizer.fit_grid(mus, sds, amplitudes, baselines, use_correlation_cost=True)
    grid_parameters = optimizer.refine_baseline_and_amplitude(grid_parameters, n_iterations=2)

    # Gradient-based refinement starting from the best grid point.
    optimizer.fit(init_pars=grid_parameters, learning_rate=.1, store_intermediate_parameters=False, max_n_iterations=5000)

    target_fn = op.join(target_dir, f'sub-{subject}_ses-{session}_desc-r2.optim_space-T1w_pars.nii.gz')

    masker.inverse_transform(optimizer.r2).to_filename(target_fn)

    # One parameter map per estimated PRF parameter.
    for par, values in optimizer.estimated_parameters.T.iterrows():
        print(values)
        target_fn = op.join(target_dir, f'sub-{subject}_ses-{session}_desc-{par}.optim_space-T1w_pars.nii.gz')
        masker.inverse_transform(values).to_filename(target_fn)
Exemplo n.º 16
0
def fetch_fmriprep_session(dimension: int = 1024,
                           resolution_mm: int = 3,
                           # BUG FIX: ``Optional[str, PathLike]`` is
                           # invalid (Optional takes one type) and
                           # raised TypeError at import; use a lazy
                           # string annotation for the union instead.
                           data_dir: "str | PathLike | None" = None,
                           apply_kws: Optional[dict] = None,
                           clean_kws: Optional[dict] = None,
                           design_kws: Optional[dict] = None,
                           glm_kws: Optional[dict] = None,
                           masker_kws: Optional[dict] = None,
                           **kwargs
                           ) -> Bunch:
    """
    Fetch and load in memory a participant's fMRI data for a session.

    Loads events/behavioral tables and the fMRI, mask and anatomical
    images, denoises the fMRI image with the session's clean settings,
    and returns the populated session Bunch.

    NOTE(review): this function references ``self``, ``session``,
    ``conf`` and ``denoise_strategy`` without defining them — they must
    come from enclosing scope in the original module; confirm.
    """

    from nilearn import image as nimage
    from operator import itemgetter
    from sklearn.utils import Bunch

    events, behav = [pd.read_csv(item, sep='\t') for item in
                     (self.events_path, self.beh_path)]
    events['trial_number'] = 'trial_'+events.trial_number.astype(str)

    fmri_img, mask_img, anat_img = [nimage.load_img(item) for item in
                                    itemgetter(*['fmri_path', 'mask_path',
                                                 'anat_path'])(self)]
    t_r, frame_times = get_t_r(fmri_img), get_frame_times(fmri_img)

    # Merge caller overrides into the session defaults.
    if apply_kws is not None:
        session.apply_defs.update(apply_kws)
    if clean_kws is not None:
        session.clean_defs.update(clean_kws)

    fmri_img = nimage.clean_img(fmri_img, confounds=conf,
                                t_r=t_r, mask_img=mask_img,
                                **session.clean_defs)

    # Argument definitions for each preprocessing step
    target_shape, target_affine = mask_img.shape, fmri_img.affine

    session.glm_defs.update(Bunch(mask_img=mask_img,
                                  t_r=t_r,
                                  target_shape=target_shape,
                                  target_affine=target_affine,
                                  subject_label='_'.join([session.sub_id,
                                                          session.ses_id])))

    if design_kws is not None:
        session.design_defs.update(design_kws)
    if masker_kws is not None:
        session.masker_defs.update(masker_kws)
    if glm_kws is not None:
        session.glm_defs.update(glm_kws)

    loaded_attributes = Bunch(events=events, behav=behav,
                              frame_times=frame_times,
                              t_r=t_r,
                              confounds=conf,
                              denoise_strategy=denoise_strategy,
                              smoothing_fwhm=session.apply_defs.smoothing_fwhm,
                              anat_img=anat_img, fmri_img=fmri_img,
                              mask_img=mask_img)
#                               masker=masker)

    default_params = Bunch(apply_defs=session.apply_defs,
                           clean_defs=session.clean_defs,
                           design_defs=session.design_defs,
                           glm_defs=session.glm_defs,
                           masker_defs=session.masker_defs)
    session.update(loaded_attributes)
    session.update(default_params)
    session.get_t_r, session.get_frame_times = get_t_r, get_frame_times
    return session
Exemplo n.º 17
0
from nilearn.image import clean_img
import pandas as pd

# Read the confounds table produced by fMRIPrep.
confounds_df = pd.read_table(snakemake.input.confounds_tsv)

# Keep only the requested confound columns, as a plain array.
confound_mat = confounds_df[snakemake.params.confounds_to_use].to_numpy()

# Detrend, standardize, and regress out the confounds within the mask.
denoised = clean_img(snakemake.input.nii,
                     detrend=True,
                     standardize=True,
                     confounds=confound_mat,
                     mask_img=snakemake.input.mask_nii)

# Write the denoised image.
denoised.to_filename(snakemake.output.denoised)
# --------------
from nilearn.datasets import fetch_spm_auditory
from nilearn import image
from nilearn import masking
import pandas as pd

# Load the SPM auditory example dataset (downloads on first use) and
# concatenate the run's 3D volumes into one 4D image.
subject_data = fetch_spm_auditory()
fmri_img = image.concat_imgs(subject_data.func)

# Make an average volume and compute an EPI brain mask from it.
mean_img = image.mean_img(fmri_img)
mask = masking.compute_epi_mask(mean_img)

# Clean (standardization disabled) and smooth the data (5 mm FWHM).
fmri_img = image.clean_img(fmri_img, standardize=False)
fmri_img = image.smooth_img(fmri_img, 5.)

# Load the per-trial events table shipped with the dataset.
events = pd.read_table(subject_data['events'])

#########################################################################
# Fit model
# ---------
# Note that `minimize_memory` is set to `False` so that `FirstLevelModel`
# stores the residuals.
# `signal_scaling` is set to False, so we keep the same scaling as the
# original data in `fmri_img`.
from nilearn.glm.first_level import FirstLevelModel

fmri_glm = FirstLevelModel(t_r=7,
Exemplo n.º 19
0
# Binarize the mask array at a 0.1 threshold.
data[np.where(data >= 0.1)] = 1
data[np.where(data <= 0.1)] = 0

mask = nib.Nifti1Image(data.astype(np.float32), affine)
nib.save(
    mask,
    '/home/brainlab/Desktop/Rudas/Data/Propofol/Awake/Task/output/datasink/preprocessing/sub-2014_05_02_02CB/mask.nii'
)

# Detrend, standardize, high-pass filter (0.01 Hz at TR=2 s) and regress
# the confound file out of the preprocessed image, within the mask.
# NOTE(review): the ``sessions`` argument was removed in recent nilearn
# releases — confirm the pinned nilearn version supports it.
image_cleaned = clean_img(
    '/home/brainlab/Desktop/Rudas/Data/Propofol/Awake/Task/output/datasink/preprocessing/sub-2014_05_02_02CB/swfmri_art_removed.nii',
    sessions=None,
    detrend=True,
    standardize=True,
    high_pass=0.01,
    t_r=2,
    confounds=
    '/home/brainlab/Desktop/Rudas/Data/Propofol/Awake/Task/output/datasink/preprocessing/sub-2014_05_02_02CB/ev_without_gs.csv',
    ensure_finite=True,
    mask_img=
    '/home/brainlab/Desktop/Rudas/Data/Propofol/Awake/Task/output/datasink/preprocessing/sub-2014_05_02_02CB/mask.nii'
)

nib.save(
    image_cleaned,
    '/home/brainlab/Desktop/Rudas/Data/Propofol/Awake/Task/output/datasink/preprocessing/sub-2014_05_02_02CB/cleaned.nii'
)

# NOTE(review): get_data() is deprecated in nibabel; get_fdata() is the
# modern replacement (but returns a different dtype).
data_cleaned = image_cleaned.get_data()
predictors = np.loadtxt(
Exemplo n.º 20
0
    # Load the confounds table produced by fMRIPrep.
    confound_df = pd.read_csv(confound, delimiter='\t')

    # Motion, global-signal and aCompCor regressors.
    confound_vars = [
        'trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z',
        'global_signal', 'a_comp_cor_01', 'a_comp_cor_02'
    ]
    confound_df = confound_df[confound_vars]
    # Add the first temporal derivative of each confound as a new column.
    for col in confound_df.columns:
        new_name = '{}_dt'.format(col)
        new_col = confound_df[col].diff()
        confound_df[new_name] = new_col
        confound_df.head()  # NOTE(review): no-op — the head() result is discarded
    raw_func_img = nimg.load_img(image)
    # Drop the first 10 volumes from image and confounds alike
    # (presumably non-steady-state scans — confirm).
    func_img = raw_func_img.slicer[:, :, :, 10:]
    drop_confound_df = confound_df.loc[10:]
    confounds_matrix = drop_confound_df.values

    # Band-pass 0.009–0.08 Hz at TR = 2 s.
    high_pass = 0.009
    low_pass = 0.08
    t_r = 2
    clean_img = nimg.clean_img(func_img,
                               confounds=confounds_matrix,
                               detrend=True,
                               standardize=True,
                               low_pass=low_pass,
                               high_pass=high_pass,
                               t_r=t_r,
                               mask_img=mask)
    # Smooth with a 6 mm FWHM kernel and save.
    smooth_img = nimg.smooth_img(clean_img, [6, 6, 6])
    smooth_img.to_filename(os.path.join(dirname, 'smooth6.nii.gz'))
Exemplo n.º 21
0
parser.add_argument("-confounds", type=str, default=None, metavar="",
                    help="Path to text file with confounds")
parser.add_argument("--detrend", action='store_true',
                    help="Remove linear trends")
parser.add_argument("--standardize", action='store_true',
                    help="Standardize to unit variance")
args = parser.parse_args()

detrend = args.detrend
standardize = args.standardize
high_pass = args.high_pass
low_pass = args.low_pass
t_r = args.t_r

# Temporal filtering needs a repetition time.
if (high_pass or low_pass) and t_r is None:
    raise ValueError("Please specify -t_r for temporal filtering operations")

if (high_pass and low_pass) and high_pass >= low_pass:
    raise ValueError(f"""High pass cutoff ({high_pass}) >= \
low pass cutoff ({low_pass})""")

# BUG FIX: ``confounds`` was only bound when -confounds was supplied,
# causing a NameError in the clean_img call below otherwise; nilearn
# accepts confounds=None to mean "no confound regression".
confounds = None
if args.confounds:
    confounds = np.loadtxt(args.confounds)
    print(f"{args.confounds} loaded as confounds with shape {confounds.shape}")

filtered_img = clean_img(args.input, detrend=detrend, standardize=standardize,
                         low_pass=low_pass, high_pass=high_pass, t_r=t_r,
                         confounds=confounds)

filtered_img.to_filename(args.output)
Exemplo n.º 22
0
import numpy as np
import pandas as pd
import nibabel as nib
from glob import glob
from nistats.first_level_model import run_glm
from nistats.contrasts import compute_contrast
from nilearn import image, masking

pybest_dir = 'pybest/data/ni-edu/derivatives/pybest/sub-02/ses-1'

mask = pybest_dir + '/preproc/sub-02_ses-1_task-face_desc-preproc_mask.nii.gz'
# Standardize each single-trial estimate image (no detrending).
trials = sorted(glob(pybest_dir + '/best/*desc-trial*'))
for i in range(len(trials)):
    trials[i] = image.clean_img(trials[i], detrend=False, standardize=True)

# Trials-by-voxels data matrix within the mask.
Y = masking.apply_mask(image.concat_imgs(trials), mask)

# Load events; drop the 'rating' and 'response' rows.
events = pybest_dir + '/preproc/sub-02_ses-1_task-face_desc-preproc_events.tsv'
events = pd.read_csv(events, sep='\t').query("trial_type != 'rating' and trial_type != 'response'")
events.loc[:, 'face_eth'] = ['asian' if 'sian' in s else s for s in events['face_eth']]
events.loc[:, 'trial_type'] = [s[-7:] for s in events.loc[:, 'trial_type']]
# Design: mean-scaled subjective ratings plus trial-type and
# face-ethnicity dummy columns.
X = events.loc[:, ['subject_dominance', 'subject_trustworthiness', 'subject_attractiveness']]
X /= X.mean(axis=0)
X = pd.concat((X, pd.get_dummies(events.loc[:, 'trial_type'])), axis=1)
X = pd.concat((X, pd.get_dummies(events.loc[:, 'face_eth'])), axis=1)
labels, results = run_glm(Y, X.to_numpy(), noise_model='ols')

# One t-contrast per design column.
# NOTE(review): ``zscores`` is overwritten on each iteration and not
# used here — presumably consumed by code beyond this excerpt; confirm.
for i in range(X.shape[1]):
    cvec = np.zeros(X.shape[1])
    cvec[i] = 1
    zscores = compute_contrast(labels, results, con_val=cvec, contrast_type='t').z_score()
Exemplo n.º 23
0
            # load functional image
            func_img = img.load_img(func)

            # Confounds as a plain (n_volumes x n_regressors) array.
            # (Confirm matrix size with confounds_matrix.shape.)
            confounds_matrix = confound_df.values

            # Band-pass cutoffs in Hz.
            high_pass = 0.008
            low_pass = 0.08

            # Regress confounds, detrend, standardize and band-pass
            # filter within the brain mask (TR = 2.61 s).
            clean_img = img.clean_img(func_img,
                                      confounds=confounds_matrix,
                                      detrend=True,
                                      standardize=True,
                                      low_pass=low_pass,
                                      high_pass=high_pass,
                                      t_r=2.61,
                                      mask_img=mask,
                                      ensure_finite=True)

            # Create new subject-directory in the reho-directory.
            # NOTE(review): os.mkdir raises FileExistsError when the
            # folder already exists — confirm that is intended.
            new_folder = os.path.join(path_reho, dirname)
            os.mkdir(new_folder)
            clean_file = new_folder + '/' + dirname + '_task-rest_space-MNI152NLin6Asym_clean.nii.gz'
            # Save to nii.gz
            clean_img.to_filename(clean_file)

########################################################
"""
Exemplo n.º 24
0
        # Binarize the grey-matter map at ``threshold``, treating NaNs
        # as background (the copy preserves the pre-threshold values).
        gm_data_copy = gm_data.copy()
        where_are_NaNs = np.isnan(gm_data_copy)
        gm_data[where_are_NaNs] = 0
        gm_data[gm_data_copy >= threshold] = 1
        gm_data[gm_data_copy < threshold] = 0

        confunds_path = join(preprocessing_path, subject, 'ev_without_gs.csv')

        # Denoise once per subject; reuse the cached result afterwards.
        fmri_cleaned_path = join(preprocessing_path, subject, 'fmri_cleaned.nii')
        if not path.exists(fmri_cleaned_path):
            print('Cleaning image')
            # NOTE(review): the ``sessions`` argument was removed in
            # recent nilearn releases — confirm the pinned version.
            image_cleaned = clean_img(fmri_preprocessed_path,
                                      sessions=None,
                                      detrend=True,
                                      standardize=True,
                                      low_pass=0.08,
                                      high_pass=0.009,
                                      t_r=2,
                                      confounds=confunds_path,
                                      ensure_finite=True,
                                      mask_img=nmi_brain_mask_path)
            nib.save(image_cleaned, fmri_cleaned_path)
        else:
            print('Image cleaned found')
            image_cleaned = nib.load(fmri_cleaned_path)

        folder_output = join(preprocessing_path, subject, 'parcellation_from_lasso')
        time_series_path = join(folder_output, 'time_series.txt')
        makedir(folder_output)

        # Extract grey-matter time series (cached on disk if present).
        if not path.exists(time_series_path):
            time_series = np.transpose(np.asarray(change_resolution(image_cleaned.get_data(), gm_data)))
    zmap_filenames.append('/home/jmuraskin/Projects/CCD/working_v1/seed-to-voxel/%s/%s/%s_%s.nii.gz' % (fc,secondlevel_folder_names[fb],fc,subj))

mask_filename='/home/jmuraskin/Projects/CCD/working_v1/seg_probabilities/grey_matter_mask-20-percent.nii.gz'

from scipy.stats import zscore
#load phenotypic data
phenoFile='/home/jmuraskin/Projects/CCD/Pheno/narsad+vt_new.csv'
pheno=read_csv(phenoFile)
pheno=pheno.set_index('participant')

# z-scored covariates for the retained subjects.
# NOTE(review): column meanings inferred from variable names only —
# V1_DEM_001 ~ age, V1_DEM_002 ~ sex; confirm against the codebook.
ages=zscore(pheno.loc[goodsubj]['V1_DEM_001'])

mf=zscore(pheno.loc[goodsubj]['V1_DEM_002'])

# z-scored mean framewise displacement for the FEEDBACK condition.
# NOTE(review): the second boolean mask is computed on the full frame
# but applied to a filtered frame (pandas aligns on index with a
# warning) — confirm the intended row selection.
motionTest=read_csv('/home/jmuraskin/Projects/CCD/CCD-scripts/analysis/CCD_meanFD.csv')
meanFD=zscore(motionTest[motionTest.FB=='FEEDBACK'][motionTest.Subject_ID.isin(goodsubj)]['train_meanFD'])


imgs=image.concat_imgs(zmap_filenames)

# Regress age/sex/motion covariates out of the z-maps and standardize.
clean_imgs=image.clean_img(imgs,confounds=[ages,mf,meanFD],detrend=False,standardize=True)


from nilearn.decoding import SpaceNetRegressor

# TV-L1 SpaceNet regression within the grey-matter mask.
decoder = SpaceNetRegressor(mask=mask_filename, penalty="tv-l1",
                            eps=1e-1,  # prefer large alphas
                            memory="nilearn_cache",n_jobs=30)

decoder.fit(clean_imgs, behavioral_target)
Exemplo n.º 26
0
    'csf', 'csf_derivative1', 'csf_derivative1_power2', 'csf_power2',
    'global_signal', 'global_signal_derivative1',
    'global_signal_derivative1_power2', 'global_signal_power2', 'trans_x',
    'trans_x_derivative1', 'trans_x_power2', 'trans_x_derivative1_power2',
    'trans_y', 'trans_y_derivative1', 'trans_y_power2',
    'trans_y_derivative1_power2', 'trans_z', 'trans_z_derivative1',
    'trans_z_derivative1_power2', 'trans_z_power2', 'rot_x',
    'rot_x_derivative1', 'rot_x_power2', 'rot_x_derivative1_power2', 'rot_y',
    'rot_y_derivative1', 'rot_y_derivative1_power2', 'rot_y_power2', 'rot_z',
    'rot_z_derivative1', 'rot_z_derivative1_power2', 'rot_z_power2'
]

## Denoise, smooth, and mask BOLD data.
# 'func_path', 'cf_df', 'reg_cols', and 'brain_mask_img' are defined
# earlier in this script (not visible here). High-pass at 0.008 Hz with
# the confound columns regressed out, restricted to the brain mask.
func_img = image.clean_img(func_path,
                           high_pass=0.008,
                           t_r=2,
                           confounds=cf_df.loc[:, reg_cols].values,
                           mask_img=brain_mask_img)
# 3 mm FWHM spatial smoothing after cleaning.
func_img = image.smooth_img(func_img, 3)

# Voxel coordinates (i, j, k) of every in-mask voxel.
brain_mask_data = brain_mask_img.get_fdata()
brain_coords = np.argwhere(brain_mask_data == 1)

## Create a design matrix to describe direction from each voxel to its neighbors
# All 27 offsets of a 3x3x3 neighborhood (including the center), one row
# per neighbor; flipped so the fastest-varying axis is x.
unit = [1, 0, -1]
Mi = np.empty((27, 6), dtype='float')
M_orig = np.flip(np.array(list(itertools.product(unit, repeat=3))),
                 axis=1).astype('float')
M = M_orig.copy()
for row_idx, row_vector in enumerate(M_orig):
Exemplo n.º 27
0
    def _run_interface(self, runtime):
        """Denoise an fMRI run, extract atlas ROI time series, and
        optionally plot the ROI correlation matrix.

        Steps (paths are hard-coded for this pipeline's host machine):
          1. ``clean_img``: detrend, standardize, high-pass (0.01 Hz) and
             confound-regress ``self.inputs.in_file`` within an MNI brain
             mask; the result is written to ``fmri_cleaned.nii`` in the
             working directory.
          2. ``NiftiLabelsMasker``: extract one time series per atlas
             label (the masker applies its own detrend/standardize/
             band-pass) and save them to ``time_series_out_file`` as CSV.
          3. If ``self.inputs.plot`` is truthy, compute the ROI-by-ROI
             correlation matrix and save it to
             ``correlation_matrix_out_file``.

        Returns the nipype ``runtime`` object unchanged.
        """
        from nilearn.input_data import NiftiLabelsMasker
        from nilearn.image import clean_img
        import numpy as np
        import nibabel as nib

        # NOTE: the deprecated ``sessions=None`` argument was dropped; it
        # was the default anyway and recent nilearn releases reject it.
        image_cleaned = clean_img(
            self.inputs.in_file,
            detrend=True,
            standardize=True,
            high_pass=0.01,
            t_r=self.inputs.tr,
            confounds=self.inputs.confounds_file,
            ensure_finite=True,
            mask_img=
            '/home/brainlab/Desktop/Rudas/Data/Propofol/MNI152_T1_2mm_brain_mask.nii.gz'
        )

        nib.save(image_cleaned, 'fmri_cleaned.nii')

        masker = NiftiLabelsMasker(
            labels_img=
            '/home/brainlab/Desktop/Rudas/Data/Parcellation/atlas_NMI_2mm.nii',
            standardize=True,
            detrend=True,
            low_pass=0.1,
            high_pass=0.01,
            t_r=self.inputs.tr,
            memory='nilearn_cache',
            verbose=0)

        # One atlas label per line; strip the trailing newline so the plot
        # labels render cleanly (the original kept the '\n' and leaked the
        # file handle).
        with open(
                '/home/brainlab/Desktop/Rudas/Data/Parcellation/AAL from Freesourfer/fs_default.txt',
                'r') as file_labels:
            labels = [line.rstrip('\n') for line in file_labels]

        # NOTE(review): the masker is fit on the raw ``in_file`` (with the
        # confounds), not on ``image_cleaned`` — the cleaned image is only
        # saved to disk. Presumably intentional since the masker re-applies
        # detrending/standardization; confirm against the pipeline design.
        time_series = masker.fit_transform(
            self.inputs.in_file, confounds=self.inputs.confounds_file)

        np.savetxt(self.inputs.time_series_out_file,
                   time_series,
                   fmt='%10.2f',
                   delimiter=',')

        if self.inputs.plot:
            from nilearn import plotting
            from nilearn.connectome import ConnectivityMeasure
            import matplotlib
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots()

            # 'normal' is not a real font family; matplotlib falls back to
            # its default (possibly with a warning) — kept for parity.
            font = {'family': 'normal', 'size': 5}
            matplotlib.rc('font', **font)

            correlation_measure = ConnectivityMeasure(kind='correlation')
            correlation_matrix = correlation_measure.fit_transform(
                [time_series])[0]

            # Mask the main diagonal for visualization:
            np.fill_diagonal(correlation_matrix, 0)
            plotting.plot_matrix(correlation_matrix,
                                 figure=fig,
                                 labels=labels,
                                 vmax=0.8,
                                 vmin=-0.8,
                                 reorder=True)

            fig.savefig(self.inputs.correlation_matrix_out_file, dpi=1200)

        return runtime
import sys
import os
import numpy as np
from load_confounds import Params36
from nilearn.masking import apply_mask
from nilearn.image import clean_img

# Visual-cortex mask and fMRIPrep derivative locations for the Friends
# dataset.
mask_path = "data/external/visual_mask.nii.gz"
dataset_path = "data/friends"
derivatives_path = os.path.join(dataset_path,
                                "derivatives/fmriprep-20.1.0/fmriprep")

# Batch index from the command line: each invocation handles ten files.
i = int(sys.argv[1])

with open("utils/remaining_files_to_mask.txt", "r") as handle:
    file_list = [entry.rstrip('\n') for entry in handle]

start, stop = 10 * i, 10 * (i + 1)
for file_path in file_list[start:stop]:
    # Swap the ".nii.gz" suffix (6 characters) for "npy".
    file_name = os.path.basename(file_path)[:-6] + "npy"
    # 36-parameter confound model (load_confounds) for this run.
    confounds = Params36().load(file_path)

    # Regress out the confounds, then keep only in-mask voxels.
    denoised = clean_img(file_path, confounds=confounds)
    voxels = apply_mask(denoised, mask_path)

    out_path = os.path.join("data/preprocessed/fmri/", file_name)
    np.save(out_path, voxels)
    print("masked date saved at {}".format(out_path))

    # Free the large arrays before loading the next run.
    del confounds
    del denoised
    del voxels
Exemplo n.º 29
0
def nuisance_regress(inputimg,
                     confoundsfile,
                     inputmask,
                     inputtr=0,
                     conftype="36P",
                     spikethr=0.25,
                     smoothkern=6.0,
                     discardvols=4,
                     highpassval=0.008,
                     lowpassval=0.08,
                     confoundsjson='',
                     addregressors=''):
    """
    Nuisance-regress a 4D fMRI image.

    Returns ``(outimgtrim, confounds, outlier_stats)`` where
    ``outimgtrim`` is a nibabel.nifti1.Nifti1Image that is cleaned in the
    following ways: detrending, smoothed, motion parameter regress, spike
    regress, bandpass filtered, and normalized.

    Parameters
    ----------
    inputimg : image to clean. When ``inputtr == 0`` the TR is read from
        ``inputimg.header`` — that path needs an in-memory image, not a
        filename (TODO confirm callers never pass a path with tr=0).
    confoundsfile : confounds table parsed by the sibling ``get_confounds``.
    inputmask : brain mask image, or None to clean without a masker.
    inputtr : repetition time in seconds; 0 means "read it from the header".
    conftype : motion parameter regression strategy: 36P, 9P, 6P, or aCompCor.
    spikethr : spike-regressor threshold passed to ``get_confounds``.
    smoothkern : smoothing FWHM in mm (masker path only).
    discardvols : number of initial volumes to drop from the output.
    highpassval : high-pass cutoff in Hz, or the string 'cosine' to use a
        discrete-cosine basis from ``get_confounds`` instead.
    lowpassval : low-pass cutoff in Hz; 0 disables low-pass filtering.
    confoundsjson, addregressors : forwarded to ``get_confounds``.

    Signal cleaning params from:

        Parkes, L., Fulcher, B., Yücel, M., & Fornito, A. (2018). An evaluation
        of the efficacy, reliability, and sensitivity of motion correction
        strategies for resting-state functional MRI. NeuroImage, 171, 415-436.

        Ciric, R., Wolf, D. H., Power, J. D., Roalf, D. R., Baum, G. L.,
        Ruparel, K., ... & Gur, R. C. (2017). Benchmarking of participant-level
        confound regression strategies for the control of motion artifact in
        studies of functional connectivity. Neuroimage, 154, 174-187.
    """

    # 'cosine' high pass -> request DCT basis regressors from get_confounds
    # instead of a frequency-domain high-pass filter.
    dct = False
    if highpassval == 'cosine':
        print("using cosine basis for high pass")
        highpassval = None
        dct = True
    else:
        highpassval = float(highpassval)

    if lowpassval == 0:
        print("detected lowpassval 0, setting to None")
        lowpassval = None
    else:
        # check highpass versus low pass
        if highpassval:
            if highpassval >= lowpassval:
                print("high and low pass values dont make sense. exiting")
                # sys.exit over the bare exit() builtin: exit() is an
                # interactive helper injected by the site module and is not
                # guaranteed to exist; sys.exit raises the same SystemExit(1).
                sys.exit(1)

    # extract confounds
    confounds, outlier_stats = get_confounds(confoundsfile,
                                             kind=conftype,
                                             spikereg_threshold=spikethr,
                                             confounds_json=confoundsjson,
                                             dctbasis=dct,
                                             addreg=addregressors)

    # check tr
    if inputtr == 0:
        # read the tr from the fourth dimension of zooms, this depends on the input
        # data being formatted to have the dim4 be the TR...
        tr = inputimg.header.get_zooms()[3]
        print("found that tr is: {}".format(str(tr)))

        if tr == 0:
            print("thats not a good tr. exiting")
            sys.exit(1)

    else:
        tr = inputtr

    if inputmask is not None:
        print("cleaning image with masker")

        # masker params
        masker_params = {
            "mask_img": inputmask,
            "detrend": False,
            "standardize": True,
            "low_pass": lowpassval,
            "high_pass": highpassval,
            "t_r": tr,
            "smoothing_fwhm": smoothkern,
            "verbose": 1,
        }

        # invoke masker
        masker = input_data.NiftiMasker(**masker_params)

        # perform the nuisance regression
        time_series = masker.fit_transform(inputimg,
                                           confounds=confounds.values)

        # inverse masker operation to get the nifti object, n.b. this returns a Nifti1Image!!!
        outimg = masker.inverse_transform(time_series)  # nus regress

    else:
        # no mask! so no masker (note: no smoothing happens on this path)
        print("cleaning image with no mask")

        clean_params = {
            "confounds": confounds.values,
            "detrend": False,
            "standardize": True,
            "low_pass": lowpassval,
            "high_pass": highpassval,
            "t_r": tr,
        }

        loadimg = image.load_img(inputimg)
        outimg = image.clean_img(loadimg, **clean_params)  # nus regress

    # get rid of the first N volumes
    if discardvols > 0:
        outimgtrim = image_drop_dummy_trs(outimg, discardvols)
    else:
        outimgtrim = outimg

    return outimgtrim, confounds, outlier_stats
Exemplo n.º 30
0
def avmovie_data(sub, run):
    """Load and clean the AV-movie NIfTI for subject *sub*, run *run*.

    Fills the module-level ``NIFTI_SRC`` path template and returns the
    result of nilearn's ``clean_img`` with its default settings.
    """
    nifti_path = NIFTI_SRC.format(sub=sub, run=run)
    return image.clean_img(nifti_path)