def test_calc_beta_map(sub_metadata, preproc_file, sub_events, confounds_file,
                       brainmask_file, hrf_model, return_tstat):
    """Smoke-test _calc_beta_map: output must match the brain-mask geometry."""
    # Minimal first-level GLM: no smoothing, cosine drift, keep residuals.
    model = first_level_model.FirstLevelModel(
        t_r=2,
        slice_time_ref=0,
        hrf_model=hrf_model,
        mask=str(brainmask_file),
        smoothing_fwhm=0,
        signal_scaling=False,
        high_pass=0.008,
        drift_model='cosine',
        verbose=1,
        minimize_memory=False,
    )

    # Convert the events file for per-trial modelling (LSA converter).
    events = _lsa_events_converter(str(sub_events))
    model.fit(str(preproc_file), events=events)

    # Compute the beta map for the first trial listed in the events table.
    first_trial = events.loc[events.index[0], 'trial_type']
    beta_map = _calc_beta_map(model, first_trial, hrf_model, return_tstat)

    # The beta map must share the 3D shape of the brain mask image.
    assert beta_map.shape == nib.load(str(brainmask_file)).shape
    def _run_interface(self, runtime):
        """Fit a first-level GLM and reconstruct the task-predicted BOLD.

        For every trial type, the effect-size (beta) map is multiplied by
        its design-matrix regressor and the products are summed into a 4D
        predicted-BOLD image written to ``predicted_task_bold.nii.gz``.
        """
        from nistats import first_level_model
        import nibabel as nib
        import pandas as pd
        import numpy as np

        # get t_r from bold_metadata
        t_r = self.inputs.bold_metadata['RepetitionTime']

        # restrict the confounds to the user-selected columns (if any)
        if self.inputs.confounds_file and self.inputs.selected_confounds:
            confounds = _select_confounds(self.inputs.confounds_file,
                                          self.inputs.selected_confounds)
        else:
            confounds = None

        # high_pass, switch from Hz to period (seconds)
        high_pass_period = int(1 / self.inputs.high_pass)

        # setup the model
        model = first_level_model.FirstLevelModel(
            t_r=t_r,
            slice_time_ref=0,
            hrf_model=self.inputs.hrf_model,
            mask=self.inputs.mask_file,
            smoothing_fwhm=self.inputs.smoothing_kernel,
            standardize=False,
            signal_scaling=0,
            period_cut=high_pass_period,
            drift_model='cosine',
            verbose=1,
        )

        events_df = pd.read_csv(self.inputs.events_file, sep="\t")
        # NOTE(review): BIDS events files name this column 'trial_type'
        # (singular); 'trial_types' is assumed to be a project-specific
        # convention -- confirm against the events files actually used.
        trial_types = events_df['trial_types'].unique()

        model.fit(self.inputs.bold_file,
                  events=events_df,
                  confounds=confounds)

        design_matrix = model.design_matrices_[0]
        bold_img = nib.load(self.inputs.bold_file)
        predicted_out = np.zeros(bold_img.shape)
        for trial_type in trial_types:
            # beta (effect size) map for this condition: a 3D volume
            beta_map = model.compute_contrast(trial_type, output_type='effect_size')
            beta_map_ex = np.expand_dims(beta_map, axis=3)

            # BUGFIX: the original np.tile(design_series, bold_img.shape)
            # replicated the length-T regressor along time as well, giving
            # an (x, y, z, t*t) array that cannot be accumulated into
            # predicted_out. Tile only over the three spatial dimensions so
            # the regressor broadcasts along the time axis.
            design_series = design_matrix[trial_type].values
            brain_map = np.tile(design_series, bold_img.shape[:3] + (1,))

            predicted_out += beta_map_ex * brain_map

        fname = "predicted_task_bold.nii.gz"
        bold_img.__class__(predicted_out, bold_img.affine, bold_img.header).to_filename(fname)

        self._results['predicted_bold'] = fname

        return runtime
Example #3
0
    def _run_interface(self, runtime):
        """Fit a first-level GLM from a saved design matrix and write
        effect, variance, stat, z-score and p-value maps per contrast."""
        import nibabel as nb
        from nistats import first_level_model as level1

        design = pd.read_csv(self.inputs.design_matrix,
                             delimiter='\t',
                             index_col=0)
        bold = nb.load(self.inputs.bold_file)

        # Undefined traits fall back to None so nistats uses its defaults.
        mask_file = (self.inputs.mask_file
                     if isdefined(self.inputs.mask_file) else None)
        smoothing = (self.inputs.smoothing_fwhm
                     if isdefined(self.inputs.smoothing_fwhm) else None)

        model = level1.FirstLevelModel(mask_img=mask_file,
                                       smoothing_fwhm=smoothing)
        model.fit(bold, design_matrices=design)

        # One output list per statistical map type.
        outputs = {
            'effect_size': [],
            'effect_variance': [],
            'stat': [],
            'z_score': [],
            'p_value': [],
        }
        contrast_metadata = []
        out_ents = self.inputs.contrast_info[0]['entities']
        fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format

        for name, weights, contrast_type in prepare_contrasts(
                self.inputs.contrast_info, design.columns.tolist()):
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })
            # output_type='all' returns every map type in one dict.
            maps = model.compute_contrast(weights,
                                          contrast_type,
                                          output_type='all')
            for map_type, map_list in outputs.items():
                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

        self._results['effect_maps'] = outputs['effect_size']
        self._results['variance_maps'] = outputs['effect_variance']
        self._results['stat_maps'] = outputs['stat']
        self._results['zscore_maps'] = outputs['z_score']
        self._results['pvalue_maps'] = outputs['p_value']
        self._results['contrast_metadata'] = contrast_metadata

        return runtime
Example #4
0
    def _run_interface(self, runtime):
        """Build a first-level design matrix from sparse/dense session
        info, fit the GLM, and save one effect-size map per contrast."""
        info = self.inputs.session_info
        img = nb.load(self.inputs.bold_file)
        vols = img.shape[3]

        # Sparse (event) regressors, renamed to nistats' expected columns.
        if info['sparse'] not in (None, 'None'):
            sparse = pd.read_hdf(info['sparse'], key='sparse').rename(
                columns={'condition': 'trial_type',
                         'amplitude': 'modulation'})
            sparse = sparse.dropna(subset=['modulation'])  # Drop NAs
        else:
            sparse = None

        # Dense (confound) regressors; skip the cosine drift model when
        # cosine regressors were already supplied in the dense set.
        if info['dense'] not in (None, 'None'):
            dense = pd.read_hdf(info['dense'], key='dense')
            column_names = dense.columns.tolist()
            drift_model = None if 'cosine_00' in column_names else 'cosine'
        else:
            dense = None
            column_names = None
            drift_model = 'cosine'

        mat = dm.make_first_level_design_matrix(
            frame_times=np.arange(vols) * info['repetition_time'],
            events=sparse,
            add_regs=dense,
            add_reg_names=column_names,
            drift_model=drift_model,
        )

        mat.to_csv('design.tsv', sep='\t')
        self._results['design_matrix'] = os.path.join(runtime.cwd,
                                                      'design.tsv')

        # Undefined traits fall back to None so nistats uses its defaults.
        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        smoothing_fwhm = self.inputs.smoothing_fwhm
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        # NOTE(review): sibling interfaces here pass ``mask_img=``; confirm
        # which nistats version this ``mask=`` keyword targets.
        flm = level1.FirstLevelModel(
            mask=mask_file, smoothing_fwhm=smoothing_fwhm)
        flm.fit(img, design_matrices=mat)

        contrast_maps = []
        contrast_metadata = []
        out_ents = self.inputs.contrast_info[0]['entities']
        # ``contrast_type`` (was ``type``) avoids shadowing the builtin.
        for name, weights, contrast_type in prepare_contrasts(
                self.inputs.contrast_info, mat.columns.tolist()):
            es = flm.compute_contrast(
                weights, contrast_type, output_type='effect_size')
            es_fname = os.path.join(
                runtime.cwd, '{}.nii.gz').format(name)
            es.to_filename(es_fname)

            contrast_maps.append(es_fname)
            contrast_metadata.append(
                {'contrast': name,
                 'suffix': 'effect',
                 **out_ents}
                )

        self._results['contrast_maps'] = contrast_maps
        self._results['contrast_metadata'] = contrast_metadata

        return runtime
Example #5
0
def first_level(analysis, block, space, deriv_dir):
    """Run a first-level GLM for each run in ``block`` and stage report assets.

    For every sparse design matrix the block yields, this locates the single
    preprocessed BOLD file, assembles the full design matrix (HRF-convolved
    events plus selected confounds), saves design/correlation/contrast
    figures, fits the GLM lazily, and writes one stat map plus a glass-brain
    plot per contrast. Returns a list of job descriptions (one per run).
    """
    analyses = []
    for paradigm, _, ents in block.get_design_matrix(block.model['HRF_variables'],
                                                     mode='sparse'):
        # Exactly one preprocessed file must match this run's entities.
        preproc_files = analysis.layout.get(type='preproc', space=space, **ents)
        if len(preproc_files) == 0:
            raise ValueError("No PREPROC files found")

        if len(preproc_files) != 1:
            print(preproc_files)
            raise ValueError("Too many potential PREPROC files")

        fname = preproc_files[0].filename

        img = nb.load(fname)
        TR = img.header.get_zooms()[3]  # repetition time = 4th zoom
        vols = img.shape[3]

        # Get dense portion of design matrix once TR is known
        _, confounds, _ = block.get_design_matrix(mode='dense',
                                                  sampling_rate=1/TR, **ents)[0]
        # Keep non-steady-state outlier regressors plus the model variables.
        names = [col for col in confounds.columns
                 if col.startswith('NonSteadyStateOutlier') or
                 col in block.model['variables']]

        # Skip the cosine drift model when cosine regressors are already
        # among the selected confound columns.
        mat = dm.make_design_matrix(
            frame_times=np.arange(vols) * TR,
            paradigm=paradigm.rename(columns={'condition': 'trial_type',
                                              'amplitude': 'modulation'}),
            add_regs=confounds[names].fillna(0),
            add_reg_names=names,
            drift_model=None if 'Cosine00' in names else 'cosine',
            )

        preproc_ents = analysis.layout.parse_file_entities(fname)

        dm_ents = {k: v for k, v in preproc_ents.items()
                   if k in ('subject', 'session', 'task')}

        # Save the design matrix as TSV and as an SVG figure.
        dm_ents['type'] = 'design'
        design_fname = op.join(deriv_dir,
                               analysis.layout.build_path(dm_ents, strict=True))
        os.makedirs(op.dirname(design_fname), exist_ok=True)
        mat.to_csv(design_fname, sep='\t')
        plt.set_cmap('viridis')
        plot_and_save(design_fname.replace('.tsv', '.svg'),
                      nis.reporting.plot_design_matrix, mat)

        # Regressor correlation figure (constant column excluded).
        corr_ents = dm_ents.copy()
        corr_ents['type'] = 'corr'
        corr_fname = op.join(deriv_dir,
                             analysis.layout.build_path(corr_ents, strict=True))
        plot_and_save(corr_fname, plot_corr_matrix,
                      mat.drop(columns=['constant']).corr(),
                      len(block.model['HRF_variables']))

        job_desc = {
            'ents': ents,
            'subject_id': ents['subject'],
            'dataset': analysis.layout.root,
            'model_name': analysis.model['name'],
            'design_matrix_svg': design_fname.replace('.tsv', '.svg'),
            'correlation_matrix_svg': corr_fname,
            }

        # Contrasts: explicit block contrasts plus the raw HRF variables.
        cnames = [contrast['name'] for contrast in block.contrasts] + block.model['HRF_variables']
        contrast_matrix = []
        if cnames:
            contrasts_ents = corr_ents.copy()
            contrasts_ents['type'] = 'contrasts'
            contrasts_fname = op.join(
                deriv_dir,
                analysis.layout.build_path(contrasts_ents, strict=True))

            contrast_matrix = expand_contrast_matrix(
                block.get_contrasts(cnames, **ents)[0][0], mat)
            plot_and_save(contrasts_fname, plot_contrast_matrix,
                          contrast_matrix.drop(['constant'], 'index'),
                          ornt='horizontal')

            job_desc['contrasts_svg'] = contrasts_fname

        brainmask = analysis.layout.get(type='brainmask', space=space,
                                        **ents)[0]
        # Defer model fitting until a stat map actually needs computing.
        fmri_glm = None

        for contrast in contrast_matrix:
            stat_ents = preproc_ents.copy()
            stat_ents.pop('modality', None)
            stat_ents.update({'contrast': snake_to_camel(contrast),
                              'type': 'stat'})
            stat_fname = op.join(deriv_dir,
                                 analysis.layout.build_path(stat_ents,
                                                            strict=True))

            ortho_ents = stat_ents.copy()
            ortho_ents['type'] = 'ortho'
            ortho_fname = op.join(deriv_dir,
                                  analysis.layout.build_path(ortho_ents,
                                                             strict=True))

            # HRF variables are reported as estimates; the rest as contrasts.
            desc = {'name': contrast, 'image_file': ortho_fname}
            if contrast not in block.model['HRF_variables']:
                job_desc.setdefault('contrasts', []).append(desc)
            else:
                job_desc.setdefault('estimates', []).append(desc)

            # Skip recomputation when the stat map already exists on disk.
            if op.exists(stat_fname):
                continue

            if fmri_glm is None:
                fmri_glm = level1.FirstLevelModel(mask=brainmask.filename)
                fmri_glm.fit(fname, design_matrices=mat)

            # Default to a t-test when no contrast type is specified.
            stat_types = [c['type'] for c in block.contrasts if c['name'] == contrast]
            stat_type = stat_types[0] if stat_types else 'T'
            stat = fmri_glm.compute_contrast(contrast_matrix[contrast].values,
                                             {'T': 't', 'F': 'F'}[stat_type])
            stat.to_filename(stat_fname)

            nlp.plot_glass_brain(stat, colorbar=True, plot_abs=False,
                                 display_mode='lyrz', axes=None,
                                 output_file=ortho_fname)

        analyses.append(job_desc)

    return analyses
Example #6
0
    def _run_interface(self, runtime):
        """Fit a first-level GLM from a saved design matrix and write the
        full set of contrast maps (effect, variance, stat, z, p).
        """
        import nibabel as nb
        from nistats import first_level_model as level1
        mat = pd.read_csv(self.inputs.design_matrix,
                          delimiter='\t',
                          index_col=0)
        img = nb.load(self.inputs.bold_file)
        if isinstance(img, nb.dataobj_images.DataobjImage):
            # Ugly hack to ensure that retrieved data isn't cast to float64 unless
            # necessary to prevent an overflow
            # For NIfTI-1 files, slope and inter are 32-bit floats, so this is
            # "safe". For NIfTI-2 (including CIFTI-2), these fields are 64-bit,
            # so include a check to make sure casting doesn't lose too much.
            slope32 = np.float32(img.dataobj._slope)
            inter32 = np.float32(img.dataobj._inter)
            if max(np.abs(slope32 - img.dataobj._slope),
                   np.abs(inter32 - img.dataobj._inter)) < 1e-7:
                img.dataobj._slope = slope32
                img.dataobj._inter = inter32

        # Undefined traits fall back to None so nistats uses its defaults.
        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        smoothing_fwhm = self.inputs.smoothing_fwhm
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        flm = level1.FirstLevelModel(mask_img=mask_file,
                                     smoothing_fwhm=smoothing_fwhm)
        flm.fit(img, design_matrices=mat)

        # One list per statistical map type, filled in contrast order.
        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        out_ents = self.inputs.contrast_info[0]['entities']
        fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format
        for name, weights, contrast_type in prepare_contrasts(
                self.inputs.contrast_info, mat.columns.tolist()):
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })
            # output_type='all' returns every map type in one dict.
            maps = flm.compute_contrast(weights,
                                        contrast_type,
                                        output_type='all')

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):

                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['zscore_maps'] = zscore_maps
        self._results['pvalue_maps'] = pvalue_maps
        self._results['contrast_metadata'] = contrast_metadata

        return runtime
Example #7
0
    def _run_interface(self, runtime):
        """Build the design matrix, fit the GLM, and save design,
        correlation and contrast-matrix plots plus per-contrast
        effect-size maps with glass-brain figures.
        """
        import matplotlib
        matplotlib.use('Agg')  # headless backend: figures are only saved
        import seaborn as sns
        from matplotlib import pyplot as plt
        sns.set_style('white')
        plt.rcParams['svg.fonttype'] = 'none'  # keep SVG text as text
        plt.rcParams['image.interpolation'] = 'nearest'

        info = self.inputs.session_info

        img = nb.load(self.inputs.bold_file)
        vols = img.shape[3]

        events = pd.read_hdf(info['events'], key='events')
        confounds = pd.read_hdf(info['confounds'], key='confounds')
        if isdefined(self.inputs.contrast_info):
            contrast_spec = pd.read_hdf(self.inputs.contrast_info,
                                        key='contrasts')
        else:
            contrast_spec = pd.DataFrame()

        # Skip the cosine drift model when cosine regressors are already
        # present among the confound columns.
        mat = dm.make_design_matrix(
            frame_times=np.arange(vols) * info['repetition_time'],
            paradigm=events.rename(columns={
                'condition': 'trial_type',
                'amplitude': 'modulation'
            }),
            add_regs=confounds,
            add_reg_names=confounds.columns.tolist(),
            drift_model=None if 'Cosine00' in confounds.columns else 'cosine',
        )

        # Explanatory variables are the unique event conditions.
        exp_vars = events['condition'].unique().tolist()

        contrast_matrix, contrast_types = build_contrast_matrix(
            contrast_spec, mat, exp_vars)

        plt.set_cmap('viridis')
        plot_and_save('design.svg', nis.reporting.plot_design_matrix, mat)
        self._results['design_matrix_plot'] = os.path.join(
            runtime.cwd, 'design.svg')

        plot_and_save('correlation.svg', plot_corr_matrix,
                      mat.drop(columns='constant').corr(), len(exp_vars))
        self._results['correlation_matrix_plot'] = os.path.join(
            runtime.cwd, 'correlation.svg')

        plot_and_save('contrast.svg',
                      plot_contrast_matrix,
                      contrast_matrix.drop(['constant'], 'index'),
                      ornt='horizontal')
        self._results['contrast_matrix_plot'] = os.path.join(
            runtime.cwd, 'contrast.svg')

        # Undefined mask trait falls back to None (nistats default).
        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        flm = level1.FirstLevelModel(mask=mask_file)
        flm.fit(img, design_matrices=mat)

        estimate_maps = []
        contrast_maps = []
        estimate_metadata = []
        contrast_metadata = []
        estimate_map_plots = []
        contrast_map_plots = []
        stat_fmt = os.path.join(runtime.cwd, '{}.nii.gz').format
        plot_fmt = os.path.join(runtime.cwd, '{}.png').format
        for contrast, ctype in zip(contrast_matrix, contrast_types):
            es = flm.compute_contrast(contrast_matrix[contrast].values, {
                'T': 't',
                'F': 'F'
            }[ctype],
                                      output_type='effect_size')
            es_fname = stat_fmt(contrast)
            es.to_filename(es_fname)
            plot_fname = plot_fmt(contrast)
            nlp.plot_glass_brain(es,
                                 colorbar=True,
                                 plot_abs=False,
                                 display_mode='lyrz',
                                 axes=None,
                                 output_file=plot_fname)

            # Condition columns are reported as "estimates"; derived
            # contrasts go into the contrast outputs.
            if contrast in exp_vars:
                estimate_maps.append(es_fname)
                estimate_map_plots.append(plot_fname)
                estimate_metadata.append({
                    'contrast': contrast,
                    'type': 'effect'
                })
            else:
                contrast_maps.append(es_fname)
                contrast_map_plots.append(plot_fname)
                contrast_metadata.append({
                    'contrast': contrast,
                    'type': 'effect'
                })
        self._results['estimate_maps'] = estimate_maps
        self._results['contrast_maps'] = contrast_maps
        self._results['estimate_metadata'] = estimate_metadata
        self._results['contrast_metadata'] = contrast_metadata
        self._results['estimate_map_plots'] = estimate_map_plots
        self._results['contrast_map_plots'] = contrast_map_plots

        return runtime
Example #8
0
    def _run_interface(self, runtime):
        """Fit a first-level GLM on NIfTI or CIFTI BOLD data.

        Writes model-level maps (r-square, log-likelihood) plus, for each
        contrast, effect-size/variance/stat/z/p maps. CIFTI inputs are fit
        with ``run_glm`` and saved as dscalar files; anything else goes
        through ``FirstLevelModel``.
        """
        import nibabel as nb
        from nistats import first_level_model as level1
        from nistats.contrasts import compute_contrast
        mat = pd.read_csv(self.inputs.design_matrix,
                          delimiter='\t',
                          index_col=0)
        img = nb.load(self.inputs.bold_file)

        is_cifti = isinstance(img, nb.Cifti2Image)
        if isinstance(img, nb.dataobj_images.DataobjImage):
            # Ugly hack to ensure that retrieved data isn't cast to float64 unless
            # necessary to prevent an overflow
            # For NIfTI-1 files, slope and inter are 32-bit floats, so this is
            # "safe". For NIfTI-2 (including CIFTI-2), these fields are 64-bit,
            # so include a check to make sure casting doesn't lose too much.
            slope32 = np.float32(img.dataobj._slope)
            inter32 = np.float32(img.dataobj._inter)
            if max(np.abs(slope32 - img.dataobj._slope),
                   np.abs(inter32 - img.dataobj._inter)) < 1e-7:
                img.dataobj._slope = slope32
                img.dataobj._inter = inter32

        # Undefined traits fall back to None so nistats uses its defaults.
        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        smoothing_fwhm = self.inputs.smoothing_fwhm
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        if is_cifti:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.dscalar.nii').format
            labels, estimates = level1.run_glm(img.get_fdata(dtype='f4'),
                                               mat.values)
            model_attr = {
                'r_square':
                dscalar_from_cifti(
                    img, _get_voxelwise_stat(labels, estimates, 'r_square'),
                    'r_square'),
                'log_likelihood':
                dscalar_from_cifti(
                    img, _get_voxelwise_stat(labels, estimates, 'logL'),
                    'log_likelihood')
            }
        else:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format
            # minimize_memory=False keeps the per-voxel results needed
            # to expose r_square and log-likelihood below.
            flm = level1.FirstLevelModel(minimize_memory=False,
                                         mask_img=mask_file,
                                         smoothing_fwhm=smoothing_fwhm)
            flm.fit(img, design_matrices=mat)
            model_attr = {
                'r_square':
                flm.r_square[0],
                'log_likelihood':
                flm.masker_.inverse_transform(
                    _get_voxelwise_stat(flm.labels_[0], flm.results_[0],
                                        'logL'))
            }

        out_ents = self.inputs.contrast_info[0]['entities']

        # Save model level images

        model_maps = []
        model_metadata = []
        # BUGFIX: this loop previously rebound ``img``, clobbering the BOLD
        # image that the CIFTI contrast branch below still needs for
        # dscalar_from_cifti; use a distinct loop variable instead.
        for attr, model_img in model_attr.items():
            model_metadata.append({'stat': attr, **out_ents})
            fname = fname_fmt('model', attr)
            model_img.to_filename(fname)
            model_maps.append(fname)

        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        for name, weights, contrast_type in prepare_contrasts(
                self.inputs.contrast_info, mat.columns):
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })
            if is_cifti:
                contrast = compute_contrast(labels,
                                            estimates,
                                            weights,
                                            contrast_type=contrast_type)
                maps = {
                    map_type: dscalar_from_cifti(img,
                                                 getattr(contrast, map_type)(),
                                                 map_type)
                    for map_type in [
                        'z_score', 'stat', 'p_value', 'effect_size',
                        'effect_variance'
                    ]
                }

            else:
                maps = flm.compute_contrast(weights,
                                            contrast_type,
                                            output_type='all')

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):

                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['zscore_maps'] = zscore_maps
        self._results['pvalue_maps'] = pvalue_maps
        self._results['contrast_metadata'] = contrast_metadata
        self._results['model_maps'] = model_maps
        self._results['model_metadata'] = model_metadata

        return runtime
Example #9
0
    def _run_interface(self, runtime):
        """Assemble the design matrix from sparse/dense session info, fit
        the GLM, and save every statistical map type per contrast.
        """
        import nibabel as nb
        from nistats import design_matrix as dm
        from nistats import first_level_model as level1
        info = self.inputs.session_info
        img = nb.load(self.inputs.bold_file)
        vols = img.shape[3]

        # Sparse (event) regressors, renamed to nistats' expected columns.
        if info['sparse'] not in (None, 'None'):
            sparse = pd.read_hdf(info['sparse'],
                                 key='sparse').rename(columns={
                                     'condition': 'trial_type',
                                     'amplitude': 'modulation'
                                 })
            sparse = sparse.dropna(subset=['modulation'])  # Drop NAs
        else:
            sparse = None

        # Dense (confound) regressors; skip the cosine drift model when
        # cosine regressors were already supplied in the dense set.
        if info['dense'] not in (None, 'None'):
            dense = pd.read_hdf(info['dense'], key='dense')
            column_names = dense.columns.tolist()
            drift_model = None if 'cosine_00' in column_names else 'cosine'
        else:
            dense = None
            column_names = None
            drift_model = 'cosine'

        mat = dm.make_first_level_design_matrix(
            frame_times=np.arange(vols) * info['repetition_time'],
            events=sparse,
            add_regs=dense,
            add_reg_names=column_names,
            drift_model=drift_model,
        )

        mat.to_csv('design.tsv', sep='\t')
        self._results['design_matrix'] = os.path.join(runtime.cwd,
                                                      'design.tsv')

        # Undefined traits fall back to None so nistats uses its defaults.
        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        smoothing_fwhm = self.inputs.smoothing_fwhm
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        flm = level1.FirstLevelModel(mask_img=mask_file,
                                     smoothing_fwhm=smoothing_fwhm)
        flm.fit(img, design_matrices=mat)

        # One list per statistical map type, filled in contrast order.
        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        out_ents = self.inputs.contrast_info[0]['entities']
        fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format
        for name, weights, contrast_type in prepare_contrasts(
                self.inputs.contrast_info, mat.columns.tolist()):
            # output_type='all' returns every map type in one dict.
            maps = flm.compute_contrast(weights,
                                        contrast_type,
                                        output_type='all')
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):
                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['zscore_maps'] = zscore_maps
        self._results['pvalue_maps'] = pvalue_maps
        self._results['contrast_metadata'] = contrast_metadata

        return runtime
Example #10
0
    def _run_interface(self, runtime):
        """Build the design and contrast matrices, fit the GLM, and write
        one effect-size map per contrast column."""
        info = self.inputs.session_info

        bold = nb.load(self.inputs.bold_file)
        n_vols = bold.shape[3]

        events = pd.read_hdf(info['events'], key='events')

        # Load confounds when present; suppress the cosine drift model if
        # cosine regressors ('Cosine00') already appear among them.
        if info['confounds'] is not None and info['confounds'] != 'None':
            confounds = pd.read_hdf(info['confounds'], key='confounds')
            confound_names = confounds.columns.tolist()
            drift_model = None if 'Cosine00' in confound_names else 'cosine'
        else:
            confounds, confound_names, drift_model = None, None, 'cosine'

        contrast_spec = (
            pd.read_hdf(self.inputs.contrast_info, key='contrasts')
            if isdefined(self.inputs.contrast_info)
            else pd.DataFrame())

        design = dm.make_design_matrix(
            frame_times=np.arange(n_vols) * info['repetition_time'],
            paradigm=events.rename(columns={
                'condition': 'trial_type',
                'amplitude': 'modulation'
            }),
            add_regs=confounds,
            add_reg_names=confound_names,
            drift_model=drift_model,
        )

        # Assume that explanatory variables == HRF-convolved variables
        exp_vars = events['condition'].unique().tolist()

        contrast_matrix, contrast_types = build_contrast_matrix(
            contrast_spec, design, exp_vars)

        design.to_csv('design.tsv', sep='\t')
        self._results['design_matrix'] = os.path.join(runtime.cwd,
                                                      'design.tsv')

        contrast_matrix.to_csv('contrasts.tsv', sep='\t')
        self._results['contrast_matrix'] = os.path.join(
            runtime.cwd, 'contrasts.tsv')

        # Undefined mask trait falls back to None (nistats default).
        mask_file = (self.inputs.mask_file
                     if isdefined(self.inputs.mask_file) else None)
        flm = level1.FirstLevelModel(mask=mask_file)
        flm.fit(bold, design_matrices=design)

        contrast_maps = []
        contrast_metadata = []
        stat_fmt = os.path.join(runtime.cwd, '{}.nii.gz').format
        stat_codes = {'T': 't', 'F': 'F'}
        for contrast, ctype in zip(contrast_matrix, contrast_types):
            effect = flm.compute_contrast(contrast_matrix[contrast].values,
                                          stat_codes[ctype],
                                          output_type='effect_size')
            out_fname = stat_fmt(contrast)
            effect.to_filename(out_fname)

            contrast_maps.append(out_fname)
            contrast_metadata.append({'contrast': contrast, 'type': 'effect'})
        self._results['contrast_maps'] = contrast_maps
        self._results['contrast_metadata'] = contrast_metadata

        return runtime
Example #11
0
    def _run_interface(self, runtime):
        """Fit one least-squares-separate (LSS) model per trial and write a
        4D "beta series" image per trial type.

        For each trial yielded by ``_lss_events_iterator`` a FirstLevelModel
        is refit with that trial modeled as its own condition, and the
        trial's effect-size map is collected.  Trial types with at least 3
        trials are concatenated into ``desc-<type>_betaseries.nii.gz`` in
        ``runtime.cwd``.

        Results set: ``beta_maps`` (list of betaseries filenames) and
        ``design_matrices`` (dict: trial index -> design matrix).
        """
        from nistats import first_level_model
        import nibabel as nib
        import os

        # Repetition time comes from the BIDS sidecar metadata.
        t_r = self.inputs.bold_metadata['RepetitionTime']

        # Build the nuisance-regressor matrix, if any confounds were selected.
        if self.inputs.confounds_file and self.inputs.selected_confounds:
            confounds = _select_confounds(self.inputs.confounds_file,
                                          self.inputs.selected_confounds)
        else:
            confounds = None

        # nistats wants a period (seconds), not a cutoff frequency (Hz).
        high_pass_period = int(1 / self.inputs.high_pass)

        # One model object is reused; .fit() below re-estimates it per trial.
        model = first_level_model.FirstLevelModel(
            t_r=t_r,
            slice_time_ref=0,
            hrf_model=self.inputs.hrf_model,
            mask=self.inputs.mask_file,
            smoothing_fwhm=self.inputs.smoothing_kernel,
            standardize=True,
            signal_scaling=0,
            period_cut=high_pass_period,
            drift_model='cosine',
            verbose=1)

        # trial_type -> list of per-trial effect-size maps
        beta_maps = {}
        # trial index -> design matrix used for that trial's model
        design_matrix_collector = {}
        for target_trial_df, trial_type, trial_idx in \
                _lss_events_iterator(self.inputs.events_file):

            # Refit with only this trial labeled as its own condition.
            model.fit(self.inputs.bold_file,
                      events=target_trial_df,
                      confounds=confounds)

            # Effect size (beta) for the isolated trial's regressor.
            beta_map = model.compute_contrast(trial_type,
                                              output_type='effect_size')
            design_matrix_collector[trial_idx] = model.design_matrices_[0]
            beta_maps.setdefault(trial_type, []).append(beta_map)

        # Concatenate each trial type's betas into a 4D series file.
        beta_series_template = os.path.join(
            runtime.cwd, 'desc-{trial_type}_betaseries.nii.gz')
        beta_series_lst = []
        for t_type, betas in beta_maps.items():
            size_check = len(betas)
            if size_check < 3:
                # A "series" of fewer than 3 volumes is not useful for
                # downstream correlation analyses; skip it with a warning.
                logging.warning(
                    'At least 3 trials are needed '
                    'for a beta series: {trial_type} has {num}'.format(
                        trial_type=t_type, num=size_check))
            else:
                beta_series = nib.funcs.concat_images(betas)
                out_file = beta_series_template.format(trial_type=t_type)
                nib.save(beta_series, out_file)
                beta_series_lst.append(out_file)

        # BUGFIX: set the results once, after the loop.  These assignments
        # previously sat inside the loop body, so they re-ran per trial type
        # and never ran at all when beta_maps was empty.
        self._results['beta_maps'] = beta_series_lst
        self._results['design_matrices'] = design_matrix_collector
        return runtime
Exemple #12
0
    def _run_interface(self, runtime):
        """Fit per-trial LSS models (optionally with an FIR basis) and write
        one beta series per condition plus the average model residual.

        With ``hrf_model == 'fir'`` each FIR delay of the isolated trial
        becomes its own series (``<type>Delay<k>Vol``); otherwise a single
        beta map per trial is collected via ``_calc_beta_map``.  Residual
        images from every per-trial fit are averaged and written to
        ``desc-residuals_bold.nii.gz``.

        Results set: ``beta_maps``, ``design_matrices``, ``residual``.
        """
        from nistats import first_level_model
        # BUGFIX: nibabel is used below (Nifti2Image, concat_images, save)
        # but was never imported in this method.
        import nibabel as nib
        import os

        # Repetition time comes from the BIDS sidecar metadata.
        t_r = self.inputs.bold_metadata['RepetitionTime']

        # Build the nuisance-regressor matrix, if any confounds were selected.
        if self.inputs.confounds_file and self.inputs.selected_confounds:
            confounds = _select_confounds(self.inputs.confounds_file,
                                          self.inputs.selected_confounds)
        else:
            confounds = None

        # minimize_memory=False keeps model.residuals available after fitting.
        model = first_level_model.FirstLevelModel(
            t_r=t_r,
            slice_time_ref=0,
            hrf_model=self.inputs.hrf_model,
            mask=self.inputs.mask_file,
            smoothing_fwhm=self.inputs.smoothing_kernel,
            signal_scaling=self.inputs.signal_scaling,
            high_pass=self.inputs.high_pass,
            drift_model='cosine',
            verbose=1,
            fir_delays=self.inputs.fir_delays,
            minimize_memory=False,
        )

        # condition name -> list of per-trial beta maps
        beta_maps = {}
        # running voxelwise sum of residuals across per-trial fits
        residuals = None
        # trial index -> design matrix (NOTE: only filled on the non-FIR
        # branch, matching the original behavior)
        design_matrix_collector = {}
        for target_trial_df, trial_type, trial_idx in \
                _lss_events_iterator(self.inputs.events_file):

            # Refit with only this trial labeled as its own condition.
            model.fit(self.inputs.bold_file,
                      events=target_trial_df,
                      confounds=confounds)

            if self.inputs.hrf_model == 'fir':
                # Finite-impulse-response modeling: one map per delay.
                for delay in self.inputs.fir_delays:
                    delay_ttype = trial_type + '_delay_{}'.format(delay)
                    # Rename "_delay_k" -> "DelaykVol" for BIDS-ish naming.
                    new_delay_ttype = delay_ttype.replace(
                        '_delay_{}'.format(delay), 'Delay{}Vol'.format(delay))
                    beta_map = _calc_beta_map(model, delay_ttype,
                                              self.inputs.hrf_model,
                                              self.inputs.return_tstat)
                    beta_maps.setdefault(new_delay_ttype, []).append(beta_map)
            else:
                beta_map = _calc_beta_map(model, trial_type,
                                          self.inputs.hrf_model,
                                          self.inputs.return_tstat)
                design_matrix_collector[trial_idx] = model.design_matrices_[0]
                beta_maps.setdefault(trial_type, []).append(beta_map)

            # Accumulate residuals; divided by the trial count afterwards.
            if residuals is None:
                residuals = model.residuals[0].get_fdata()
            else:
                residuals += model.residuals[0].get_fdata()

        # trial_idx is 0-based, so the number of trials is trial_idx + 1.
        ave_residual = residuals / (trial_idx + 1)
        residual_file = os.path.join(runtime.cwd, 'desc-residuals_bold.nii.gz')
        nib.Nifti2Image(
            ave_residual,
            model.residuals[0].affine,
            model.residuals[0].header,
        ).to_filename(residual_file)

        # Concatenate each condition's betas into a 4D series file.
        beta_series_template = os.path.join(
            runtime.cwd, 'desc-{trial_type}_betaseries.nii.gz')
        beta_series_lst = []
        for t_type, betas in beta_maps.items():
            beta_series = nib.funcs.concat_images(betas)
            out_file = beta_series_template.format(trial_type=t_type)
            nib.save(beta_series, out_file)
            beta_series_lst.append(out_file)

        self._results['beta_maps'] = beta_series_lst
        self._results['design_matrices'] = design_matrix_collector
        self._results['residual'] = residual_file
        return runtime
Exemple #13
0
    def _run_interface(self, runtime):
        """Fit a single least-squares-all (LSA) model and write one beta
        series per original trial type, plus the model residual.

        ``_lsa_events_converter`` gives every trial a unique name
        (``trial_type``) while keeping its ``original_trial_type``; the model
        is fit once and a beta map is extracted per trial, then grouped by
        original type into ``desc-<type>_betaseries.nii.gz``.

        Results set: ``beta_maps``, ``design_matrices``, ``residual``.
        """
        from nistats import first_level_model
        # BUGFIX: nibabel is used below (concat_images, save) but was never
        # imported in this method.
        import nibabel as nib
        import os

        # Repetition time comes from the BIDS sidecar metadata.
        t_r = self.inputs.bold_metadata['RepetitionTime']

        # Build the nuisance-regressor matrix, if any confounds were selected.
        if self.inputs.confounds_file and self.inputs.selected_confounds:
            confounds = _select_confounds(self.inputs.confounds_file,
                                          self.inputs.selected_confounds)
        else:
            confounds = None

        # minimize_memory=False keeps model.residuals available after fitting.
        model = first_level_model.FirstLevelModel(
            t_r=t_r,
            slice_time_ref=0,
            hrf_model=self.inputs.hrf_model,
            mask=self.inputs.mask_file,
            smoothing_fwhm=self.inputs.smoothing_kernel,
            signal_scaling=self.inputs.signal_scaling,
            high_pass=self.inputs.high_pass,
            drift_model='cosine',
            verbose=1,
            minimize_memory=False,
        )

        # original trial type -> list of per-trial beta maps
        beta_maps = {}
        # Single fit: every trial is its own regressor in one design matrix.
        lsa_df = _lsa_events_converter(self.inputs.events_file)
        model.fit(self.inputs.bold_file, events=lsa_df, confounds=confounds)
        design_matrix = model.design_matrices_[0]
        for i_trial in lsa_df.index:
            t_name = lsa_df.loc[i_trial, 'trial_type']
            t_type = lsa_df.loc[i_trial, 'original_trial_type']

            # Beta (or t-stat) map for this trial's unique regressor.
            beta_map = _calc_beta_map(model, t_name, self.inputs.hrf_model,
                                      self.inputs.return_tstat)
            beta_maps.setdefault(t_type, []).append(beta_map)

        # Residual of the single LSA fit.
        residual_file = os.path.join(runtime.cwd, 'desc-residuals_bold.nii.gz')
        model.residuals[0].to_filename(residual_file)

        # Concatenate each trial type's betas into a 4D series file.
        beta_series_template = os.path.join(
            runtime.cwd, 'desc-{trial_type}_betaseries.nii.gz')
        beta_series_lst = []
        for t_type, betas in beta_maps.items():
            beta_series = nib.funcs.concat_images(betas)
            out_file = beta_series_template.format(trial_type=t_type)
            nib.save(beta_series, out_file)
            beta_series_lst.append(out_file)

        self._results['beta_maps'] = beta_series_lst
        self._results['design_matrices'] = [design_matrix]
        self._results['residual'] = residual_file
        return runtime