Example #1
def test_z_score_opposite_contrast():
    fmri, mask = generate_fake_fmri(shape=(50, 20, 50),
                                    length=96,
                                    rand_gen=np.random.RandomState(42))

    nifti_masker = NiftiMasker(mask_img=mask)
    data = nifti_masker.fit_transform(fmri)

    frametimes = np.linspace(0, (96 - 1) * 2, 96)

    for i in [0, 20]:
        design_matrix = make_first_level_design_matrix(
            frametimes,
            hrf_model='spm',
            add_regs=np.array(data[:, i]).reshape(-1, 1))
        c1 = np.array([1] + [0] * (design_matrix.shape[1] - 1))
        c2 = np.array([0] + [1] + [0] * (design_matrix.shape[1] - 2))
        contrasts = {'seed1 - seed2': c1 - c2, 'seed2 - seed1': c2 - c1}
        fmri_glm = FirstLevelModel(t_r=2.,
                                   noise_model='ar1',
                                   standardize=False,
                                   hrf_model='spm',
                                   drift_model='cosine')
        fmri_glm.fit(fmri, design_matrices=design_matrix)
        z_map_seed1_vs_seed2 = fmri_glm.compute_contrast(
            contrasts['seed1 - seed2'], output_type='z_score')
        z_map_seed2_vs_seed1 = fmri_glm.compute_contrast(
            contrasts['seed2 - seed1'], output_type='z_score')
        assert_almost_equal(z_map_seed1_vs_seed2.get_fdata().min(),
                            -z_map_seed2_vs_seed1.get_fdata().max(),
                            decimal=10)
        assert_almost_equal(z_map_seed1_vs_seed2.get_fdata().max(),
                            -z_map_seed2_vs_seed1.get_fdata().min(),
                            decimal=10)
Example #2
def create_bmap(sub_dir: Union[str, os.PathLike],
                noise_model: str = 'ar1',
                hrf_model: str = 'spm',
                drift_model: str = None,
                fwhm: int = 8,
                **kwargs):
    sub_id = os.path.basename(sub_dir)
    tsk_prfx = '_ses-04_run-01_task-memory_'
    outfile_suffix = '_bmap-effectsize.nii.gz'
    confounds = pd.read_csv(pjoin(sub_dir,
                                  sub_id + tsk_prfx + 'confounds.tsv'),
                            sep='\t')
    events = pd.read_csv(pjoin(sub_dir, sub_id + tsk_prfx + 'events.tsv'),
                         sep='\t')
    contrast_list = []
    sub_out_dir = pjoin(xpu(output_dir), 'derivatives', sub_id, 'beta_maps')
    out_filename = pjoin(sub_out_dir, sub_id + outfile_suffix)
    os.makedirs(sub_out_dir, exist_ok=True)
    fmri_img = nib.load(pjoin(sub_dir, sub_id + tsk_prfx + 'bold.nii.gz'))
    nscans, t_r = fmri_img.shape[-1], fmri_img.header.get_zooms()[-1]
    frame_times = np.arange(confounds.shape[0]) * t_r
    for row in tqdm(list(events.iterrows())):
        tnum = row[1].trial_number
        # Relabel: the current trial keeps its condition label; every other
        # trial is prefixed with 'X_' so it is modeled as a nuisance condition.
        events['trial_type'] = [
            other.condition if other.trial_number == tnum
            else 'X_' + other.condition
            for other in events.itertuples()
        ]
        mat_params = {
            'frame_times': frame_times,
            'events': events[['onset', 'duration', 'trial_type']],
            'add_regs': confounds,
            'drift_model': drift_model,
            'hrf_model': hrf_model
        }
        trial_matrix = make_first_level_design_matrix(**mat_params)
        trial_contrast = pd.Series(
            np.array([1] +
                     list(np.repeat(0, trial_matrix.shape[1] - 1)))).values
        glm_params = {
            't_r': t_r,
            'drift_model': drift_model,
            'standardize': True,
            'noise_model': noise_model,
            'hrf_model': hrf_model,
            'smoothing_fwhm': fwhm
        }
        fit_params = {'run_imgs': fmri_img, 'design_matrices': trial_matrix}
        con_params = {
            'contrast_def': trial_contrast,
            'output_type': 'effect_size'
        }
        contrast_list.append(
            FirstLevelModel(**glm_params).fit(
                **fit_params).compute_contrast(**con_params))
    nib.save(img=nilearn.image.concat_imgs(contrast_list),
             filename=out_filename)
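
# This loop is the least-squares separate (LSS) beta-series approach: each
# trial gets its own GLM in which it keeps its condition label while every
# other trial is renamed ('X_' prefix) into a shared nuisance condition.
# A minimal sketch of that relabeling on a toy events table (column names
# assumed to match the function above):
import pandas as pd

toy_events = pd.DataFrame({'trial_number': [1, 2, 3],
                           'condition': ['enc', 'enc', 'ctl']})

def lss_labels(events, tnum):
    return [row.condition if row.trial_number == tnum
            else 'X_' + row.condition
            for row in events.itertuples()]

print(lss_labels(toy_events, 2))   # ['X_enc', 'enc', 'X_ctl']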
Example #3
def get_mapper_response_hrf(subject, session, sourcedata):

    assert (session[-1] == '1')

    behavior = get_behavior(subject, session, sourcedata)

    responses = behavior.xs(
        'response', 0, 'trial_type',
        drop_level=False).reset_index('trial_type')[['onset', 'trial_type']]
    responses['duration'] = 0.0
    responses = responses[responses.onset > 0]

    tr = get_tr(subject, session)
    frametimes = np.linspace(0, (125 - 1) * tr, 125)

    response_hrf = responses.groupby('run').apply(
        lambda d: make_first_level_design_matrix(
            frametimes,
            # pass this run's events so the convolved 'response' column exists
            events=d[['onset', 'duration', 'trial_type']],
            drift_order=0))
    return response_hrf[['response']]
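
# Hypothetical usage (argument values are placeholders; note the function
# asserts that the session name ends in '1'):
response_hrf = get_mapper_response_hrf('02', 'ses-mapper1', '/data/sourcedata')
print(response_hrf.head())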
Example #4
def create_brsa_matrix(subject_dir, events, n_vol):
    """Create a design matrix for Bayesian RSA."""
    # load confound files
    runs = events['run'].unique()
    n_run = len(runs)
    confound = {}
    for run in runs:
        confound_file = os.path.join(
            subject_dir, 'BOLD', f'functional_run_{run}', 'QA', 'confound.txt'
        )
        confound[run] = np.loadtxt(confound_file)

    # explanatory variables of interest
    n_ev = events['trial_type'].nunique()
    evs = np.arange(1, n_ev + 1)

    # create full design matrix
    df_list = []
    frame_times = np.arange(n_vol / n_run) * 2
    for run in runs:
        # create a design matrix with one column per trial type and confounds
        df_run = first_level.make_first_level_design_matrix(
            frame_times, events=events.query(f'run == {run}'), add_regs=confound[run]
        )

        # reorder columns for consistency across runs; confounds go last
        regs = df_run.filter(like='reg', axis=1).columns
        drifts = df_run.filter(like='drift', axis=1).columns
        columns = np.hstack((evs, drifts, ['constant'], regs))
        df_list.append(df_run.reindex(columns=columns))
    df_mat = pd.concat(df_list, axis=0)

    # The number of confound regressors can vary by run, so reindexing
    # leaves NaNs in columns that a given run lacks; zero them out.
    df_mat.fillna(0, inplace=True)

    # package for use with BRSA
    mat = df_mat.to_numpy()[:, :n_ev]
    nuisance = df_mat.to_numpy()[:, n_ev:]
    scan_onsets = np.arange(0, n_vol, n_vol / n_run)
    return mat, nuisance, scan_onsets
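
# A sketch of how the returned arrays might feed BrainIAK's Bayesian RSA;
# `subject_dir`, `events` and the masked `image` array are assumed to exist
# (see Example #6 for the full pipeline), and brainiak must be installed.
from brainiak.reprsimil import brsa

mat, nuisance, scan_onsets = create_brsa_matrix(subject_dir, events,
                                                n_vol=image.shape[0])
model = brsa.GBRSA()
model.fit([image], [mat], nuisance=nuisance, scan_onsets=scan_onsets)
print(model.C_.shape)   # estimated similarity structure across trial types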
Example #5
def get_contrasts(fmri_img: Union[str, PathLike,
                                  PosixPath, Nifti1Image],
                  events: Union[str, PathLike,
                                PosixPath, pd.DataFrame],
                  desc: str = 'effect_size',
                  design_kws: Union[dict, Bunch] = None,
                  glm_kws: Union[dict, Bunch] = None,
                  masker_kws: Union[dict, Bunch] = None,
                  standardize: bool = True,
                  scale: bool = False,
                  scale_between: tuple = (0, 1),
                  maximize: bool = False,
                  masker: [MultiNiftiMasker, NiftiLabelsMasker,
                           NiftiMapsMasker, NiftiMasker] = None,
                  feature_labels: Union[Sequence, pd.Index] = None,
                  session=None,
                  **kwargs
                  ) -> Bunch:
    """
    Return dict-like structure containing experimental contrasts.


    Using a ``nilearn.glm.first_level.FirstLevelModel`` object,
    contrasts are first computed trial-wise. Then, the same is done
    for each experimental condition in ``trial_type_cols`` if a
    list of strings is provided.

    Args:
        fmri_img: str, PathLike, PosixPath or Nifti1Image
            In-memory or path pointing to a ``nibabel.nifti1.Nifti1Image``.

        events: str, PathLike, PosixPath or DataFrame
            In-memory or path pointing to a ``pandas.DataFrame``.

        desc: str (Default = 'effect_size')
            String passed to the ``output_type`` parameter of
            ``nilearn.glm.first_level.FirstLevelModel.compute_contrast``.

        design_kws: dict or Bunch (Default = None)
            Dict-like mapping of keyword arguments passed to
            ``nilearn.glm.first_level.make_first_level_design_matrix``.
            If a ``session`` object is passed in the parameters,
            the value under the corresponding key is used.

        glm_kws: dict or Bunch (Default = None)
            Dict-like mapping of keyword arguments passed to
            ``nilearn.glm.first_level.FirstLevelModel.__init__``.
            If a ``session`` object is passed in the parameters,
            the value under the corresponding key is used.

        masker_kws: dict or Bunch (Default = None)
            Dict-like mapping of keyword arguments passed to
            ``masker.__init__``.
            If a ``session`` object is passed in the parameters,
            the value under the corresponding key is used.

        standardize: bool (Default = True)
            If true (by default), the extracted brain signals are
            standardized using a ``sklearn.preprocessing.StandardScaler``
            object (demeaning and scaling to unit variance). It is generally
            advised to standardize data for machine-learning operations.
            See notes for documentation, tutorials and more.

        scale: bool (Default = False)
            If true, the extracted brain signals are
            scaled (between 0 and 1 by default) using a
            ``sklearn.preprocessing.MinMaxScaler`` object. It is generally
            advised to standardize data for machine-learning operations.
            See notes for documentation, tutorials and more.

        scale_between: tuple (Default = (0, 1))
            Values between which the signal should be scaled.
            Default is (0, 1) - left = min, right = max.
            Only used if ``scale`` parameter is True.

        maximize: bool (Default = False)
            If true, scale each feature by its maximum absolute value.
            From the docs of ``sklearn.preprocessing.MaxAbsScaler``:
                '[...] Scales and translates each feature individually
                such that the maximal absolute value of each feature in
                training set is 1.0. Does not shift/center the data,
                and thus does not destroy any sparsity.'

        masker: MultiNiftiMasker, NiftiLabelsMasker,
                NiftiMapsMasker or NiftiMasker (Default = None)
            Masker object from the ``nilearn.input_data`` module meant
            to perform brain signal extraction (conversion from 4D or 3D
            image to 2D data).
            If omitted, a NiftiMasker with default parameters is used.

        feature_labels: List or pd.Index (Default = None)
            List of feature names used as columns for the brain signal matrix.
            Number of labels and number of features must match.
            An error is raised otherwise.

        session: dict or Bunch (Default = None)
            Dict-like structure containing all required and/or optional
            parameters. The functions ``fetch_fmriprep_session`` and
            ``get_fmri_session`` from ``cimaq_decoding_utils``
            return a ``session`` object. It is similar to the return
            values of ``nilearn.datasets.fetch{dataset_name}`` functions.

    Returns: ``sklearn.utils.Bunch``
        Dict-like structure with the following keys:
        ['model', 'contrast_img', 'signals',
         'feature_labels', 'condition_labels']

    Notes:
        https://scikit-learn.org/stable/modules/preprocessing.html#preprocessing
    """

    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler
    from sklearn.preprocessing import StandardScaler
    from cimaq_decoding_utils import get_frame_times, get_t_r

    # Parameter initialization
    design_defs, glm_defs = {}, {}
    fmri_img = nimage.load_img(fmri_img)
    if not isinstance(events, pd.DataFrame):
        sep = '\t' if splitext(events)[1].startswith('.t') else ','
        events = pd.read_csv(events, sep=sep)
    if session is not None:
        design_defs.update(session.design_defs)
        glm_defs.update(session.glm_defs)

    t_r, frame_times = get_t_r(fmri_img), get_frame_times(fmri_img)

    if design_kws is not None:
        design_defs.update(design_kws)
    if glm_kws is not None:
        glm_defs.update(glm_kws)

    # GLM initialization and contrast computation
    design = make_first_level_design_matrix(frame_times, events=events.iloc[1:, :],
                                            **design_defs)

    model = FirstLevelModel(**glm_defs).fit(run_imgs=fmri_img,
                                            design_matrices=design.iloc[:, 1:])
    contrasts = nimage.concat_imgs([model.compute_contrast(
                    trial, output_type=desc) for trial in
                    tqdm_(design.columns[:-1].astype(str),
                          ncols=100,
                          desc='Computing Contrasts')])

    # Brain signals extraction
    pipe_components = ((standardize, 'standardize', StandardScaler()),
                       (maximize, 'maximize', MaxAbsScaler()),
                       (scale, 'scale', MinMaxScaler(scale_between)))

    pipe_components = [item[1:] for item in
                       list(filter(lambda x: x[0], pipe_components))]
    if masker is None:  # fall back to a default NiftiMasker, as documented
        masker = NiftiMasker(**(masker_kws if masker_kws is not None else {}))
        masker.fit(fmri_img)
    signals = masker.transform_single_imgs(contrasts)
    if pipe_components:
        pipeline = Pipeline(pipe_components)
        signals = pipeline.fit_transform(signals)
    signals = pd.DataFrame(signals,
                           index=design.iloc[:, :-1].columns)

    if feature_labels is not None:
        signals = signals.set_axis(feature_labels, axis=1)

    return Bunch(model=model, contrast_img=contrasts,
                 signals=signals, feature_labels=feature_labels)
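
# A hedged usage sketch: file names are placeholders, and the masker is
# fitted up front because get_contrasts() transforms the contrast images.
masker = NiftiMasker().fit('sub-01_task-memory_bold.nii.gz')
results = get_contrasts(fmri_img='sub-01_task-memory_bold.nii.gz',
                        events='sub-01_task-memory_events.tsv',
                        desc='effect_size',
                        masker=masker)
print(results.signals.shape)   # (n_trials, n_voxels)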
Example #6
def main(study_dir, subject, roi, res_dir, blocks='combined'):
    # load task information
    vols = rsa.load_vol_info(study_dir, subject)

    if blocks == 'walk':
        events = vols.query('sequence_type == 1').copy()
    elif blocks == 'random':
        events = vols.query('sequence_type == 2').copy()
    elif blocks in ['combined', 'separate']:
        events = vols.query('sequence_type > 0').copy()
        if blocks == 'separate':
            # separately model trials in walk and random blocks
            n_item = events['trial_type'].nunique()
            events['trial_type'] = (events['trial_type'] +
                                    (events['sequence_type'] - 1) * n_item)
    else:
        raise ValueError(f'Invalid blocks option: {blocks}')

    # get mask image
    subject_dir = os.path.join(study_dir, f'tesser_{subject}')
    mask_image = os.path.join(subject_dir, 'anatomy', 'antsreg', 'data',
                              'funcunwarpspace', 'rois', 'mni',
                              f'{roi}.nii.gz')

    # load masked functional images
    runs = list(range(1, 7))
    bold_images = [
        os.path.join(subject_dir, 'BOLD', 'antsreg', 'data',
                     f'functional_run_{run}_bold_mcf_brain_corr_notemp.feat',
                     'filtered_func_data.nii.gz') for run in runs
    ]
    masker = input_data.NiftiMasker(mask_img=mask_image, standardize='zscore')
    image = np.vstack(
        [masker.fit_transform(bold_image) for bold_image in bold_images])

    # load confound files
    confound = {}
    for run in runs:
        confound_file = os.path.join(subject_dir, 'BOLD',
                                     f'functional_run_{run}', 'QA',
                                     'confound.txt')
        confound[run] = np.loadtxt(confound_file)

    # create full design matrix
    frame_times = np.arange(image.shape[0] / len(runs)) * 2
    n_ev = events['trial_type'].nunique()
    evs = np.arange(1, n_ev + 1)
    df_list = []
    for run in runs:
        df_run = fl.make_first_level_design_matrix(
            frame_times,
            events=events.query(f'run == {run}'),
            add_regs=confound[run])
        regs = df_run.filter(like='reg', axis=1).columns
        drifts = df_run.filter(like='drift', axis=1).columns
        columns = np.hstack((evs, drifts, ['constant'], regs))
        df_list.append(df_run.reindex(columns=columns))
    df_mat = pd.concat(df_list, axis=0)

    # The number of confound regressors can vary by run, so reindexing
    # leaves NaNs in columns that a given run lacks; zero them out.
    df_mat.fillna(0, inplace=True)

    mat = df_mat.to_numpy()[:, :n_ev]
    nuisance = df_mat.to_numpy()[:, n_ev:]

    # run Bayesian RSA
    scan_onsets = np.arange(0, image.shape[0], image.shape[0] / len(runs))
    model = brsa.GBRSA()
    model.fit([image], [mat], nuisance=nuisance, scan_onsets=scan_onsets)

    # save results
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
    var_names = [
        'U', 'L', 'C', 'nSNR', 'sigma', 'rho', 'beta', 'beta0', 'X0',
        'beta0_null', 'X0_null', 'n_nureg'
    ]
    results = {var: getattr(model, var + '_') for var in var_names}
    out_file = os.path.join(res_dir, f'sub-{subject}_brsa.npz')
    np.savez(out_file, **results)
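
# Hypothetical invocation (all argument values are placeholders; the ROI
# name must match a mask file under the subject's anatomy directory):
main(study_dir='/data/tesser', subject='101', roi='b_hip',
     res_dir='/data/tesser/batch/glm/brsa', blocks='separate')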
Example #7

def main(subject,
         session,
         sourcedata,
         smoothed=False,
         pca_confounds=False,
         space='fsnative',
         n_jobs=14):

    derivatives = op.join(sourcedata, 'derivatives')

    base_dir = 'glm_stim1_surf'

    if smoothed:
        base_dir += '.smoothed'

    if pca_confounds:
        base_dir += '.pca_confounds'

    base_dir = op.join(derivatives, base_dir, f'sub-{subject}',
                       f'ses-{session}', 'func')

    if not op.exists(base_dir):
        os.makedirs(base_dir)

    runs = range(1, 9)

    behavior = []
    for run in runs:
        behavior.append(
            pd.read_table(
                op.join(
                    sourcedata,
                    f'sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_events.tsv'
                )))

    behavior = pd.concat(behavior, keys=runs, names=['run'])
    behavior['subject'] = subject
    behavior = behavior.reset_index().set_index(
        ['subject', 'run', 'trial_type'])

    stimulus1 = behavior.xs('stimulus 1', 0, 'trial_type',
                            drop_level=False).reset_index('trial_type')[[
                                'onset', 'trial_nr', 'trial_type'
                            ]]
    stimulus1['duration'] = 0.6
    stimulus1['trial_type'] = stimulus1.trial_nr.map(
        lambda trial: f'trial_{trial}')

    print(stimulus1)

    stimulus2 = behavior.xs(
        'stimulus 2', 0, 'trial_type',
        drop_level=False).reset_index('trial_type')[['onset', 'trial_type']]
    stimulus2['duration'] = 0.6

    n2 = behavior.xs('stimulus 2', 0, 'trial_type',
                     drop_level=False).reset_index('trial_type')[[
                         'onset', 'trial_type', 'n2'
                     ]]
    n2['duration'] = 0.6

    def zscore(n):
        return (n - n.mean()) / n.std()

    n2['modulation'] = zscore(n2['n2'])
    n2['trial_type'] = 'n_dots2'

    p2 = behavior.xs('stimulus 2', 0, 'trial_type',
                     drop_level=False).reset_index('trial_type')[[
                         'onset', 'trial_type', 'prob2'
                     ]]
    p2 = p2[p2.prob2 == 1.0]
    p2['duration'] = 0.6
    p2['trial_type'] = 'certain2'

    events = pd.concat((stimulus1, stimulus2, n2, p2)).sort_values('onset')
    events['modulation'] = events['modulation'].fillna(1.0)

    # # sub-02_ses-7t2_task-task_run-1_space-fsaverage_hemi-R_bold.func

    keys = [(run, hemi) for run, hemi in product(runs, ['L', 'R'])]

    if smoothed:
        surfs = [
            op.join(
                sourcedata,
                f'derivatives/smoothed/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_space-{space}_hemi-{hemi}_desc-smoothed_bold.func.gii'
            ) for run, hemi in keys
        ]
    else:
        surfs = [
            op.join(
                sourcedata,
                f'derivatives/fmriprep/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_space-{space}_hemi-{hemi}_bold.func.gii'
            ) for run, hemi in keys
        ]

    fmriprep_confounds_include = [
        'global_signal', 'dvars', 'framewise_displacement', 'trans_x',
        'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z', 'a_comp_cor_00',
        'a_comp_cor_01', 'a_comp_cor_02', 'a_comp_cor_03', 'cosine00',
        'cosine01', 'cosine02', 'cosine03', 'non_steady_state_outlier00',
        'non_steady_state_outlier01', 'non_steady_state_outlier02'
    ]
    fmriprep_confounds = [
        op.join(
            sourcedata,
            f'derivatives/fmriprep/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_desc-confounds_timeseries.tsv'
        ) for run, hemi in keys
    ]
    fmriprep_confounds = [
        pd.read_table(cf)[fmriprep_confounds_include]
        for cf in fmriprep_confounds
    ]

    retroicor_confounds = [
        op.join(
            sourcedata,
            f'derivatives/physiotoolbox/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_desc-retroicor_timeseries.tsv'
        ) for run, hemi in keys
    ]
    retroicor_confounds = [
        pd.read_table(cf, header=None, usecols=range(18))
        if op.exists(cf) else pd.DataFrame(np.zeros((160, 0)))
        for cf in retroicor_confounds
    ]

    confounds = [
        pd.concat((rcf, fcf), axis=1)
        for rcf, fcf in zip(retroicor_confounds, fmriprep_confounds)
    ]
    confounds = [c.bfill() for c in confounds]

    t_r, n_scans = 2.3, 160
    frame_times = t_r * (np.arange(n_scans) + .5)

    betas = []

    n_verts = {}

    for (run, hemi), cf, surf in zip(keys, confounds, surfs):
        e = events.xs(run, 0, 'run')
        Y = surface.load_surf_data(surf).T

        n_verts[hemi] = Y.shape[1]

        if len(Y) == 213:
            # trim over-long runs to the expected 160 scans (n_scans above)
            # and keep the confounds aligned
            Y = Y[:160]
            cf = cf.iloc[:160]

        if pca_confounds:
            pca = PCA(n_components=13)
            cf -= cf.mean(0)
            cf /= cf.std(0)
            cf = pca.fit_transform(cf)
            print('PCA size: ', cf.shape)

        X = make_first_level_design_matrix(
            frame_times,
            events=e,
            hrf_model='glover',
            high_pass=False,
            drift_model=None,
            add_regs=cf,
        )

        Y = (Y / Y.mean(0) * 100)
        Y -= Y.mean(0)

        fit = run_glm(Y, X, noise_model='ols', n_jobs=n_jobs)
        r = fit[1][0.0]
        betas.append(pd.DataFrame(r.theta, index=X.columns))

    betas = pd.concat(betas, keys=keys, names=['run', 'hemi'])
    betas.reset_index('run', drop=True, inplace=True)
    betas = betas.loc[(slice(None), stimulus1.trial_type), :].unstack(
        'hemi', fill_value=-1e6).swaplevel(axis=1).sort_index(axis=1)

    for hemi in ['L', 'R']:
        b = betas[hemi].loc[:, :n_verts[hemi] - 1]
        print(b)
        gii = nb.gifti.GiftiImage(
            header=nb.load(surfs[['L', 'R'].index(hemi)]).header,
            darrays=[nb.gifti.GiftiDataArray(row) for _, row in b.iterrows()])

        fn_template = op.join(
            base_dir,
            'sub-{subject}_ses-{session}_task-task_space-{space}_desc-stims1_hemi-{hemi}.pe.gii'
        )

        gii.to_filename(fn_template.format(**locals()))
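
# Hypothetical invocation of the surface GLM above (placeholder values):
main(subject='02', session='7t2', sourcedata='/data/ds-risk',
     smoothed=True, space='fsnative', n_jobs=8)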
Example #8
def make_first_level_design_matrix(raw,
                                   stim_dur=1.,
                                   hrf_model='glover',
                                   drift_model='cosine',
                                   high_pass=0.01,
                                   drift_order=1,
                                   fir_delays=[0],
                                   add_regs=None,
                                   add_reg_names=None,
                                   min_onset=-24,
                                   oversampling=50):
    """
    Generate a design matrix for the experiment.

    This is a wrapper function for
    nilearn.glm.first_level.make_first_level_design_matrix.

    Parameters
    ----------
    raw : instance of Raw
        Haemoglobin data.

    stim_dur : Number
        The length of your stimulus.

    hrf_model : {'glover', 'spm', 'spm + derivative',
         'spm + derivative + dispersion',
        'glover + derivative', 'glover + derivative + dispersion',
        'fir', None}, optional
        Specifies the hemodynamic response function. Default='glover'.

    drift_model : {'cosine', 'polynomial', None}, optional
        Specifies the desired drift model. Default='cosine'.

    high_pass : float, optional
        High-pass frequency in case of a cosine model (in Hz).
        Default=0.01.

    drift_order : int, optional
        Order of the drift model (in case it is polynomial).
        Default=1.

    fir_delays : array of shape(n_onsets) or list, optional
        In case of FIR design, yields the array of delays used in the FIR
        model (in scans). Default=[0].

    add_regs : array of shape(n_frames, n_add_reg) or pandas DataFrame
        additional user-supplied regressors, e.g. data driven noise regressors
        or seed based regressors.

    add_reg_names : list of (n_add_reg,) strings, optional
        If None, while add_regs was provided, these will be termed
        'reg_%i', i = 0..n_add_reg - 1
        If add_regs is a DataFrame, the corresponding column names are used
        and add_reg_names is ignored.

    min_onset : float, optional
        Minimal onset relative to frame_times[0] (in seconds)
        events that start before frame_times[0] + min_onset are not considered.
        Default=-24.

    oversampling : int, optional
        Oversampling factor used in temporal convolutions. Default=50.

    Returns
    -------
    design_matrix : DataFrame instance,
        holding the computed design matrix, the index being the frames_times
        and each column a regressor.

    """
    import numpy as np
    from nilearn.glm.first_level import make_first_level_design_matrix
    from pandas import DataFrame

    frame_times = raw.times

    # Create events for nilearn
    conditions = raw.annotations.description
    onsets = raw.annotations.onset - raw.first_time
    duration = stim_dur * np.ones(len(conditions))
    events = DataFrame({
        'trial_type': conditions,
        'onset': onsets,
        'duration': duration
    })

    dm = make_first_level_design_matrix(frame_times,
                                        events,
                                        drift_model=drift_model,
                                        drift_order=drift_order,
                                        hrf_model=hrf_model,
                                        min_onset=min_onset,
                                        high_pass=high_pass,
                                        add_regs=add_regs,
                                        oversampling=oversampling,
                                        add_reg_names=add_reg_names,
                                        fir_delays=fir_delays)

    return dm
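
# Usage sketch: build a design matrix from an annotated fNIRS recording,
# here the output of simulate_nirs_raw() from Example #12.
raw = simulate_nirs_raw(sfreq=3., sig_dur=300., amplitude=1.,
                        annot_desc='A', stim_dur=5.)
design = make_first_level_design_matrix(raw, stim_dur=5.)
print(design.columns.tolist())   # e.g. ['A', 'drift_1', ..., 'constant']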
Example #9
def sub_tcontrasts3(session:Union[dict,Bunch]=None,
                    sub_id:str=None,
                    tr:float=None,
                    frame_times:list=None,
                    hrf_model:str=None,
                    events:pd.DataFrame=None,
                    fmri_img:Nifti1Image=None,
                    sub_outdir:Union[str,os.PathLike]=None):
    """
    Create beta values maps using nilearn first-level model.

    The beta values correspond to the following contrasts between conditions:
    correctsource (cs), wrongsource (ws), cs_minus_ws, cs_minus_miss,
    ws_minus_miss, cs_minus_ctl and ws_minus_ctl.

    Parameters:
    ----------
    session: dict or Bunch (bundle of the inputs below)
    sub_id: string (subject's DCC id)
    tr: float (time to repetition, in seconds)
    frame_times: list of float (onsets of fMRI frames, in seconds)
    hrf_model: string (type of HRF model)
    events: pandas DataFrame (trials' onset times, durations and labels)
    fmri_img: Nifti1Image (preprocessed fMRI data)
    sub_outdir: string (path to subject's image output directory)

    Return:
    ----------
    contrasts: tuple of (beta map, filename) pairs; the maps are also
    saved to sub_outdir when it is provided.
    """
    if isinstance(session, dict):
        session = Bunch(**session)
    # Model 3: source memory conditions (control, miss, wrong/correct source)
    events3 = session.events.copy(deep=True)
    cols = ['onset', 'duration', 'ctl_miss_ws_cs']
    events3 = events3[cols]
    events3.rename(columns={'ctl_miss_ws_cs':'trial_type'}, inplace=True)

    # create the model - Should data be standardized?
    model3 = FirstLevelModel(**session.glm_defs)

    # create the design matrices
    design3 = make_first_level_design_matrix(events=events3, **session.design_defs)

    # fit model with design matrix
    model3 = model3.fit(session.cleaned_fmri, design_matrices = design3)

    # Condition order: control, correct source, missed, wrong source (alphabetical)
    #contrast 3.1: wrong source
    ws_vec = np.repeat(0, design3.shape[1])
    ws_vec[3] = 1
    b31_map = model3.compute_contrast(ws_vec, output_type='effect_size') #"effect_size" for betas
    b31_name = f'betas_{session.sub_id}_ws.nii'

    #contrast 3.2: correct source
    cs_vec = np.repeat(0, design3.shape[1])
    cs_vec[1] = 1
    b32_map = model3.compute_contrast(cs_vec, output_type='effect_size') #"effect_size" for betas
    b32_name = f'betas_{session.sub_id}_cs.nii'

    #contrast 3.3: correct source minus wrong source
    cs_minus_ws_vec = np.repeat(0, design3.shape[1])
    cs_minus_ws_vec[1] = 1
    cs_minus_ws_vec[3] = -1
    b33_map = model3.compute_contrast(cs_minus_ws_vec, output_type='effect_size') #"effect_size" for betas
    b33_name = f'betas_{session.sub_id}_cs_minus_ws.nii'

    #contrast 3.4: correct source minus miss
    cs_minus_miss_vec = np.repeat(0, design3.shape[1])
    cs_minus_miss_vec[1] = 1
    cs_minus_miss_vec[2] = -1
    b34_map = model3.compute_contrast(cs_minus_miss_vec, output_type='effect_size') #"effect_size" for betas
    b34_name = f'betas_{session.sub_id}_cs_minus_miss.nii'

    #contrast 3.5: wrong source minus miss
    ws_minus_miss_vec = np.repeat(0, design3.shape[1])
    ws_minus_miss_vec[3] = 1
    ws_minus_miss_vec[2] = -1
    b35_map = model3.compute_contrast(ws_minus_miss_vec, output_type='effect_size') #"effect_size" for betas
    b35_name = f'betas_{session.sub_id}_ws_minus_miss.nii'

    #contrast 3.6: correct source minus control
    cs_minus_ctl_vec = np.repeat(0, design3.shape[1])
    cs_minus_ctl_vec[1] = 1
    cs_minus_ctl_vec[0] = -1
    b36_map = model3.compute_contrast(cs_minus_ctl_vec, output_type='effect_size') #"effect_size" for betas
    b36_name = f'betas_{session.sub_id}_cs_minus_ctl.nii'

    #contrast 3.7: wrong source minus control
    ws_minus_ctl_vec = np.repeat(0, design3.shape[1])
    ws_minus_ctl_vec[3] = 1
    ws_minus_ctl_vec[0] = -1
    b37_map = model3.compute_contrast(ws_minus_ctl_vec, output_type='effect_size') #"effect_size" for betas
    b37_name = f'betas_{session.sub_id}_ws_minus_ctl.nii'

    contrasts = ((b31_map, b31_name), (b32_map, b32_name), (b33_map, b33_name),
                 (b34_map, b34_name), (b35_map, b35_name), (b36_map, b36_name),
                 (b37_map, b37_name))
    if sub_outdir is not None:
        savedir = os.path.join(sub_outdir, session.sub_id, session.ses_id)
        os.makedirs(savedir, exist_ok=True)
        for cmap, cname in contrasts:
            nibabel.save(cmap, os.path.join(savedir, cname))

    return contrasts
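
# The contrast vectors above rely on the alphabetical column order of the
# design matrix; a small helper that looks columns up by name is less
# fragile. Hypothetical helper, not part of the original code:
import numpy as np

def named_contrast(design, plus=(), minus=()):
    """Build a contrast vector from design-matrix column names,
    avoiding hard-coded positions like ws_vec[3] = 1."""
    vec = np.zeros(design.shape[1])
    for name in plus:
        vec[design.columns.get_loc(name)] = 1.0
    for name in minus:
        vec[design.columns.get_loc(name)] = -1.0
    return vec

# e.g. cs_minus_ws_vec = named_contrast(design3, plus=['cs'], minus=['ws'])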
Example #10
#########################################################################
# Extract the seed time series, build the design matrix
# and specify the contrast.
seed_masker = NiftiSpheresMasker([pcc_coords],
                                 radius=10,
                                 detrend=True,
                                 standardize=True,
                                 low_pass=0.1,
                                 high_pass=0.01,
                                 t_r=2.,
                                 memory='nilearn_cache',
                                 memory_level=1,
                                 verbose=0)
seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
design_matrix = make_first_level_design_matrix(frametimes,
                                               hrf_model='spm',
                                               add_regs=seed_time_series,
                                               add_reg_names=["pcc_seed"])
dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
contrasts = {'seed_based_glm': dmn_contrast}

#########################################################################
# Perform first level analysis
# ----------------------------
# Setup and fit GLM.
first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                          design_matrices=design_matrix)

#########################################################################
# Estimate the contrast.
z_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],
                                           output_type='z_score')
print('Contrast seed_based_glm computed.')
Example #11

# We start by specifying the timing of fMRI frames.

import numpy as np

n_scans = texture.shape[1]
frame_times = t_r * (np.arange(n_scans) + .5)

###############################################################################
# Create the design matrix.
#
# We specify an hrf model containing the Glover model and its time derivative
# The drift model is implicitly a cosine basis with a period cutoff at 128s.
from nilearn.glm.first_level import make_first_level_design_matrix

design_matrix = make_first_level_design_matrix(frame_times,
                                               events=events,
                                               hrf_model='glover + derivative')

###############################################################################
# Setup and fit GLM.
#
# Note that the output consists of 2 variables: `labels` and `estimates`.
# `labels` tags voxels according to noise autocorrelation.
# `estimates` contains the parameter estimates.
# We keep them for later contrast computation.

from nilearn.glm.first_level import run_glm

labels, estimates = run_glm(texture.T, design_matrix.values)
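
# To turn these outputs into a statistical map, the matching step (shown in
# full in Example #13) would look like this; the contrast vector below is
# illustrative rather than part of the original snippet.
from nilearn.glm.contrasts import compute_contrast

contrast_val = np.eye(design_matrix.shape[1])[0]  # first regressor vs. baseline
contrast = compute_contrast(labels, estimates, contrast_val, contrast_type='t')
z_map = contrast.z_score()                        # vertex-wise z values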

###############################################################################
Example #12
def simulate_nirs_raw(sfreq=3.,
                      amplitude=1.,
                      annot_desc='A',
                      sig_dur=300.,
                      stim_dur=5.,
                      isi_min=15.,
                      isi_max=45.,
                      ch_name='Simulated',
                      hrf_model='glover'):
    """
    Create simulated fNIRS data.

    The returned data is of type `hbo`.
    One or more conditions can be simulated.
    To simulate multiple conditions pass in a description and amplitude
    for each
    `amplitude=[0., 2., 4.], annot_desc=['Control', 'Cond_A', 'Cond_B']`.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number, Array of numbers
        The amplitude of the signal to simulate in uM.
        Pass in an array to simulate multiple conditions.
    annot_desc : str, Array of str
        The name of the annotations for simulated amplitudes.
        Pass in an array to simulate multiple conditions,
        must be the same length as amplitude.
    sig_dur : Number
        The length of the boxcar signal to generate in seconds that will
        be convolved with the HRF.
    stim_dur : Number, Array of numbers
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.
    ch_name : str
        Channel name to be used in returned raw instance.
    hrf_model : str
        Specifies the hemodynamic response function. See nilearn docs.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    from nilearn.glm.first_level import make_first_level_design_matrix
    from pandas import DataFrame

    if type(amplitude) is not list:
        amplitude = [amplitude]
    if type(annot_desc) is not list:
        annot_desc = [annot_desc]
    if type(stim_dur) is not list:
        stim_dur = [stim_dur]

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    assert len(amplitude) == len(annot_desc), "Same number of amplitudes as " \
                                              "annotations required."
    assert len(amplitude) == len(stim_dur), "Same number of amplitudes as " \
                                            "durations required."

    onset = 0.
    onsets = []
    conditions = []
    durations = []
    while onset < sig_dur - 60:
        c_idx = np.random.randint(0, len(amplitude))
        onset += np.random.uniform(isi_min, isi_max) + stim_dur[c_idx]
        onsets.append(onset)
        conditions.append(annot_desc[c_idx])
        durations.append(stim_dur[c_idx])

    events = DataFrame({
        'trial_type': conditions,
        'onset': onsets,
        'duration': durations
    })

    dm = make_first_level_design_matrix(frame_times,
                                        events,
                                        hrf_model=hrf_model,
                                        drift_model='polynomial',
                                        drift_order=0)
    dm = dm.drop(columns='constant')

    annotations = Annotations(onsets, durations, conditions)

    info = create_info(ch_names=[ch_name], sfreq=sfreq, ch_types=['hbo'])

    for idx, annot in enumerate(annot_desc):
        if annot in dm.columns:
            dm[annot] *= amplitude[idx]

    a = np.sum(dm.to_numpy(), axis=1) * 1.e-6
    a = a.reshape(-1, 1).T

    raw = RawArray(a, info, verbose=False)
    raw.set_annotations(annotations)

    return raw
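
# Quick usage sketch (MNE must be installed):
raw = simulate_nirs_raw(sfreq=3., sig_dur=300.,
                        amplitude=[1., 3.],
                        annot_desc=['Control', 'Cond_A'],
                        stim_dur=[5., 5.])
print(raw)                    # RawArray with one simulated 'hbo' channel
print(raw.annotations[:3])    # the generated stimulus onsets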
Example #13
#########################################################################
# Empty lists in which we are going to store activation values.
z_scores_right = []
z_scores_left = []
for (fmri_img, confound, events) in zip(
        models_run_imgs, models_confounds, models_events):
    texture = surface.vol_to_surf(fmri_img[0], fsaverage.pial_right)
    n_scans = texture.shape[1]
    frame_times = t_r * (np.arange(n_scans) + .5)

    # Create the design matrix
    #
    # We specify an hrf model containing Glover model and its time derivative.
    # The drift model is implicitly a cosine basis with period cutoff 128s.
    design_matrix = make_first_level_design_matrix(
        frame_times, events=events[0], hrf_model='glover + derivative',
        add_regs=confound[0])

    # Contrast specification
    contrast_values = (design_matrix.columns == 'language') * 1.0 -\
                      (design_matrix.columns == 'string')

    # Setup and fit GLM.
    # Note that the output consists of 2 variables: `labels` and `estimates`.
    # `labels` tags voxels according to noise autocorrelation.
    # `estimates` contains the parameter estimates.
    # We use them for contrast computation.
    labels, estimates = run_glm(texture.T, design_matrix.values)
    contrast = compute_contrast(labels, estimates, contrast_values,
                                contrast_type='t')
    # We present the Z-transform of the t map.
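    # Plausible continuation (this loop only processes the right hemisphere;
    # the full example repeats the block for the left one):
    z_scores_right.append(contrast.z_score())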
Example #14
def sub_tcontrasts1(session:Union[dict,Bunch]=None,
                    sub_id:str=None,
                    tr:float=None,
                    frame_times:list=None,
                    hrf_model:str=None,
                    events:pd.DataFrame=None,
                    fmri_img:Nifti1Image=None,
                    sub_outdir:Union[str,os.PathLike]=None):
    """
    Create beta values maps using nilearn first-level model.

    The beta values correspond to the following contrasts between conditions:
    control, encoding, and encoding_minus_control

    Parameters:
    ----------
    session: dict or Bunch (bundle of the inputs below)
    sub_id: string (subject's DCC id)
    tr: float (time to repetition, in seconds)
    frame_times: list of float (onsets of fMRI frames, in seconds)
    hrf_model: string (type of HRF model)
    events: pandas DataFrame (trials' onset times, durations and labels)
    fmri_img: Nifti1Image (preprocessed fMRI data)
    sub_outdir: string (path to subject's image output directory)

    Return:
    ----------
    contrasts: tuple of (beta map, filename) pairs; the maps are also
    saved to sub_outdir when it is provided.
    """

    if isinstance(session, dict):
        session = Bunch(**session)
    # Model 1: encoding vs control conditions
    events1 = session.events.copy(deep=True)
    cols = ['onset', 'duration', 'trial_type']
    events1 = events1[cols]

    # create the model - Should data be standardized?
    model1 = FirstLevelModel(**session.glm_defs)

    # create the design matrices
    design1 = make_first_level_design_matrix(events=events1, **session.design_defs)

    # fit model with design matrix
    model1 = model1.fit(session.cleaned_fmri, design_matrices = design1)

    # Condition order: control, encoding (alphabetical)
    # contrast 1.1: control condition
    ctl_vec = np.repeat(0, design1.shape[1])
    ctl_vec[0] = 1
    b11_map = model1.compute_contrast(ctl_vec, output_type='effect_size') #"effect_size" for betas
    b11_name = f'betas_{session.sub_id}_ctl.nii'

    #contrast 1.2: encoding condition
    enc_vec = np.repeat(0, design1.shape[1])
    enc_vec[1] = 1
    b12_map = model1.compute_contrast(enc_vec, output_type='effect_size') #"effect_size" for betas
    b12_name = f'betas_{session.sub_id}_enc.nii'

    #contrast 1.3: encoding minus control
    encMinCtl_vec = np.repeat(0, design1.shape[1])
    encMinCtl_vec[1] = 1
    encMinCtl_vec[0] = -1
    b13_map = model1.compute_contrast(encMinCtl_vec, output_type='effect_size') #"effect_size" for betas
    b13_name = f'betas_{session.sub_id}_enc_minus_ctl.nii'
    contrasts = ((b11_map, b11_name), (b12_map, b12_name), (b13_map, b13_name))
    if sub_outdir is not None:
        savedir = os.path.join(sub_outdir, session.sub_id, session.ses_id)
        os.makedirs(savedir, exist_ok=True)
        for cmap, cname in contrasts:
            nibabel.save(cmap, os.path.join(savedir, cname))
    return contrasts
Example #15
def simulate_nirs_raw(sfreq=3.,
                      amplitude=1.,
                      annot_desc='A',
                      sig_dur=300.,
                      stim_dur=5.,
                      isi_min=15.,
                      isi_max=45.,
                      ch_name='Simulated'):
    """
    Create simulated data.

      .. warning:: Work in progress: I am trying to think on the best API.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number, Array of numbers
        The amplitude of the signal to simulate in uM.
    annot_desc : String, Array of strings
        The name of the annotations for simulated amplitudes.
    sig_dur : Number
        The length of the signal to generate in seconds.
    stim_dur : Number, Array of numbers
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.
    ch_name : String
        Channel name to be used in returned raw instance.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    from nilearn.glm.first_level import make_first_level_design_matrix
    from pandas import DataFrame

    if type(amplitude) is not list:
        amplitude = [amplitude]
    if type(annot_desc) is not list:
        annot_desc = [annot_desc]
    if type(stim_dur) is not list:
        stim_dur = [stim_dur]

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    assert len(amplitude) == len(annot_desc), "Same number of amplitudes as " \
                                              "annotations required."
    assert len(amplitude) == len(stim_dur), "Same number of amplitudes as " \
                                            "durations required."

    onset = 0.
    onsets = []
    conditions = []
    durations = []
    while onset < sig_dur - 60:
        c_idx = np.random.randint(0, len(amplitude))
        onset += np.random.uniform(isi_min, isi_max) + stim_dur[c_idx]
        onsets.append(onset)
        conditions.append(annot_desc[c_idx])
        durations.append(stim_dur[c_idx])

    events = DataFrame({
        'trial_type': conditions,
        'onset': onsets,
        'duration': durations
    })

    dm = make_first_level_design_matrix(frame_times,
                                        events,
                                        drift_model='polynomial',
                                        drift_order=0)
    dm = dm.drop(columns='constant')

    annotations = Annotations(onsets, durations, conditions)

    info = create_info(ch_names=[ch_name], sfreq=sfreq, ch_types=['hbo'])

    for idx, annot in enumerate(annot_desc):
        if annot in dm.columns:
            dm[annot] *= amplitude[idx]

    a = np.sum(dm.to_numpy(), axis=1) * 1.e-6
    a = a.reshape(-1, 1).T

    raw = RawArray(a, info, verbose=False)
    raw.set_annotations(annotations)

    return raw
Example #16
import pandas as pd
events = pd.DataFrame({
    'trial_type': conditions,
    'onset': onsets,
    'duration': duration
})

#########################################################################
# We sample the events into a design matrix, also including additional
# regressors.
hrf_model = 'glover'
from nilearn.glm.first_level import make_first_level_design_matrix
X1 = make_first_level_design_matrix(frame_times,
                                    events,
                                    drift_model='polynomial',
                                    drift_order=3,
                                    add_regs=motion,
                                    add_reg_names=add_reg_names,
                                    hrf_model=hrf_model)

#########################################################################
# Now we compute a block design matrix. We add duration to create the blocks.
# For this we first define an event structure that includes the duration
# parameter.
duration = 7. * np.ones(len(conditions))
events = pd.DataFrame({
    'trial_type': conditions,
    'onset': onsets,
    'duration': duration
})
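
#########################################################################
# Plausible continuation: sample the block design into a second matrix
# with the same drift settings as X1.
X2 = make_first_level_design_matrix(frame_times,
                                    events,
                                    drift_model='polynomial',
                                    drift_order=3,
                                    hrf_model=hrf_model)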
Example #17
from nilearn.glm.first_level import make_first_level_design_matrix
design_matrices = []

#########################################################################
# Loop over the two sessions.
for idx, img in enumerate(fmri_img, start=1):
    # Build experimental paradigm
    n_scans = img.shape[-1]
    events = pd.read_table(subject_data['events{}'.format(idx)])
    # Define the sampling times for the design matrix
    frame_times = np.arange(n_scans) * tr
    # Build design matrix with the previously defined parameters
    design_matrix = make_first_level_design_matrix(
            frame_times,
            events,
            hrf_model=hrf_model,
            drift_model=drift_model,
            high_pass=high_pass,
            )

    # put the design matrices in a list
    design_matrices.append(design_matrix)

#########################################################################
# We can specify basic contrasts (to get beta maps).
# We start by specifying canonical contrasts that isolate design matrix columns.
contrast_matrix = np.eye(design_matrix.shape[1])
basic_contrasts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
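
#########################################################################
# A plausible next step (model fitting is not shown in this snippet, so
# the FirstLevelModel call below is illustrative):
from nilearn.glm.first_level import FirstLevelModel

fmri_glm = FirstLevelModel().fit(fmri_img, design_matrices=design_matrices)
z_map = fmri_glm.compute_contrast(basic_contrasts['constant'],
                                  output_type='z_score')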

#########################################################################
Example #18
def sub_tcontrasts2(session:Union[dict,Bunch]=None,
                    sub_id:str=None,
                    tr:float=None,
                    frame_times:list=None,
                    hrf_model:str=None,
                    events:pd.DataFrame=None,
                    fmri_img:Nifti1Image=None,
                    sub_outdir:Union[str,os.PathLike]=None):
    """
    Create beta values maps using nilearn first-level model.

    The beta values correspond to the following contrasts between conditions:
    hit, miss, hit_minus_miss, hit_minus_ctl and miss_minus_ctl

    Parameters:
    ----------
    session: dict or Bunch (bundle of the inputs below)
    sub_id: string (subject's DCC id)
    tr: float (time to repetition, in seconds)
    frame_times: list of float (onsets of fMRI frames, in seconds)
    hrf_model: string (type of HRF model)
    events: pandas DataFrame (trials' onset times, durations and labels)
    fmri_img: Nifti1Image (preprocessed fMRI data)
    sub_outdir: string (path to subject's image output directory)

    Return:
    ----------
    contrasts: tuple of (beta map, filename) pairs; the maps are also
    saved to sub_outdir when it is provided.
    """
    if isinstance(session, dict):
        session = Bunch(**session)
    # Model 2: recognition performance (control, hit, miss)
    events2 = session.events.copy(deep=True)
    cols = ['onset', 'duration', 'recognition_performance']
    events2 = events2[cols]
    events2.rename(columns={'recognition_performance':'trial_type'},
                   inplace=True)

    # create the model - Should data be standardized?
    model2 = FirstLevelModel(**session.glm_defs)

    # create the design matrices
    design2 = make_first_level_design_matrix(events=events2,**session.design_defs)

    # fit model with design matrix
    model2 = model2.fit(session.cleaned_fmri, design_matrices = design2)

    # Condition order: control, hit, missed (alphabetical)
    #contrast 2.1: miss
    miss_vec = np.repeat(0, design2.shape[1])
    miss_vec[2] = 1
    b21_map = model2.compute_contrast(miss_vec, output_type='effect_size') #"effect_size" for betas
    b21_name = f'betas_{session.sub_id}_miss.nii'

    #contrast 2.2: hit
    hit_vec = np.repeat(0, design2.shape[1])
    hit_vec[1] = 1
    b22_map = model2.compute_contrast(hit_vec, output_type='effect_size') #"effect_size" for betas
    b22_name = f'betas_{session.sub_id}_hit.nii'

    #contrast 2.3: hit minus miss
    hit_min_miss_vec = np.repeat(0, design2.shape[1])
    hit_min_miss_vec[1] = 1
    hit_min_miss_vec[2] = -1
    b23_map = model2.compute_contrast(hit_min_miss_vec, output_type='effect_size') #"effect_size" for betas
    b23_name = f'betas_{session.sub_id}_hit_minus_miss.nii'

    #contrast 2.4: hit minus control
    hit_min_ctl_vec = np.repeat(0, design2.shape[1])
    hit_min_ctl_vec[1] = 1
    hit_min_ctl_vec[0] = -1
    b24_map = model2.compute_contrast(hit_min_ctl_vec, output_type='effect_size') #"effect_size" for betas
    b24_name = f'betas_{session.sub_id}_hit_minus_ctl.nii'

    #contrast 2.5: miss minus control
    miss_min_ctl_vec = np.repeat(0, design2.shape[1])
    miss_min_ctl_vec[2] = 1
    miss_min_ctl_vec[0] = -1
    b25_map = model2.compute_contrast(miss_min_ctl_vec, output_type='effect_size') #"effect_size" for betas
    b25_name = f'betas_{session.sub_id}_miss_minus_ctl.nii'
    
    contrasts = ((b21_map, b21_name), (b22_map, b22_name), (b23_map, b23_name),
                 (b24_map, b24_name), (b25_map, b25_name))

    if sub_outdir is not None:
        savedir = os.path.join(sub_outdir, session.sub_id, session.ses_id)
        os.makedirs(savedir, exist_ok=True)
        for cmap, cname in contrasts:
            nibabel.save(cmap, os.path.join(savedir, cname))
    return contrasts
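
# A hedged usage sketch for the sub_tcontrasts* functions; the session Bunch
# fields below are hypothetical stand-ins for what fetch_fmriprep_session
# returns in the original project.
from sklearn.utils import Bunch

session = Bunch(sub_id='sub-01', ses_id='ses-04',
                events=events,                  # DataFrame with task columns
                cleaned_fmri=fmri_img,          # preprocessed Nifti1Image
                glm_defs={'t_r': 2.5, 'hrf_model': 'spm'},
                design_defs={'frame_times': frame_times, 'hrf_model': 'spm'})
contrasts = sub_tcontrasts2(session=session, sub_outdir='derivatives')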