def _make_flm(data_dir):  # pragma: no cover
    """Build and fit a first-level model for the fMRIPrep stop-signal data.

    Discovers the first subject's model and run images from the BIDS
    layout under ``data_dir``, fits it with a design matrix produced by
    ``_make_design_matrix_for_bids_feature``, and returns the fitted
    model together with the subject label (``'sub-<label>'``).
    """
    flms, run_imgs, _, _ = first_level_from_bids(
        data_dir,
        'stopsignal',
        'MNI152NLin2009cAsym',
        smoothing_fwhm=5.0,
        derivatives_folder='derivatives/fmriprep')

    # Only the first subject is needed; events/confounds are unused because
    # the design matrix is supplied explicitly below.
    flm = flms[0]
    subject = 'sub-' + flm.subject_label
    design = _make_design_matrix_for_bids_feature(data_dir, subject)
    flm.fit(run_imgs[0], design_matrices=[design])
    return flm, subject
# Example 2
def test_first_level_from_bids():
    """Exercise ``first_level_from_bids`` on a synthetic BIDS dataset.

    Checks argument validation (type/value errors), that the four returned
    collections have matching lengths, and that ambiguous or missing
    derivative/event files raise informative errors.  The file removals
    below are order-dependent: each removal sets up the error expected by
    the following call.
    """
    with InTemporaryDirectory():
        bids_path = create_fake_bids_dataset(n_sub=10,
                                             n_ses=2,
                                             tasks=['localizer', 'main'],
                                             n_runs=[1, 3])
        # test arguments are provided correctly
        with pytest.raises(TypeError):
            first_level_from_bids(2, 'main', 'MNI')
        with pytest.raises(ValueError):
            first_level_from_bids('lolo', 'main', 'MNI')
        with pytest.raises(TypeError):
            first_level_from_bids(bids_path, 2, 'MNI')
        with pytest.raises(TypeError):
            first_level_from_bids(bids_path, 'main', 'MNI', model_init=[])
        with pytest.raises(TypeError, match="space_label must be a string"):
            first_level_from_bids(bids_path, 'main', space_label=42)

        with pytest.raises(TypeError, match="img_filters must be a list"):
            first_level_from_bids(bids_path, 'main', img_filters="foo")

        with pytest.raises(TypeError, match="filters in img"):
            first_level_from_bids(bids_path, 'main', img_filters=[(1, 2)])

        with pytest.raises(ValueError,
                           match="field foo is not a possible filter."):
            first_level_from_bids(bids_path,
                                  'main',
                                  img_filters=[("foo", "bar")])

        # test output is as expected: one model per subject, with parallel
        # lists of run images, events, and confounds
        models, m_imgs, m_events, m_confounds = first_level_from_bids(
            bids_path, 'main', 'MNI', [('desc', 'preproc')])
        assert len(models) == len(m_imgs)
        assert len(models) == len(m_events)
        assert len(models) == len(m_confounds)
        # test repeated run tag error when run tag is in filenames
        # can arise when desc or space is present and not specified
        with pytest.raises(ValueError):
            first_level_from_bids(bids_path, 'main',
                                  'T1w')  # desc not specified
        # test more than one ses file error when run tag is not in filenames
        # can arise when desc or space is present and not specified
        with pytest.raises(ValueError):
            first_level_from_bids(bids_path, 'localizer',
                                  'T1w')  # desc not specified
        # Test issues with confound files: there must be exactly one confound
        # file per image (or none at all).  Removing one file below creates
        # the "one is missing" case.
        confound_files = get_bids_files(os.path.join(bids_path, 'derivatives'),
                                        file_tag='desc-confounds_timeseries')
        os.remove(confound_files[-1])
        with pytest.raises(ValueError):
            first_level_from_bids(bids_path, 'main', 'MNI')
        # test issues with event files
        events_files = get_bids_files(bids_path, file_tag='events')
        os.remove(events_files[0])
        # one file missing
        with pytest.raises(ValueError):
            first_level_from_bids(bids_path, 'main', 'MNI')
        for f in events_files[1:]:
            os.remove(f)
        # all files missing
        with pytest.raises(ValueError):
            first_level_from_bids(bids_path, 'main', 'MNI')

        # In case different desc and spaces exist and are not selected we
        # fail and ask for more specific information
        shutil.rmtree(os.path.join(bids_path, 'derivatives'))
        # issue if no derivatives folder is present
        with pytest.raises(ValueError):
            first_level_from_bids(bids_path, 'main', 'MNI')

        # check runs are not repeated when ses field is not used
        shutil.rmtree(bids_path)
        bids_path = create_fake_bids_dataset(n_sub=10,
                                             n_ses=1,
                                             tasks=['localizer', 'main'],
                                             n_runs=[1, 3],
                                             no_session=True)
        # test repeated run tag error when run tag is in filenames and not ses
        # can arise when desc or space is present and not specified
        with pytest.raises(ValueError):
            first_level_from_bids(bids_path, 'main',
                                  'T1w')  # desc not specified
# Example 3
# NOTE(review): ``data_dir`` is defined earlier in the example (dataset
# download step), outside this excerpt.
print(data_dir)

##############################################################################
# Obtain automatically FirstLevelModel objects and fit arguments
# --------------------------------------------------------------
# From the dataset directory we automatically obtain the FirstLevelModel objects
# with their subject_id filled from the :term:`BIDS` dataset. Moreover, we obtain
# for each model a dictionary with run_imgs, events and confounder regressors
# since in this case a confounds.tsv file is available in the :term:`BIDS` dataset.
# To get the first level models we only have to specify the dataset directory
# and the task_label as specified in the file names.
from nilearn.glm.first_level import first_level_from_bids
task_label = 'languagelocalizer'
models, models_run_imgs, models_events, models_confounds = \
    first_level_from_bids(
        data_dir, task_label,
        img_filters=[('desc', 'preproc')])

#############################################################################
# Quick sanity check on fit arguments
# -----------------------------------
# Additional checks or information extraction from pre-processed data can
# be made here.

############################################################################
# We just expect one run_img per subject.
import os
print([os.path.basename(run) for run in models_run_imgs[0]])

###############################################################################
# The only confounds stored are regressors obtained from motion correction. As
# Example 4
# From the dataset directory we automatically obtain FirstLevelModel objects
# with their subject_id filled from the :term:`BIDS` dataset. Moreover we obtain,
# for each model, the list of run images and their respective events and
# confound regressors. Those are inferred from the confounds.tsv files
# available in the :term:`BIDS` dataset.
# To get the first level models we have to specify the dataset directory,
# the task_label and the space_label as specified in the file names.
# We also have to provide the folder with the desired derivatives, that in this
# case were produced by the fmriprep :term:`BIDS` app.
# NOTE(review): ``data_dir`` is defined earlier in the example, outside this
# excerpt.
from nilearn.glm.first_level import first_level_from_bids
task_label = 'stopsignal'
space_label = 'MNI152NLin2009cAsym'
derivatives_folder = 'derivatives/fmriprep'
models, models_run_imgs, models_events, models_confounds = \
    first_level_from_bids(data_dir, task_label, space_label,
                          smoothing_fwhm=5.0,
                          derivatives_folder=derivatives_folder)

#############################################################################
# Access the model and model arguments of the subject and process events.
# Only the first (single) subject is used here.
model, imgs, events, confounds = (models[0], models_run_imgs[0],
                                  models_events[0], models_confounds[0])
subject = 'sub-' + model.subject_label

import os
from nilearn._utils.glm import get_design_from_fslmat
# Load the design matrix produced by FSL FEAT for the same subject so the
# nilearn fit can be compared against the FSL analysis.
fsl_design_matrix_path = os.path.join(data_dir, 'derivatives', 'task', subject,
                                      'stopsignal.feat', 'design.mat')
design_matrix = get_design_from_fslmat(fsl_design_matrix_path,
                                       column_names=None)
##############################################################################
# Prepare data and analysis parameters
# ------------------------------------
# Download data in :term:`BIDS` format and event information for one subject,
# and create a standard :class:`~nilearn.glm.first_level.FirstLevelModel`.
from nilearn.datasets import fetch_language_localizer_demo_dataset

data_dir, _ = fetch_language_localizer_demo_dataset()

from nilearn.glm.first_level import first_level_from_bids

models, models_run_imgs, events_dfs, models_confounds = \
    first_level_from_bids(
        data_dir,
        'languagelocalizer',
        img_filters=[('desc', 'preproc')],
    )

# Grab the first subject's model, functional file, and events DataFrame
standard_glm = models[0]
fmri_file = models_run_imgs[0][0]
events_df = events_dfs[0][0]

# We will use first_level_from_bids's parameters for the other models
glm_parameters = standard_glm.get_params()
# We need to override one parameter (signal_scaling) with the value of
# scaling_axis
# NOTE(review): ``scaling_axis`` is an attribute set on the fitted/initialized
# model — confirm it is still exposed by the installed nilearn version.
glm_parameters['signal_scaling'] = standard_glm.scaling_axis

##############################################################################