Example #1
from nilearn.datasets import func


# `request_mocker` is a pytest fixture from the test suite that mocks
# HTTP requests, so no real downloads occur during the test.
def test_select_from_index(request_mocker):
    dataset_version = 'ds000030_R1.0.4'
    data_prefix = '{}/{}/uncompressed'.format(
        dataset_version.split('_')[0], dataset_version)
    # Prepare URLs for the subject-selection and filter tests
    urls = [data_prefix + '/stuff.html',
            data_prefix + '/sub-xxx.html',
            data_prefix + '/sub-yyy.html',
            data_prefix + '/sub-xxx/ses-01_task-rest.txt',
            data_prefix + '/sub-xxx/ses-01_task-other.txt',
            data_prefix + '/sub-xxx/ses-02_task-rest.txt',
            data_prefix + '/sub-xxx/ses-02_task-other.txt',
            data_prefix + '/sub-yyy/ses-01.txt',
            data_prefix + '/sub-yyy/ses-02.txt']

    # Only one subject's files, plus non-subject-specific files, are selected
    new_urls = func.select_from_index(urls, n_subjects=1)
    assert len(new_urls) == 6
    assert data_prefix + '/sub-yyy.html' not in new_urls

    # Two subjects' files, plus non-subject-specific files, are selected
    new_urls = func.select_from_index(urls, n_subjects=2)
    assert len(new_urls) == 9
    assert data_prefix + '/sub-yyy.html' in new_urls
    # With n_subjects=None, files for ALL subjects are selected
    new_urls = func.select_from_index(urls, n_subjects=None)
    assert len(new_urls) == 9

    # Test inclusion filters: only files matching task-rest are kept
    new_urls = func.select_from_index(
        urls, inclusion_filters=['*task-rest*'])
    assert len(new_urls) == 2
    assert data_prefix + '/stuff.html' not in new_urls

    # Test exclusion filters: files matching ses-01 are dropped
    new_urls = func.select_from_index(
        urls, exclusion_filters=['*ses-01*'])
    assert len(new_urls) == 6
    assert data_prefix + '/stuff.html' in new_urls

    # Test filter combination: only task-rest files without ses-01 remain
    new_urls = func.select_from_index(
        urls, inclusion_filters=['*task-rest*'],
        exclusion_filters=['*ses-01*'])
    assert len(new_urls) == 1
    assert data_prefix + '/sub-xxx/ses-02_task-rest.txt' in new_urls
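
# For reference, the selection behavior exercised above can be reproduced
# with fnmatch-style glob matching. The sketch below is a simplified,
# hypothetical reimplementation (the names `select_urls` and
# `_subject_label`, and the subject-detection regex, are assumptions for
# illustration), not Nilearn's actual `select_from_index` code.
import re
from fnmatch import fnmatch


def _subject_label(url):
    # Return the 'sub-<label>' part of a URL, or None for
    # non-subject-specific files (hypothetical helper).
    match = re.search(r'sub-[^/._]+', url)
    return match.group() if match else None


def select_urls(urls, n_subjects=None, inclusion_filters=None,
                exclusion_filters=None):
    """Simplified sketch: filter an index of URLs like select_from_index."""
    inclusion_filters = inclusion_filters or []
    exclusion_filters = exclusion_filters or []
    # Keep a URL only if it matches every inclusion filter
    # and no exclusion filter.
    selected = [url for url in urls
                if all(fnmatch(url, pat) for pat in inclusion_filters)
                and not any(fnmatch(url, pat) for pat in exclusion_filters)]
    if n_subjects is not None:
        # Collect subject labels in order of first appearance,
        # then keep only the first n_subjects of them.
        labels = []
        for url in selected:
            label = _subject_label(url)
            if label and label not in labels:
                labels.append(label)
        kept = set(labels[:n_subjects])
        # Non-subject-specific files (label is None) are always kept.
        selected = [url for url in selected
                    if _subject_label(url) is None
                    or _subject_label(url) in kept]
    return selected


# With the `urls` list from the test above, select_urls(urls, n_subjects=1)
# yields the same six entries the test expects.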
##############################################################################
# Fetch openneuro BIDS dataset
# ----------------------------
# We download one subject from a BIDS dataset available in openneuro.
# This dataset contains the necessary information to run a statistical analysis
# using Nilearn. The dataset also contains statistical results from a previous
# FSL analysis that we can employ for comparison with the Nilearn estimation.
from nilearn.datasets.func import (fetch_openneuro_dataset_index,
                                   fetch_openneuro_dataset, select_from_index)

_, urls = fetch_openneuro_dataset_index()

exclusion_patterns = [
    '*group*', '*phenotype*', '*mriqc*', '*parameter_plots*', '*physio_plots*',
    '*space-fsaverage*', '*space-T1w*', '*dwi*', '*beh*', '*task-bart*',
    '*task-rest*', '*task-scap*', '*task-task*'
]
urls = select_from_index(urls,
                         exclusion_filters=exclusion_patterns,
                         n_subjects=1)

data_dir, _ = fetch_openneuro_dataset(urls=urls)

##############################################################################
# Automatically obtain FirstLevelModel objects and fit arguments
# ---------------------------------------------------------------
# From the dataset directory we automatically obtain FirstLevelModel objects
# with their subject_id filled in from the BIDS dataset. Moreover, we obtain
# for each model the list of run images and their respective events and
# confound regressors, read from the events.tsv and confounds.tsv files
# available in the BIDS dataset.
# To get the first level models we have to specify the dataset directory,
# the task_label and the space_label as specified in the file names.
# We also have to provide the folder with the desired derivatives, which in
# this case were produced by the fMRIPrep pipeline.
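
# A minimal sketch of this call is shown below, assuming Nilearn's
# first_level_models_from_bids (renamed first_level_from_bids in recent
# Nilearn releases) and the standard 'derivatives' folder. The task and
# space labels are assumed values and must match your dataset's file names.
from nilearn.glm.first_level import first_level_models_from_bids

task_label = 'stopsignal'  # assumed: the task kept by the filters above
space_label = 'MNI152NLin2009cAsym'  # assumed: standard fMRIPrep output space
models, models_run_imgs, models_events, models_confounds = \
    first_level_models_from_bids(
        data_dir, task_label, space_label,
        img_filters=[('desc', 'preproc')],  # use the preprocessed images
        derivatives_folder='derivatives')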