Example #1
File: base.py  Project: hensel-f/smriprep
def init_single_subject_wf(
    debug,
    freesurfer,
    fast_track,
    hires,
    layout,
    longitudinal,
    low_mem,
    name,
    omp_nthreads,
    output_dir,
    skull_strip_fixed_seed,
    skull_strip_mode,
    skull_strip_template,
    spaces,
    subject_id,
    bids_filters,
):
    """
    Create a single subject workflow.

    This workflow organizes the preprocessing pipeline for a single subject.
    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.

    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from collections import namedtuple
            from niworkflows.utils.spaces import SpatialReferences, Reference
            from smriprep.workflows.base import init_single_subject_wf
            BIDSLayout = namedtuple('BIDSLayout', ['root'])
            wf = init_single_subject_wf(
                debug=False,
                freesurfer=True,
                fast_track=False,
                hires=True,
                layout=BIDSLayout('.'),
                longitudinal=False,
                low_mem=False,
                name='single_subject_wf',
                omp_nthreads=1,
                output_dir='.',
                skull_strip_fixed_seed=False,
                skull_strip_mode='force',
                skull_strip_template=Reference('OASIS30ANTs'),
                spaces=SpatialReferences(spaces=['MNI152NLin2009cAsym', 'fsaverage5']),
                subject_id='test',
                bids_filters=None,
            )

    Parameters
    ----------
    debug : :obj:`bool`
        Enable debugging outputs
    freesurfer : :obj:`bool`
        Enable FreeSurfer surface reconstruction (may increase runtime)
    fast_track : :obj:`bool`
        If ``True``, attempt to collect previously run derivatives.
    hires : :obj:`bool`
        Enable sub-millimeter preprocessing in FreeSurfer
    layout : BIDSLayout object
        BIDS dataset layout
    longitudinal : :obj:`bool`
        Treat multiple sessions as longitudinal (may increase runtime)
        See sub-workflows for specific differences
    low_mem : :obj:`bool`
        Write uncompressed .nii files in some cases to reduce memory usage
    name : :obj:`str`
        Name of workflow
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    output_dir : :obj:`str`
        Directory in which to save derivatives
    skull_strip_fixed_seed : :obj:`bool`
        Do not use a random seed for skull-stripping - will ensure
        run-to-run replicability when used with --omp-nthreads 1
    skull_strip_mode : :obj:`str`
        Determiner for T1-weighted skull stripping (`force` ensures skull stripping,
        `skip` ignores skull stripping, and `auto` automatically ignores skull stripping
        if pre-stripped brains are detected).
    skull_strip_template : :py:class:`~niworkflows.utils.spaces.Reference`
        Spatial reference to use in atlas-based brain extraction.
    spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
        Object containing standard and nonstandard space specifications.
    subject_id : :obj:`str`
        Single subject label
    bids_filters : dict
        Provides finer specification of the pipeline input files through pybids entities filters.
        A dict with the following structure {<suffix>:{<entity>:<filter>,...},...}

    Inputs
    ------
    subjects_dir
        FreeSurfer SUBJECTS_DIR

    """
    from ..interfaces.reports import AboutSummary, SubjectSummary
    if name in ('single_subject_wf', 'single_subject_smripreptest_wf'):
        # for documentation purposes
        subject_data = {
            't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
        }
    else:
        subject_data = collect_data(layout,
                                    subject_id,
                                    bids_filters=bids_filters)[0]

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *sMRIPrep* {smriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(smriprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

For more details of the pipeline, see [the section corresponding
to workflows in *sMRIPrep*'s documentation]\
(https://smriprep.readthedocs.io/en/latest/workflows.html \
"sMRIPrep's documentation").


### References

"""

    deriv_cache = None
    if fast_track:
        from ..utils.bids import collect_derivatives
        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        deriv_cache = collect_derivatives(
            Path(output_dir) / 'smriprep', subject_id, std_spaces, freesurfer)

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=True),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(bids_dir=layout.root),
                        name='bids_info',
                        run_without_submitting=True)

    summary = pe.Node(
        SubjectSummary(output_spaces=spaces.get_spaces(nonstandard=False)),
        name='summary',
        run_without_submitting=True)

    about = pe.Node(AboutSummary(version=__version__,
                                 command=' '.join(sys.argv)),
                    name='about',
                    run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(
        base_directory=output_dir,
        dismiss_entities=("session", ),
        desc='summary',
        datatype="figures"),
                                name='ds_report_summary',
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(
        base_directory=output_dir,
        dismiss_entities=("session", ),
        desc='about',
        datatype="figures"),
                              name='ds_report_about',
                              run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=layout.root,
        debug=debug,
        existing_derivatives=deriv_cache,
        freesurfer=freesurfer,
        hires=hires,
        longitudinal=longitudinal,
        name="anat_preproc_wf",
        t1w=subject_data['t1w'],
        omp_nthreads=omp_nthreads,
        output_dir=output_dir,
        skull_strip_fixed_seed=skull_strip_fixed_seed,
        skull_strip_mode=skull_strip_mode,
        skull_strip_template=skull_strip_template,
        spaces=spaces,
    )

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir',
                                       'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')
                              ]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bids_info, anat_preproc_wf, [(('subject', _prefix),
                                       'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    return workflow
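
The `bids_filters` argument documented above is a plain nested dictionary keyed first by suffix and then by BIDS entity, and it is forwarded untouched to `collect_data()`. A minimal sketch of such a filter follows; the entity and value are illustrative assumptions, not taken from the example.

# Hypothetical filter restricting anatomical inputs to acq-mprage T1w runs;
# the structure follows the docstring: {<suffix>: {<entity>: <filter>, ...}, ...}
bids_filters = {
    't1w': {'acquisition': 'mprage'},
}

# The dictionary is handed to collect_data(), which passes the entity
# constraints on to the underlying pybids queries, e.g.:
# init_single_subject_wf(..., bids_filters=bids_filters)
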
Example #2
def init_single_subject_wf(
        layout, subject_id, task_id, echo_idx, name, reportlets_dir,
        output_dir, ignore, debug, low_mem, anat_only, longitudinal, t2s_coreg,
        omp_nthreads, skull_strip_template, skull_strip_fixed_seed, freesurfer,
        output_spaces, template, medial_surface_nan, cifti_output, hires,
        use_bbr, bold2t1w_dof, fmap_bspline, fmap_demean, use_syn, force_syn,
        template_out_grid, use_aroma, aroma_melodic_dim, err_on_aroma_warn):
    """
    This workflow organizes the preprocessing pipeline for a single subject.
    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.

    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.base import init_single_subject_wf
        from collections import namedtuple
        BIDSLayout = namedtuple('BIDSLayout', ['root'], defaults=['.'])
        wf = init_single_subject_wf(layout=BIDSLayout(),
                                    subject_id='test',
                                    task_id='',
                                    echo_idx=None,
                                    name='single_subject_wf',
                                    reportlets_dir='.',
                                    output_dir='.',
                                    ignore=[],
                                    debug=False,
                                    low_mem=False,
                                    anat_only=False,
                                    longitudinal=False,
                                    t2s_coreg=False,
                                    omp_nthreads=1,
                                    skull_strip_template='OASIS30ANTs',
                                    skull_strip_fixed_seed=False,
                                    freesurfer=True,
                                    template='MNI152NLin2009cAsym',
                                    output_spaces=['T1w', 'fsnative',
                                                  'template', 'fsaverage5'],
                                    medial_surface_nan=False,
                                    cifti_output=False,
                                    hires=True,
                                    use_bbr=True,
                                    bold2t1w_dof=9,
                                    fmap_bspline=False,
                                    fmap_demean=True,
                                    use_syn=True,
                                    force_syn=True,
                                    template_out_grid='native',
                                    use_aroma=False,
                                    aroma_melodic_dim=-200,
                                    err_on_aroma_warn=False)

    Parameters

        layout : BIDSLayout object
            BIDS dataset layout
        subject_id : str
            Single subject label
        task_id : str or None
            Task ID of BOLD series to preprocess, or ``None`` to preprocess all
        echo_idx : int or None
            Index of echo to preprocess in multiecho BOLD series,
            or ``None`` to preprocess all
        name : str
            Name of workflow
        ignore : list
            Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
        debug : bool
            Enable debugging outputs
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        anat_only : bool
            Disable functional workflows
        longitudinal : bool
            Treat multiple sessions as longitudinal (may increase runtime)
            See sub-workflows for specific differences
        t2s_coreg : bool
            For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
        omp_nthreads : int
            Maximum number of threads an individual process may use
        skull_strip_template : str
            Name of ANTs skull-stripping template ('OASIS30ANTs' or 'NKI')
        skull_strip_fixed_seed : bool
            Do not use a random seed for skull-stripping - will ensure
            run-to-run replicability when used with --omp-nthreads 1
        reportlets_dir : str
            Directory in which to save reportlets
        output_dir : str
            Directory in which to save derivatives
        freesurfer : bool
            Enable FreeSurfer surface reconstruction (may increase runtime)
        output_spaces : list
            List of output spaces functional images are to be resampled to.
            Some parts of pipeline will only be instantiated for some output spaces.

            Valid spaces:

             - T1w
             - template
             - fsnative
             - fsaverage (or other pre-existing FreeSurfer templates)
        template : str
            Name of template targeted by ``template`` output space
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files
        cifti_output : bool
            Generate bold CIFTI file in output spaces
        hires : bool
            Enable sub-millimeter preprocessing in FreeSurfer
        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        use_syn : bool
            **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
            If fieldmaps are present and enabled, this is not run, by default.
        force_syn : bool
            **Temporary**: Always run SyN-based SDC
        template_out_grid : str
            Keyword ('native', '1mm' or '2mm') or path of custom reference
            image for normalization
        use_aroma : bool
            Perform ICA-AROMA on MNI-resampled functional series
        err_on_aroma_warn : bool
            Do not fail on ICA-AROMA errors

    Inputs

        subjects_dir
            FreeSurfer SUBJECTS_DIR

    """
    if name in ('single_subject_wf', 'single_subject_fmripreptest_wf'):
        # for documentation purposes
        subject_data = {
            't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
            'bold': ['/completely/made/up/path/sub-01_task-nback_bold.nii.gz']
        }
    else:
        subject_data = collect_data(layout, subject_id, task_id, echo_idx)[0]

    # Make sure we always go through these two checks
    if not anat_only and subject_data['bold'] == []:
        raise Exception("No BOLD images found for participant {} and task {}. "
                        "All workflows require BOLD images.".format(
                            subject_id, task_id if task_id else '<all>'))

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(fmriprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### References

""".format(nilearn_ver=nilearn_ver)

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=anat_only),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(bids_dir=layout.root, bids_validate=False),
                        name='bids_info')

    summary = pe.Node(SubjectSummary(output_spaces=output_spaces,
                                     template=template),
                      name='summary',
                      run_without_submitting=True)

    about = pe.Node(AboutSummary(version=__version__,
                                 command=' '.join(sys.argv)),
                    name='about',
                    run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, suffix='summary'),
                                name='ds_report_summary',
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, suffix='about'),
                              name='ds_report_about',
                              run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=layout.root,
        debug=debug,
        freesurfer=freesurfer,
        fs_spaces=output_spaces,
        hires=hires,
        longitudinal=longitudinal,
        name="anat_preproc_wf",
        num_t1w=len(subject_data['t1w']),
        omp_nthreads=omp_nthreads,
        output_dir=output_dir,
        reportlets_dir=reportlets_dir,
        skull_strip_fixed_seed=skull_strip_fixed_seed,
        skull_strip_template=skull_strip_template,
        template=template,
    )

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir',
                                       'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')
                              ]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('bold', 'bold')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bids_info, anat_preproc_wf, [(('subject', _prefix),
                                       'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_'):
            workflow.get_node(node).interface.out_path_base = 'fmriprep'

    if anat_only:
        return workflow

    for bold_file in subject_data['bold']:
        func_preproc_wf = init_func_preproc_wf(
            bold_file=bold_file,
            layout=layout,
            ignore=ignore,
            freesurfer=freesurfer,
            use_bbr=use_bbr,
            t2s_coreg=t2s_coreg,
            bold2t1w_dof=bold2t1w_dof,
            reportlets_dir=reportlets_dir,
            output_spaces=output_spaces,
            template=template,
            medial_surface_nan=medial_surface_nan,
            cifti_output=cifti_output,
            output_dir=output_dir,
            omp_nthreads=omp_nthreads,
            low_mem=low_mem,
            fmap_bspline=fmap_bspline,
            fmap_demean=fmap_demean,
            use_syn=use_syn,
            force_syn=force_syn,
            debug=debug,
            template_out_grid=template_out_grid,
            use_aroma=use_aroma,
            aroma_melodic_dim=aroma_melodic_dim,
            err_on_aroma_warn=err_on_aroma_warn,
            num_bold=len(subject_data['bold']))

        workflow.connect([
            (
                anat_preproc_wf,
                func_preproc_wf,
                [
                    (('outputnode.t1_preproc', _pop), 'inputnode.t1_preproc'),
                    ('outputnode.t1_brain', 'inputnode.t1_brain'),
                    ('outputnode.t1_mask', 'inputnode.t1_mask'),
                    ('outputnode.t1_seg', 'inputnode.t1_seg'),
                    ('outputnode.t1_aseg', 'inputnode.t1_aseg'),
                    ('outputnode.t1_aparc', 'inputnode.t1_aparc'),
                    ('outputnode.t1_tpms', 'inputnode.t1_tpms'),
                    ('outputnode.t1_2_mni_forward_transform',
                     'inputnode.t1_2_mni_forward_transform'),
                    ('outputnode.t1_2_mni_reverse_transform',
                     'inputnode.t1_2_mni_reverse_transform'),
                    # Undefined if --no-freesurfer, but this is safe
                    ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                    ('outputnode.subject_id', 'inputnode.subject_id'),
                    ('outputnode.t1_2_fsnative_forward_transform',
                     'inputnode.t1_2_fsnative_forward_transform'),
                    ('outputnode.t1_2_fsnative_reverse_transform',
                     'inputnode.t1_2_fsnative_reverse_transform')
                ]),
        ])

    return workflow
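
The `(('outputnode.t1_preproc', _pop), 'inputnode.t1_preproc')` connection above routes the output through a `_pop` helper that is defined elsewhere in fMRIPrep's base.py and is not shown in this excerpt. A sketch of what such a helper presumably does (take the first element when handed a list):

def _pop(inlist):
    # Anatomical outputs may arrive as single-item lists; return the first
    # element in that case, otherwise pass the value through unchanged.
    if isinstance(inlist, (list, tuple)):
        return inlist[0]
    return inlist
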
Example #3
File: base.py  Project: nicholst/fmriprep
def init_single_subject_wf(
    anat_only,
    aroma_melodic_dim,
    bold2t1w_dof,
    cifti_output,
    debug,
    dummy_scans,
    echo_idx,
    err_on_aroma_warn,
    fmap_bspline,
    fmap_demean,
    force_syn,
    freesurfer,
    hires,
    ignore,
    layout,
    longitudinal,
    low_mem,
    medial_surface_nan,
    name,
    omp_nthreads,
    output_dir,
    reportlets_dir,
    regressors_all_comps,
    regressors_dvars_th,
    regressors_fd_th,
    skull_strip_fixed_seed,
    skull_strip_template,
    spaces,
    subject_id,
    t2s_coreg,
    task_id,
    use_aroma,
    use_bbr,
    use_syn,
):
    """
    This workflow organizes the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from collections import namedtuple
            from niworkflows.utils.spaces import Reference, SpatialReferences
            from fmriprep.workflows.base import init_single_subject_wf

            BIDSLayout = namedtuple('BIDSLayout', ['root'])
            wf = init_single_subject_wf(
                anat_only=False,
                aroma_melodic_dim=-200,
                bold2t1w_dof=9,
                cifti_output=False,
                debug=False,
                dummy_scans=None,
                echo_idx=None,
                err_on_aroma_warn=False,
                fmap_bspline=False,
                fmap_demean=True,
                force_syn=True,
                freesurfer=True,
                hires=True,
                ignore=[],
                layout=BIDSLayout('.'),
                longitudinal=False,
                low_mem=False,
                medial_surface_nan=False,
                name='single_subject_wf',
                omp_nthreads=1,
                output_dir='.',
                reportlets_dir='.',
                regressors_all_comps=False,
                regressors_dvars_th=1.5,
                regressors_fd_th=0.5,
                skull_strip_fixed_seed=False,
                skull_strip_template=Reference('OASIS30ANTs'),
                spaces=SpatialReferences(
                    spaces=['MNI152Lin',
                            ('fsaverage', {'density': '10k'}),
                            'T1w',
                            'fsnative'],
                    checkpoint=True),
                subject_id='test',
                t2s_coreg=False,
                task_id='',
                use_aroma=False,
                use_bbr=True,
                use_syn=True,
            )

    Parameters
    ----------
    anat_only : bool
        Disable functional workflows
    aroma_melodic_dim : int
        Maximum number of components identified by MELODIC within ICA-AROMA
        (default is -200, i.e., no limitation).
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    cifti_output : bool
        Generate bold CIFTI file in output spaces
    debug : bool
        Enable debugging outputs
    dummy_scans : int or None
        Number of volumes to consider as non steady state
    echo_idx : int or None
        Index of echo to preprocess in multiecho BOLD series,
        or ``None`` to preprocess all
    err_on_aroma_warn : bool
        Do not fail on ICA-AROMA errors
    fmap_bspline : bool
        **Experimental**: Fit B-Spline field using least-squares
    fmap_demean : bool
        Demean voxel-shift map during unwarp
    force_syn : bool
        **Temporary**: Always run SyN-based SDC
    freesurfer : bool
        Enable FreeSurfer surface reconstruction (may increase runtime)
    hires : bool
        Enable sub-millimeter preprocessing in FreeSurfer
    ignore : list
        Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
    layout : BIDSLayout object
        BIDS dataset layout
    longitudinal : bool
        Treat multiple sessions as longitudinal (may increase runtime)
        See sub-workflows for specific differences
    low_mem : bool
        Write uncompressed .nii files in some cases to reduce memory usage
    medial_surface_nan : bool
        Replace medial wall values with NaNs on functional GIFTI files
    name : str
        Name of workflow
    omp_nthreads : int
        Maximum number of threads an individual process may use
    output_dir : str
        Directory in which to save derivatives
    reportlets_dir : str
        Directory in which to save reportlets
    regressors_all_comps
        Return all CompCor component time series instead of the top fraction
    regressors_fd_th
        Criterion for flagging framewise displacement outliers
    regressors_dvars_th
        Criterion for flagging DVARS outliers
    skull_strip_fixed_seed : bool
        Do not use a random seed for skull-stripping - will ensure
        run-to-run replicability when used with --omp-nthreads 1
    skull_strip_template : tuple
        Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
        and corresponding dictionary of output-space modifiers.
    subject_id : str
        Single subject label
    t2s_coreg : bool
        For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
    spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
        A container for storing, organizing, and parsing spatial normalizations. Composed of
        :py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
        Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
        (e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
        (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
        the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
        dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
        would lead to resampling on a 2mm resolution of the space).
    task_id : str or None
        Task ID of BOLD series to preprocess, or ``None`` to preprocess all
    use_aroma : bool
        Perform ICA-AROMA on MNI-resampled functional series
    use_bbr : bool or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    use_syn : bool
        **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
        If fieldmaps are present and enabled, this is not run, by default.

    Inputs
    ------
    subjects_dir : str
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    if name in ('single_subject_wf', 'single_subject_fmripreptest_wf'):
        # for documentation purposes
        subject_data = {
            't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
            'bold': ['/completely/made/up/path/sub-01_task-nback_bold.nii.gz']
        }
    else:
        subject_data = collect_data(layout, subject_id, task_id, echo_idx)[0]

    # Make sure we always go through these two checks
    if not anat_only and subject_data['bold'] == []:
        raise Exception("No BOLD images found for participant {} and task {}. "
                        "All workflows require BOLD images.".format(
                            subject_id, task_id if task_id else '<all>'))

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(fmriprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(
        bids_dir=layout.root, bids_validate=False), name='bids_info')

    summary = pe.Node(SubjectSummary(std_spaces=spaces.get_spaces(nonstandard=False),
                                     nstd_spaces=spaces.get_spaces(standard=False)),
                      name='summary', run_without_submitting=True)

    about = pe.Node(AboutSummary(version=__version__,
                                 command=' '.join(sys.argv)),
                    name='about', run_without_submitting=True)

    ds_report_summary = pe.Node(
        DerivativesDataSink(base_directory=reportlets_dir,
                            desc='summary', keep_dtype=True),
        name='ds_report_summary', run_without_submitting=True)

    ds_report_about = pe.Node(
        DerivativesDataSink(base_directory=reportlets_dir,
                            desc='about', keep_dtype=True),
        name='ds_report_about', run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=layout.root,
        debug=debug,
        freesurfer=freesurfer,
        hires=hires,
        longitudinal=longitudinal,
        name="anat_preproc_wf",
        num_t1w=len(subject_data['t1w']),
        omp_nthreads=omp_nthreads,
        output_dir=output_dir,
        reportlets_dir=reportlets_dir,
        spaces=spaces,
        skull_strip_fixed_seed=skull_strip_fixed_seed,
        skull_strip_template=skull_strip_template,
    )

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'),
                            ('t2w', 't2w'),
                            ('bold', 'bold')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bids_info, anat_preproc_wf, [(('subject', _prefix), 'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_'):
            workflow.get_node(node).interface.out_path_base = 'fmriprep'

    if anat_only:
        return workflow

    for bold_file in subject_data['bold']:
        func_preproc_wf = init_func_preproc_wf(
            aroma_melodic_dim=aroma_melodic_dim,
            bold2t1w_dof=bold2t1w_dof,
            bold_file=bold_file,
            cifti_output=cifti_output,
            debug=debug,
            dummy_scans=dummy_scans,
            err_on_aroma_warn=err_on_aroma_warn,
            fmap_bspline=fmap_bspline,
            fmap_demean=fmap_demean,
            force_syn=force_syn,
            freesurfer=freesurfer,
            ignore=ignore,
            layout=layout,
            low_mem=low_mem,
            medial_surface_nan=medial_surface_nan,
            num_bold=len(subject_data['bold']),
            omp_nthreads=omp_nthreads,
            output_dir=output_dir,
            reportlets_dir=reportlets_dir,
            regressors_all_comps=regressors_all_comps,
            regressors_fd_th=regressors_fd_th,
            regressors_dvars_th=regressors_dvars_th,
            spaces=spaces,
            t2s_coreg=t2s_coreg,
            use_aroma=use_aroma,
            use_bbr=use_bbr,
            use_syn=use_syn,
        )

        workflow.connect([
            (anat_preproc_wf, func_preproc_wf,
             [(('outputnode.t1w_preproc', _pop), 'inputnode.t1w_preproc'),
              ('outputnode.t1w_brain', 'inputnode.t1w_brain'),
              ('outputnode.t1w_mask', 'inputnode.t1w_mask'),
              ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'),
              ('outputnode.t1w_aseg', 'inputnode.t1w_aseg'),
              ('outputnode.t1w_aparc', 'inputnode.t1w_aparc'),
              ('outputnode.t1w_tpms', 'inputnode.t1w_tpms'),
              ('outputnode.template', 'inputnode.template'),
              ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
              ('outputnode.joint_template', 'inputnode.joint_template'),
              ('outputnode.joint_anat2std_xfm', 'inputnode.joint_anat2std_xfm'),
              ('outputnode.joint_std2anat_xfm', 'inputnode.joint_std2anat_xfm'),
              # Undefined if --fs-no-reconall, but this is safe
              ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
              ('outputnode.subject_id', 'inputnode.subject_id'),
              ('outputnode.t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'),
              ('outputnode.fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm')]),
        ])

    return workflow
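
Example #3 queries its `spaces` container twice: once with `nonstandard=False` for the standard (template) references and once with `standard=False` for the nonstandard ones. A small sketch of those calls, reusing the `SpatialReferences` built in the docstring above; the printed lists are indicative, not verified output.

from niworkflows.utils.spaces import SpatialReferences

spaces = SpatialReferences(
    spaces=['MNI152Lin', ('fsaverage', {'density': '10k'}), 'T1w', 'fsnative'],
    checkpoint=True,
)

# Standard template spaces -> SubjectSummary(std_spaces=...)
print(spaces.get_spaces(nonstandard=False))   # e.g. ['MNI152Lin', 'fsaverage']
# Nonstandard references -> SubjectSummary(nstd_spaces=...)
print(spaces.get_spaces(standard=False))      # e.g. ['T1w', 'fsnative']
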
Example #4
def init_single_subject_wf(subject_id):
    """
    Organize the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.tests import mock_config
            from fmriprep.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')

    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.

    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.misc import fix_multi_T1w_source_name
    from niworkflows.utils.spaces import Reference
    from smriprep.workflows.anatomical import init_anat_preproc_wf

    name = "single_subject_%s_wf" % subject_id
    subject_data = collect_data(config.execution.layout,
                                subject_id,
                                config.execution.task_id,
                                config.execution.echo_idx,
                                bids_filters=config.execution.bids_filters)[0]

    if 'flair' in config.workflow.ignore:
        subject_data['flair'] = []
    if 't2w' in config.workflow.ignore:
        subject_data['t2w'] = []

    anat_only = config.workflow.anat_only
    # Make sure we always go through these two checks
    if not anat_only and not subject_data['bold']:
        task_id = config.execution.task_id
        raise RuntimeError(
            "No BOLD images found for participant {} and task {}. "
            "All workflows require BOLD images.".format(
                subject_id, task_id if task_id else '<all>'))

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(fmriprep_ver=config.environment.version,
           nipype_ver=config.environment.nipype_version)
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    spaces = config.workflow.spaces
    reportlets_dir = str(config.execution.work_dir / 'reportlets')

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=anat_only,
                                      subject_id=subject_id),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(bids_dir=config.execution.bids_dir,
                                 bids_validate=False),
                        name='bids_info')

    summary = pe.Node(SubjectSummary(
        std_spaces=spaces.get_spaces(nonstandard=False),
        nstd_spaces=spaces.get_spaces(standard=False)),
                      name='summary',
                      run_without_submitting=True)

    about = pe.Node(AboutSummary(version=config.environment.version,
                                 command=' '.join(sys.argv)),
                    name='about',
                    run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='summary', keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='about', keep_dtype=True),
                              name='ds_report_about',
                              run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=str(config.execution.bids_dir),
        debug=config.execution.debug is True,
        freesurfer=config.workflow.run_reconall,
        hires=config.workflow.hires,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=str(config.execution.output_dir),
        reportlets_dir=reportlets_dir,
        skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        spaces=spaces,
        t1w=subject_data['t1w'],
    )

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir',
                                       'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')
                              ]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('bold', 'bold')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bids_info, anat_preproc_wf, [(('subject', _prefix),
                                       'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_'):
            workflow.get_node(node).interface.out_path_base = 'fmriprep'

    if anat_only:
        return workflow

    # Append the functional section to the existing anatomical excerpt
    # That way we do not need to stream down the number of bold datasets
    anat_preproc_wf.__postdesc__ = (anat_preproc_wf.__postdesc__ or '') + """

Functional data preprocessing

: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=len(subject_data['bold']))

    for bold_file in subject_data['bold']:
        func_preproc_wf = init_func_preproc_wf(bold_file)

        workflow.connect([
            (
                anat_preproc_wf,
                func_preproc_wf,
                [
                    ('outputnode.t1w_preproc', 'inputnode.t1w_preproc'),
                    ('outputnode.t1w_mask', 'inputnode.t1w_mask'),
                    ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'),
                    ('outputnode.t1w_aseg', 'inputnode.t1w_aseg'),
                    ('outputnode.t1w_aparc', 'inputnode.t1w_aparc'),
                    ('outputnode.t1w_tpms', 'inputnode.t1w_tpms'),
                    ('outputnode.template', 'inputnode.template'),
                    ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
                    ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
                    # Undefined if --fs-no-reconall, but this is safe
                    ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                    ('outputnode.subject_id', 'inputnode.subject_id'),
                    ('outputnode.t1w2fsnative_xfm',
                     'inputnode.t1w2fsnative_xfm'),
                    ('outputnode.fsnative2t1w_xfm',
                     'inputnode.fsnative2t1w_xfm')
                ]),
        ])
    return workflow
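
Each of these excerpts feeds the BIDS subject label through a `_prefix` callable before it reaches `inputnode.subject_id`. The helper is not part of the excerpt; presumably it just normalizes the label to the `sub-<label>` convention used for FreeSurfer subject directories, roughly:

def _prefix(subid):
    # Hypothetical reconstruction: ensure the label carries the BIDS 'sub-' prefix.
    return subid if subid.startswith('sub-') else 'sub-' + subid
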
Example #5
File: base.py  Project: bpinsard/smriprep
def init_single_subject_wf(
    debug,
    freesurfer,
    hires,
    layout,
    longitudinal,
    low_mem,
    name,
    omp_nthreads,
    output_dir,
    output_spaces,
    reportlets_dir,
    skull_strip_fixed_seed,
    skull_strip_template,
    subject_id,
):
    """
    Create a single subject workflow.

    This workflow organizes the preprocessing pipeline for a single subject.
    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.

    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from collections import OrderedDict, namedtuple
        from smriprep.workflows.base import init_single_subject_wf
        BIDSLayout = namedtuple('BIDSLayout', ['root'])
        wf = init_single_subject_wf(
            debug=False,
            freesurfer=True,
            hires=True,
            layout=BIDSLayout('.'),
            longitudinal=False,
            low_mem=False,
            name='single_subject_wf',
            omp_nthreads=1,
            output_dir='.',
            output_spaces=OrderedDict([('MNI152NLin2009cAsym', {}),
                                       ('fsaverage5', {})]),
            reportlets_dir='.',
            skull_strip_fixed_seed=False,
            skull_strip_template=('OASIS30ANTs', {}),
            subject_id='test',
        )


    **Parameters**

        debug : bool
            Enable debugging outputs
        freesurfer : bool
            Enable FreeSurfer surface reconstruction (may increase runtime)
        hires : bool
            Enable sub-millimeter preprocessing in FreeSurfer
        layout : BIDSLayout object
            BIDS dataset layout
        longitudinal : bool
            Treat multiple sessions as longitudinal (may increase runtime)
            See sub-workflows for specific differences
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        name : str
            Name of workflow
        omp_nthreads : int
            Maximum number of threads an individual process may use
        output_dir : str
            Directory in which to save derivatives
        output_spaces : OrderedDict
            List of spatial normalization targets. Some parts of pipeline will
            only be instantiated for some output spaces. Valid spaces:
            - Any template identifier from TemplateFlow
            - Path to a template folder organized following TemplateFlow's
            conventions
        reportlets_dir : str
            Directory in which to save reportlets
        skull_strip_fixed_seed : bool
            Do not use a random seed for skull-stripping - will ensure
            run-to-run replicability when used with --omp-nthreads 1
        skull_strip_template : tuple
            Name of ANTs skull-stripping template (e.g., 'OASIS30ANTs') and
            dictionary of template specifications.
        subject_id : str
            Single subject label

    **Inputs**

        subjects_dir
            FreeSurfer SUBJECTS_DIR

    """
    from ..interfaces.reports import AboutSummary, SubjectSummary
    if name in ('single_subject_wf', 'single_subject_smripreptest_wf'):
        # for documentation purposes
        subject_data = {
            't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
        }
    else:
        subject_data = collect_data(layout, subject_id)[0]

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *sMRIPrep* {smriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(smriprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

For more details of the pipeline, see [the section corresponding
to workflows in *sMRIPrep*'s documentation]\
(https://smriprep.readthedocs.io/en/latest/workflows.html \
"sMRIPrep's documentation").


### References

"""

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=True),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(bids_dir=layout.root),
                        name='bids_info',
                        run_without_submitting=True)

    summary = pe.Node(SubjectSummary(output_spaces=list(output_spaces.keys())),
                      name='summary',
                      run_without_submitting=True)

    about = pe.Node(AboutSummary(version=__version__,
                                 command=' '.join(sys.argv)),
                    name='about',
                    run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='summary', keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='about', keep_dtype=True),
                              name='ds_report_about',
                              run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=layout.root,
        debug=debug,
        freesurfer=freesurfer,
        hires=hires,
        longitudinal=longitudinal,
        name="anat_preproc_wf",
        num_t1w=len(subject_data['t1w']),
        omp_nthreads=omp_nthreads,
        output_dir=output_dir,
        output_spaces=output_spaces,
        reportlets_dir=reportlets_dir,
        skull_strip_fixed_seed=skull_strip_fixed_seed,
        skull_strip_template=skull_strip_template,
    )

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir',
                                       'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')
                              ]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bids_info, anat_preproc_wf, [(('subject', _prefix),
                                       'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    return workflow
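
Example #5 still passes `skull_strip_template` as a `(name, spec)` tuple, whereas Examples #1 and #3 take a `niworkflows` `Reference` object and Example #4 parses one from a configuration string. The three spellings carry the same information:

from niworkflows.utils.spaces import Reference

# Older tuple signature (this example): template name plus a spec dictionary.
skull_strip_template = ('OASIS30ANTs', {})

# Reference-based signature (Examples #1 and #3).
skull_strip_template = Reference('OASIS30ANTs')

# Parsed from a configuration string (Example #4).
skull_strip_template = Reference.from_string('OASIS30ANTs')[0]
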
Example #6
File: base.py  Project: mgxd/nibabies
def init_single_subject_wf(subject_id):
    """
    Organize the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from nibabies.workflows.tests import mock_config
            from nibabies.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')

    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.

    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.spaces import Reference

    from .anatomical import init_infant_anat_wf
    from ..utils.misc import fix_multi_source_name

    name = "single_subject_%s_wf" % subject_id
    subject_data = collect_data(
        config.execution.layout,
        subject_id,
        config.execution.task_id,
        config.execution.echo_idx,
        bids_filters=config.execution.bids_filters,
    )[0]

    if "flair" in config.workflow.ignore:
        subject_data["flair"] = []
    if "t2w" in config.workflow.ignore:
        subject_data["t2w"] = []

    anat_only = config.workflow.anat_only
    anat_derivatives = config.execution.anat_derivatives
    anat_modality = config.workflow.anat_modality
    spaces = config.workflow.spaces
    # Make sure we always go through these two checks
    if not anat_only and not subject_data["bold"]:
        task_id = config.execution.task_id
        raise RuntimeError(
            "No BOLD images found for participant {} and task {}. "
            "All workflows require BOLD images.".format(
                subject_id, task_id if task_id else "<all>"))

    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives

        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            config.workflow.run_reconall,
        )
        if anat_derivatives is None:
            config.loggers.workflow.warning(f"""\
Attempted to access pre-existing anatomical derivatives at \
<{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \
were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>, \
reconall <{config.workflow.run_reconall}>).""")

    if not anat_derivatives and not subject_data[anat_modality]:
        raise RuntimeError(
            f"No {anat_modality} images found for participant {subject_id}. "
            f"All workflows require {anat_modality} images.")

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(
        fmriprep_ver=config.environment.version,
        nipype_ver=config.environment.nipype_version,
    )
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://nibabies.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    fmriprep_dir = str(config.execution.fmriprep_dir)

    inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]),
                        name="inputnode")

    bidssrc = pe.Node(
        BIDSDataGrabber(
            subject_data=subject_data,
            anat_only=anat_only,
            anat_derivatives=anat_derivatives,
            subject_id=subject_id,
        ),
        name="bidssrc",
    )

    bids_info = pe.Node(
        BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False),
        name="bids_info",
    )

    summary = pe.Node(
        SubjectSummary(
            std_spaces=spaces.get_spaces(nonstandard=False),
            nstd_spaces=spaces.get_spaces(standard=False),
        ),
        name="summary",
        run_without_submitting=True,
    )

    about = pe.Node(
        AboutSummary(version=config.environment.version,
                     command=" ".join(sys.argv)),
        name="about",
        run_without_submitting=True,
    )

    ds_report_summary = pe.Node(
        DerivativesDataSink(
            base_directory=fmriprep_dir,
            desc="summary",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_summary",
        run_without_submitting=True,
    )

    ds_report_about = pe.Node(
        DerivativesDataSink(
            base_directory=fmriprep_dir,
            desc="about",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_about",
        run_without_submitting=True,
    )

    # Preprocessing of anatomical (includes registration to UNCInfant)
    anat_preproc_wf = init_infant_anat_wf(
        ants_affine_init=config.workflow.ants_affine_init or True,  # NOTE: ``or True`` means this is always enabled
        age_months=config.workflow.age_months,
        anat_modality=anat_modality,
        t1w=subject_data['t1w'],
        t2w=subject_data['t2w'],
        bids_root=config.execution.bids_dir,
        existing_derivatives=anat_derivatives,
        freesurfer=config.workflow.run_reconall,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=fmriprep_dir,
        segmentation_atlases=config.execution.segmentation_atlases_dir,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        sloppy=config.execution.sloppy,
        spaces=spaces,
    )

    # fmt: off
    workflow.connect([
        (inputnode, anat_preproc_wf, [
            ('subjects_dir', 'inputnode.subjects_dir'),
        ]),
        (inputnode, summary, [
            ('subjects_dir', 'subjects_dir'),
        ]),
        (bidssrc, summary, [
            ('bold', 'bold'),
        ]),
        (bids_info, summary, [
            ('subject', 'subject_id'),
        ]),
        (bids_info, anat_preproc_wf, [
            (('subject', _prefix), 'inputnode.subject_id'),
        ]),
        (
            bidssrc,
            anat_preproc_wf,
            [
                ('t1w', 'inputnode.t1w'),
                ('t2w', 'inputnode.t2w'),
                # ('roi', 'inputnode.roi'),
                # ('flair', 'inputnode.flair'),
            ]),
        (summary, ds_report_summary, [
            ('out_report', 'in_file'),
        ]),
        (about, ds_report_about, [
            ('out_report', 'in_file'),
        ]),
    ])

    if not anat_derivatives:
        workflow.connect([
            (bidssrc, bids_info, [
                (('t1w', fix_multi_source_name), 'in_file'),
            ]),
            (bidssrc, summary, [
                ('t1w', 't1w'),
                ('t2w', 't2w'),
            ]),
            (bidssrc, ds_report_summary, [
                (('t1w', fix_multi_source_name), 'source_file'),
            ]),
            (bidssrc, ds_report_about, [
                (('t1w', fix_multi_source_name), 'source_file'),
            ]),
        ])
    else:
        workflow.connect([
            (bidssrc, bids_info, [
                (('bold', fix_multi_source_name), 'in_file'),
            ]),
            (anat_preproc_wf, summary, [
                ('outputnode.t1w_preproc', 't1w'),
            ]),
            (anat_preproc_wf, ds_report_summary, [
                ('outputnode.t1w_preproc', 'source_file'),
            ]),
            (anat_preproc_wf, ds_report_about, [
                ('outputnode.t1w_preproc', 'source_file'),
            ]),
        ])
    # fmt: on

    # Overwrite ``out_path_base`` of smriprep's DataSinks
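    # (clearing it makes these derivatives land directly under the output
    #  directory, rather than in an extra tool-named subfolder)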
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            workflow.get_node(node).interface.out_path_base = ""

    if anat_only:
        return workflow

    raise NotImplementedError("BOLD processing is not yet implemented.")
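    # NOTE: everything below this ``raise`` is currently unreachable.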

    # Append the functional section to the existing anatomical excerpt
    # so that we do not need to pass down the number of BOLD datasets
    anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ or "") + f"""

Functional data preprocessing

: For each of the {len(subject_data['bold'])} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""")

    for bold_file in subject_data["bold"]:
        func_preproc_wf = init_func_preproc_wf(bold_file)

        # fmt: off
        workflow.connect([
            (
                anat_preproc_wf,
                func_preproc_wf,
                [
                    ('outputnode.anat_preproc', 'inputnode.anat_preproc'),
                    ('outputnode.anat_mask', 'inputnode.anat_mask'),
                    ('outputnode.anat_dseg', 'inputnode.anat_dseg'),
                    ('outputnode.anat_aseg', 'inputnode.anat_aseg'),
                    ('outputnode.anat_aparc', 'inputnode.anat_aparc'),
                    ('outputnode.anat_tpms', 'inputnode.anat_tpms'),
                    ('outputnode.template', 'inputnode.template'),
                    ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
                    ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
                    # Undefined if --fs-no-reconall, but this is safe
                    ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                    ('outputnode.subject_id', 'inputnode.subject_id'),
                    ('outputnode.anat2fsnative_xfm',
                     'inputnode.t1w2fsnative_xfm'),
                    ('outputnode.fsnative2anat_xfm',
                     'inputnode.fsnative2t1w_xfm'),
                ]),
        ])
        # fmt: on

    return workflow
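
The docstring above already names the intended entry point; a slightly expanded usage sketch follows (the $SUBJECTS_DIR path is a placeholder, and building without running is an arbitrary choice, not something the project prescribes):

from nibabies.workflows.tests import mock_config
from nibabies.workflows.base import init_single_subject_wf

with mock_config():
    wf = init_single_subject_wf('01')
    # Feed the workflow-level input listed under "Inputs" in the docstring.
    wf.get_node('inputnode').inputs.subjects_dir = '/opt/freesurfer/subjects'
# wf.run() would execute the graph; here we only build and inspect it.
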
Example #7
0
def init_single_subject_wf(subject_id):
    """
    Organize the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from nibabies.workflows.tests import mock_config
            from nibabies.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')

    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.

    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.spaces import Reference

    from .anatomical import init_infant_anat_wf
    from ..utils.misc import fix_multi_source_name

    name = "single_subject_%s_wf" % subject_id
    subject_data = collect_data(
        config.execution.layout,
        subject_id,
        config.execution.task_id,
        config.execution.echo_idx,
        bids_filters=config.execution.bids_filters,
    )[0]

    if "flair" in config.workflow.ignore:
        subject_data["flair"] = []
    if "t2w" in config.workflow.ignore:
        subject_data["t2w"] = []

    anat_only = config.workflow.anat_only
    anat_derivatives = config.execution.anat_derivatives
    anat_modality = config.workflow.anat_modality
    spaces = config.workflow.spaces
    # Make sure we always go through these two checks
    if not anat_only and not subject_data["bold"]:
        task_id = config.execution.task_id
        raise RuntimeError(
            "No BOLD images found for participant {} and task {}. "
            "All workflows require BOLD images.".format(
                subject_id, task_id if task_id else "<all>"))

    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives

        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            config.workflow.run_reconall,
        )
        if anat_derivatives is None:
            config.loggers.workflow.warning(f"""\
Attempted to access pre-existing anatomical derivatives at \
<{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \
were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>, \
reconall <{config.workflow.run_reconall}>).""")

    if not anat_derivatives and not subject_data[anat_modality]:
        raise RuntimeError(
            f"No {anat_modality} images found for participant {subject_id}. "
            f"All workflows require {anat_modality} images.")

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(
        fmriprep_ver=config.environment.version,
        nipype_ver=config.environment.nipype_version,
    )
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://nibabies.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    nibabies_dir = str(config.execution.nibabies_dir)

    inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]),
                        name="inputnode")

    bidssrc = pe.Node(
        BIDSDataGrabber(
            subject_data=subject_data,
            anat_only=anat_only,
            anat_derivatives=anat_derivatives,
            subject_id=subject_id,
        ),
        name="bidssrc",
    )

    bids_info = pe.Node(
        BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False),
        name="bids_info",
    )

    summary = pe.Node(
        SubjectSummary(
            std_spaces=spaces.get_spaces(nonstandard=False),
            nstd_spaces=spaces.get_spaces(standard=False),
        ),
        name="summary",
        run_without_submitting=True,
    )

    about = pe.Node(
        AboutSummary(version=config.environment.version,
                     command=" ".join(sys.argv)),
        name="about",
        run_without_submitting=True,
    )

    ds_report_summary = pe.Node(
        DerivativesDataSink(
            base_directory=nibabies_dir,
            desc="summary",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_summary",
        run_without_submitting=True,
    )

    ds_report_about = pe.Node(
        DerivativesDataSink(
            base_directory=nibabies_dir,
            desc="about",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_about",
        run_without_submitting=True,
    )

    # Preprocessing of anatomical (includes registration to UNCInfant)
    anat_preproc_wf = init_infant_anat_wf(
        ants_affine_init=config.workflow.ants_affine_init or True,  # NOTE: ``or True`` means this is always enabled
        age_months=config.workflow.age_months,
        anat_modality=anat_modality,
        t1w=subject_data["t1w"],
        t2w=subject_data["t2w"],
        bids_root=config.execution.bids_dir,
        existing_derivatives=anat_derivatives,
        freesurfer=config.workflow.run_reconall,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=nibabies_dir,
        segmentation_atlases=config.execution.segmentation_atlases_dir,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        sloppy=config.execution.sloppy,
        spaces=spaces,
    )

    # fmt: off
    workflow.connect([
        (inputnode, anat_preproc_wf, [
            ('subjects_dir', 'inputnode.subjects_dir'),
        ]),
        (inputnode, summary, [
            ('subjects_dir', 'subjects_dir'),
        ]),
        (bidssrc, summary, [
            ('bold', 'bold'),
        ]),
        (bids_info, summary, [
            ('subject', 'subject_id'),
        ]),
        (bids_info, anat_preproc_wf, [
            (('subject', _prefix), 'inputnode.subject_id'),
        ]),
        (
            bidssrc,
            anat_preproc_wf,
            [
                ('t1w', 'inputnode.t1w'),
                ('t2w', 'inputnode.t2w'),
                # ('roi', 'inputnode.roi'),
                # ('flair', 'inputnode.flair'),
            ]),
        (summary, ds_report_summary, [
            ('out_report', 'in_file'),
        ]),
        (about, ds_report_about, [
            ('out_report', 'in_file'),
        ]),
    ])

    if not anat_derivatives:
        workflow.connect([
            (bidssrc, bids_info, [
                (('t1w', fix_multi_source_name), 'in_file'),
            ]),
            (bidssrc, summary, [
                ('t1w', 't1w'),
                ('t2w', 't2w'),
            ]),
            (bidssrc, ds_report_summary, [
                (('t1w', fix_multi_source_name), 'source_file'),
            ]),
            (bidssrc, ds_report_about, [
                (('t1w', fix_multi_source_name), 'source_file'),
            ]),
        ])
    else:
        workflow.connect([
            (bidssrc, bids_info, [
                (('bold', fix_multi_source_name), 'in_file'),
            ]),
            (anat_preproc_wf, summary, [
                ('outputnode.t1w_preproc', 't1w'),
            ]),
            (anat_preproc_wf, ds_report_summary, [
                ('outputnode.t1w_preproc', 'source_file'),
            ]),
            (anat_preproc_wf, ds_report_about, [
                ('outputnode.t1w_preproc', 'source_file'),
            ]),
        ])
    # fmt: on

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            workflow.get_node(node).interface.out_path_base = ""

    if anat_only:
        return workflow

    # Susceptibility distortion correction
    fmap_estimators = None
    if "fieldmap" not in config.workflow.ignore:
        from sdcflows.utils.wrangler import find_estimators
        from sdcflows.workflows.base import init_fmap_preproc_wf

        # SDC Step 1: Run basic heuristics to identify available data for fieldmap estimation
        # For now, no fmapless
        fmap_estimators = find_estimators(
            layout=config.execution.layout,
            subject=subject_id,
            fmapless=False,  # config.workflow.use_syn,
            force_fmapless=False,  # config.workflow.force_syn,
        )
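        # ``find_estimators`` returns a list of sdcflows ``FieldmapEstimation`` objects (possibly empty).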

    # Append the functional section to the existing anatomical excerpt
    # so that we do not need to pass down the number of BOLD datasets
    anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ if hasattr(
        anat_preproc_wf, '__postdesc__') else "") + f"""

Functional data preprocessing

: For each of the {len(subject_data['bold'])} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""")

    # calculate reference image(s) for BOLD images
    # group all BOLD files based on same:
    # 1) session
    # 2) PE direction
    # 3) total readout time
    from niworkflows.workflows.epi.refmap import init_epi_reference_wf

    _, bold_groupings = group_bolds_ref(layout=config.execution.layout,
                                        subject=subject_id)
    if any(not x for x in bold_groupings):
        print("No BOLD files found for one or more reference groupings")
        return workflow

    func_preproc_wfs = []
    for idx, bold_files in enumerate(bold_groupings):
        bold_ref_wf = init_epi_reference_wf(
            auto_bold_nss=True,
            name=f'bold_reference_wf{idx}',
            omp_nthreads=config.nipype.omp_nthreads)
        bold_ref_wf.inputs.inputnode.in_files = bold_files
        for bidx, bold_file in enumerate(bold_files):  # avoid shadowing the grouping index
            func_preproc_wf = init_func_preproc_wf(
                bold_file, has_fieldmap=bool(fmap_estimators))
            # fmt: off
            workflow.connect([
                (bold_ref_wf, func_preproc_wf, [
                    ('outputnode.epi_ref_file', 'inputnode.bold_ref'),
                    (('outputnode.xfm_files', _select_iter_idx, bidx),
                     'inputnode.bold_ref_xfm'),
                    (('outputnode.n_dummy', _select_iter_idx, bidx),
                     'inputnode.n_dummy_scans'),
                ]),
                (
                    anat_preproc_wf,
                    func_preproc_wf,
                    [
                        ('outputnode.anat_preproc', 'inputnode.anat_preproc'),
                        ('outputnode.anat_mask', 'inputnode.anat_mask'),
                        ('outputnode.anat_brain', 'inputnode.anat_brain'),
                        ('outputnode.anat_dseg', 'inputnode.anat_dseg'),
                        ('outputnode.anat_aseg', 'inputnode.anat_aseg'),
                        ('outputnode.anat_aparc', 'inputnode.anat_aparc'),
                        ('outputnode.anat_tpms', 'inputnode.anat_tpms'),
                        ('outputnode.template', 'inputnode.template'),
                        ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
                        ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
                        # Undefined if --fs-no-reconall, but this is safe
                        ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                        ('outputnode.subject_id', 'inputnode.subject_id'),
                        ('outputnode.anat2fsnative_xfm',
                         'inputnode.anat2fsnative_xfm'),
                        ('outputnode.fsnative2anat_xfm',
                         'inputnode.fsnative2anat_xfm'),
                    ]),
            ])
            # fmt: on
            func_preproc_wfs.append(func_preproc_wf)

    if not fmap_estimators:
        config.loggers.workflow.warning(
            "Data for fieldmap estimation not present. Please note that these data "
            "will not be corrected for susceptibility distortions.")
        return workflow

    config.loggers.workflow.info(
        f"Fieldmap estimators found: {[e.method for e in fmap_estimators]}")

    from sdcflows.workflows.base import init_fmap_preproc_wf
    from sdcflows import fieldmaps as fm

    fmap_wf = init_fmap_preproc_wf(
        debug=bool(config.execution.debug),  # TODO: Add debug option for fieldmaps
        estimators=fmap_estimators,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=nibabies_dir,
        subject=subject_id,
    )
    fmap_wf.__desc__ = f"""

Fieldmap data preprocessing

: A total of {len(fmap_estimators)} fieldmaps were found available within the input
BIDS structure for this particular subject.
"""

    for func_preproc_wf in func_preproc_wfs:
        # fmt: off
        workflow.connect([
            (fmap_wf, func_preproc_wf, [
                ("outputnode.fmap", "inputnode.fmap"),
                ("outputnode.fmap_ref", "inputnode.fmap_ref"),
                ("outputnode.fmap_coeff", "inputnode.fmap_coeff"),
                ("outputnode.fmap_mask", "inputnode.fmap_mask"),
                ("outputnode.fmap_id", "inputnode.fmap_id"),
            ]),
        ])
        # fmt: on

    # Overwrite ``out_path_base`` of sdcflows's DataSinks
    for node in fmap_wf.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            fmap_wf.get_node(node).interface.out_path_base = ""

    # Step 3: Manually connect PEPOLAR
    for estimator in fmap_estimators:
        config.loggers.workflow.info(f"""\
Setting-up fieldmap "{estimator.bids_id}" ({estimator.method}) with \
<{', '.join(s.path.name for s in estimator.sources)}>""")
        if estimator.method in (fm.EstimatorType.MAPPED,
                                fm.EstimatorType.PHASEDIFF):
            continue

        suffices = set(s.suffix for s in estimator.sources)

        if estimator.method == fm.EstimatorType.PEPOLAR and sorted(
                suffices) == ["epi"]:
            getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").in_data = [
                str(s.path) for s in estimator.sources
            ]
            getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").metadata = [
                s.metadata for s in estimator.sources
            ]
            continue

        if estimator.method == fm.EstimatorType.PEPOLAR:
            raise NotImplementedError(
                "Sophisticated PEPOLAR schemes (e.g., using DWI+EPI) are unsupported."
            )

    return workflow
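
Example #7 leans on a ``group_bolds_ref`` helper (imported from elsewhere in the package) to bucket BOLD runs by session, phase-encoding direction, and total readout time before building one EPI reference per bucket. A rough sketch of that kind of grouping with pybids (the function name and return shape are assumptions, not the project's actual implementation):

from collections import defaultdict

from bids import BIDSLayout


def group_bold_runs(layout: BIDSLayout, subject: str):
    """Group BOLD runs sharing session, PE direction, and total readout time."""
    groups = defaultdict(list)
    for bold in layout.get(subject=subject, suffix="bold",
                           extension=[".nii", ".nii.gz"]):
        meta = layout.get_metadata(bold.path)
        key = (
            bold.entities.get("session"),
            meta.get("PhaseEncodingDirection"),
            meta.get("TotalReadoutTime"),
        )
        groups[key].append(bold.path)
    return list(groups), list(groups.values())
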