Example 1
def init_bbreg_wf(use_bbr,
                  bold2t1w_dof,
                  bold2t1w_init,
                  omp_nthreads,
                  name='bbreg_wf'):
    """
    Build a workflow to run FreeSurfer's ``bbregister``.

    This workflow uses FreeSurfer's ``bbregister`` to register a BOLD image to
    a T1-weighted structural image.

    It is a counterpart to :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`,
    which performs the same task using FSL's FLIRT with a BBR cost function.
    The ``use_bbr`` option permits a high degree of control over registration.
    If ``False``, standard, affine coregistration will be performed using
    FreeSurfer's ``mri_coreg`` tool.
    If ``True``, ``bbregister`` will be seeded with the initial transform found
    by ``mri_coreg`` (equivalent to running ``bbregister --init-coreg``).
    If ``None``, after ``bbregister`` is run, the resulting affine transform
    will be compared to the initial transform found by ``mri_coreg``.
    Excessive deviation will result in rejecting the BBR refinement and
    accepting the original, affine registration.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold.registration import init_bbreg_wf
            wf = init_bbreg_wf(use_bbr=True, bold2t1w_dof=9,
                               bold2t1w_init='register', omp_nthreads=1)


    Parameters
    ----------
    use_bbr : :obj:`bool` or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    bold2t1w_init : str, 'header' or 'register'
        If ``'header'``, use header information for initialization of BOLD and T1 images.
        If ``'register'``, align volumes by their centers.
    name : :obj:`str`, optional
        Workflow name (default: bbreg_wf)

    Inputs
    ------
    in_file
        Reference BOLD image to be registered
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer T1.mgz to T1w
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID (must have folder in SUBJECTS_DIR)
    t1w_brain
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`)
    t1w_dseg
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`)

    Outputs
    -------
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    itk_t1_to_bold
        Affine transform from T1 space to BOLD space (ITK format)
    out_report
        Reportlet for assessing registration quality
    fallback
        Boolean indicating whether BBR was rejected (mri_coreg registration returned)

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    # See https://github.com/nipreps/fmriprep/issues/768
    from niworkflows.interfaces.freesurfer import (
        PatchedBBRegisterRPT as BBRegisterRPT,
        PatchedMRICoregRPT as MRICoregRPT,
        PatchedLTAConvert as LTAConvert)
    from niworkflows.interfaces.nitransforms import ConcatenateXFMs

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD reference was then co-registered to the T1w reference using
`bbregister` (FreeSurfer) which implements boundary-based registration [@bbr].
Co-registration was configured with {dof} degrees of freedom{reason}.
""".format(dof={
        6: 'six',
        9: 'nine',
        12: 'twelve'
    }[bold2t1w_dof],
           reason='' if bold2t1w_dof == 6 else
           ' to account for distortions remaining in the BOLD reference')

    inputnode = pe.Node(
        niu.IdentityInterface([
            'in_file',
            'fsnative2t1w_xfm',
            'subjects_dir',
            'subject_id',  # BBRegister
            't1w_dseg',
            't1w_brain'
        ]),  # FLIRT BBR
        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        ['itk_bold_to_t1', 'itk_t1_to_bold', 'out_report', 'fallback']),
                         name='outputnode')

    if bold2t1w_init not in ("register", "header"):
        raise ValueError(
            f"Unknown BOLD-T1w initialization option: {bold2t1w_init}")

    # For now make BBR unconditional - in the future, we can fall back to identity,
    # but adding the flexibility without testing seems a bit dangerous
    if bold2t1w_init == "header":
        if use_bbr is False:
            raise ValueError("Cannot disable BBR and use header registration")
        if use_bbr is None:
            LOGGER.warning(
                "Initializing BBR with header; affine fallback disabled")
            use_bbr = True

    merge_ltas = pe.Node(niu.Merge(2),
                         name='merge_ltas',
                         run_without_submitting=True)
    concat_xfm = pe.Node(ConcatenateXFMs(inverse=True), name='concat_xfm')

    workflow.connect([
        # Output ITK transforms
        (inputnode, merge_ltas, [('fsnative2t1w_xfm', 'in2')]),
        (merge_ltas, concat_xfm, [('out', 'in_xfms')]),
        (concat_xfm, outputnode, [('out_xfm', 'itk_bold_to_t1')]),
        (concat_xfm, outputnode, [('out_inv', 'itk_t1_to_bold')]),
    ])

    # Define both nodes, but only connect conditionally
    mri_coreg = pe.Node(MRICoregRPT(dof=bold2t1w_dof,
                                    sep=[4],
                                    ftol=0.0001,
                                    linmintol=0.01,
                                    generate_report=not use_bbr),
                        name='mri_coreg',
                        n_procs=omp_nthreads,
                        mem_gb=5)

    bbregister = pe.Node(BBRegisterRPT(dof=bold2t1w_dof,
                                       contrast_type='t2',
                                       registered_file=True,
                                       out_lta_file=True,
                                       generate_report=True),
                         name='bbregister',
                         mem_gb=12)

    # Use mri_coreg
    if bold2t1w_init == "register":
        workflow.connect([
            (inputnode, mri_coreg, [('subjects_dir', 'subjects_dir'),
                                    ('subject_id', 'subject_id'),
                                    ('in_file', 'source_file')]),
        ])

        # Short-circuit workflow building, use initial registration
        if use_bbr is False:
            workflow.connect([
                (mri_coreg, outputnode, [('out_report', 'out_report')]),
                (mri_coreg, merge_ltas, [('out_lta_file', 'in1')])
            ])
            outputnode.inputs.fallback = True

            return workflow

    # Use bbregister
    workflow.connect([
        (inputnode, bbregister, [('subjects_dir', 'subjects_dir'),
                                 ('subject_id', 'subject_id'),
                                 ('in_file', 'source_file')]),
    ])

    if bold2t1w_init == "header":
        bbregister.inputs.init = "header"
    else:
        workflow.connect([(mri_coreg, bbregister, [('out_lta_file',
                                                    'init_reg_file')])])

    # Short-circuit workflow building, use boundary-based registration
    if use_bbr is True:
        workflow.connect([(bbregister, outputnode, [('out_report',
                                                     'out_report')]),
                          (bbregister, merge_ltas, [('out_lta_file', 'in1')])])
        outputnode.inputs.fallback = False

        return workflow

    # Only reach this point if bold2t1w_init is "register" and use_bbr is None

    transforms = pe.Node(niu.Merge(2),
                         run_without_submitting=True,
                         name='transforms')
    reports = pe.Node(niu.Merge(2),
                      run_without_submitting=True,
                      name='reports')

    lta_ras2ras = pe.MapNode(LTAConvert(out_lta=True),
                             iterfield=['in_lta'],
                             name='lta_ras2ras',
                             mem_gb=2)
    compare_transforms = pe.Node(niu.Function(function=compare_xforms),
                                 name='compare_transforms')

    select_transform = pe.Node(niu.Select(),
                               run_without_submitting=True,
                               name='select_transform')
    select_report = pe.Node(niu.Select(),
                            run_without_submitting=True,
                            name='select_report')

    workflow.connect([
        (bbregister, transforms, [('out_lta_file', 'in1')]),
        (mri_coreg, transforms, [('out_lta_file', 'in2')]),
        # Normalize LTA transforms to RAS2RAS (inputs are VOX2VOX) and compare
        (transforms, lta_ras2ras, [('out', 'in_lta')]),
        (lta_ras2ras, compare_transforms, [('out_lta', 'lta_list')]),
        (compare_transforms, outputnode, [('out', 'fallback')]),
        # Select output transform
        (transforms, select_transform, [('out', 'inlist')]),
        (compare_transforms, select_transform, [('out', 'index')]),
        (select_transform, merge_ltas, [('out', 'in1')]),
        # Select output report
        (bbregister, reports, [('out_report', 'in1')]),
        (mri_coreg, reports, [('out_report', 'in2')]),
        (reports, select_report, [('out', 'inlist')]),
        (compare_transforms, select_report, [('out', 'index')]),
        (select_report, outputnode, [('out', 'out_report')]),
    ])

    return workflow
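
A usage sketch (not part of the function above): calling it with ``use_bbr=None`` and ``bold2t1w_init='register'`` builds the full fallback branch, so both ``mri_coreg`` and ``bbregister`` run and ``compare_xforms`` decides which transform and report are propagated. Rendering the graph makes that conditional wiring visible.

from fmriprep.workflows.bold.registration import init_bbreg_wf

# use_bbr=None enables the automatic fallback: both mri_coreg and bbregister
# run, and compare_xforms selects the transform/report that is kept.
wf = init_bbreg_wf(use_bbr=None, bold2t1w_dof=6,
                   bold2t1w_init='register', omp_nthreads=1)
wf.write_graph(graph2use='colored', simple_form=True)  # Graphviz rendering of the wiring
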
Example 2
def init_single_subject_wf(
    anat_only,
    aroma_melodic_dim,
    bold2t1w_dof,
    cifti_output,
    debug,
    dummy_scans,
    echo_idx,
    err_on_aroma_warn,
    fmap_bspline,
    fmap_demean,
    force_syn,
    freesurfer,
    hires,
    ignore,
    layout,
    longitudinal,
    low_mem,
    medial_surface_nan,
    name,
    omp_nthreads,
    output_dir,
    output_spaces,
    reportlets_dir,
    regressors_all_comps,
    regressors_dvars_th,
    regressors_fd_th,
    skull_strip_fixed_seed,
    skull_strip_template,
    subject_id,
    t2s_coreg,
    task_id,
    use_aroma,
    use_bbr,
    use_syn,
):
    """
    This workflow organizes the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.base import init_single_subject_wf
            from collections import namedtuple, OrderedDict
            BIDSLayout = namedtuple('BIDSLayout', ['root'])
            wf = init_single_subject_wf(
                anat_only=False,
                aroma_melodic_dim=-200,
                bold2t1w_dof=9,
                cifti_output=False,
                debug=False,
                dummy_scans=None,
                echo_idx=None,
                err_on_aroma_warn=False,
                fmap_bspline=False,
                fmap_demean=True,
                force_syn=True,
                freesurfer=True,
                hires=True,
                ignore=[],
                layout=BIDSLayout('.'),
                longitudinal=False,
                low_mem=False,
                medial_surface_nan=False,
                name='single_subject_wf',
                omp_nthreads=1,
                output_dir='.',
                output_spaces=OrderedDict([
                    ('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
                    ('T1w', {}), ('fsnative', {})]),
                reportlets_dir='.',
                regressors_all_comps=False,
                regressors_dvars_th=1.5,
                regressors_fd_th=0.5,
                skull_strip_fixed_seed=False,
                skull_strip_template=('OASIS30ANTs', {}),
                subject_id='test',
                t2s_coreg=False,
                task_id='',
                use_aroma=False,
                use_bbr=True,
                use_syn=True,
            )

    Parameters
    ----------
    anat_only : bool
        Disable functional workflows
    aroma_melodic_dim : int
        Maximum number of components identified by MELODIC within ICA-AROMA
        (default is -200, i.e., no limitation).
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    cifti_output : bool
        Generate bold CIFTI file in output spaces
    debug : bool
        Enable debugging outputs
    dummy_scans : int or None
        Number of volumes to consider as non steady state
    echo_idx : int or None
        Index of echo to preprocess in multiecho BOLD series,
        or ``None`` to preprocess all
    err_on_aroma_warn : bool
        Raise an error instead of a warning when ICA-AROMA does not produce sensible output
    fmap_bspline : bool
        **Experimental**: Fit B-Spline field using least-squares
    fmap_demean : bool
        Demean voxel-shift map during unwarp
    force_syn : bool
        **Temporary**: Always run SyN-based SDC
    freesurfer : bool
        Enable FreeSurfer surface reconstruction (may increase runtime)
    hires : bool
        Enable sub-millimeter preprocessing in FreeSurfer
    ignore : list
        Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
    layout : BIDSLayout object
        BIDS dataset layout
    longitudinal : bool
        Treat multiple sessions as longitudinal (may increase runtime)
        See sub-workflows for specific differences
    low_mem : bool
        Write uncompressed .nii files in some cases to reduce memory usage
    medial_surface_nan : bool
        Replace medial wall values with NaNs on functional GIFTI files
    name : str
        Name of workflow
    omp_nthreads : int
        Maximum number of threads an individual process may use
    output_dir : str
        Directory in which to save derivatives
    output_spaces : OrderedDict
        Ordered dictionary where keys are TemplateFlow ID strings (e.g., ``MNI152Lin``,
        ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``), strings designating
        nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
        or paths pointing to custom templates organized in a TemplateFlow-like structure.
        Values of the dictionary aggregate modifiers (e.g., the value for the key ``MNI152Lin``
        could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
        resolution version of the selected template).
    reportlets_dir : str
        Directory in which to save reportlets
    regressors_all_comps : bool
        Return all CompCor component time series instead of the top fraction
    regressors_fd_th : float
        Criterion for flagging framewise displacement outliers
    regressors_dvars_th : float
        Criterion for flagging DVARS outliers
    skull_strip_fixed_seed : bool
        Do not use a random seed for skull-stripping; ensures run-to-run
        replicability when used with ``--omp-nthreads 1``
    skull_strip_template : tuple
        Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
        and corresponding dictionary of output-space modifiers.
    subject_id : str
        Subject label for this single-subject workflow
    t2s_coreg : bool
        For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
    task_id : str or None
        Task ID of BOLD series to preprocess, or ``None`` to preprocess all
    use_aroma : bool
        Perform ICA-AROMA on MNI-resampled functional series
    use_bbr : bool or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    use_syn : bool
        **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
        If fieldmaps are present and enabled, this is not run by default.

    Inputs
    ------
    subjects_dir : str
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from ..config import NONSTANDARD_REFERENCES
    if name in ('single_subject_wf', 'single_subject_fmripreptest_wf'):
        # for documentation purposes
        subject_data = {
            't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
            'bold': ['/completely/made/up/path/sub-01_task-nback_bold.nii.gz']
        }
    else:
        subject_data = collect_data(layout, subject_id, task_id, echo_idx)[0]

    # Make sure we always go through these two checks
    if not anat_only and subject_data['bold'] == []:
        raise Exception("No BOLD images found for participant {} and task {}. "
                        "All workflows require BOLD images.".format(
                            subject_id, task_id if task_id else '<all>'))

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(fmriprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    # Filter out standard spaces to a separate dict
    std_spaces = OrderedDict([(key, modifiers)
                              for key, modifiers in output_spaces.items()
                              if key not in NONSTANDARD_REFERENCES])

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=anat_only),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(bids_dir=layout.root, bids_validate=False),
                        name='bids_info')

    summary = pe.Node(SubjectSummary(
        std_spaces=list(std_spaces.keys()),
        nstd_spaces=sorted(
            set(NONSTANDARD_REFERENCES).intersection(output_spaces.keys()))),
                      name='summary',
                      run_without_submitting=True)

    about = pe.Node(AboutSummary(version=__version__,
                                 command=' '.join(sys.argv)),
                    name='about',
                    run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='summary', keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='about', keep_dtype=True),
                              name='ds_report_about',
                              run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=layout.root,
        debug=debug,
        freesurfer=freesurfer,
        hires=hires,
        longitudinal=longitudinal,
        name="anat_preproc_wf",
        num_t1w=len(subject_data['t1w']),
        omp_nthreads=omp_nthreads,
        output_dir=output_dir,
        output_spaces=std_spaces,
        reportlets_dir=reportlets_dir,
        skull_strip_fixed_seed=skull_strip_fixed_seed,
        skull_strip_template=skull_strip_template,
    )

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir',
                                       'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')
                              ]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('bold', 'bold')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bids_info, anat_preproc_wf, [(('subject', _prefix),
                                       'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_'):
            workflow.get_node(node).interface.out_path_base = 'fmriprep'

    if anat_only:
        return workflow

    for bold_file in subject_data['bold']:
        func_preproc_wf = init_func_preproc_wf(
            aroma_melodic_dim=aroma_melodic_dim,
            bold2t1w_dof=bold2t1w_dof,
            bold_file=bold_file,
            cifti_output=cifti_output,
            debug=debug,
            dummy_scans=dummy_scans,
            err_on_aroma_warn=err_on_aroma_warn,
            fmap_bspline=fmap_bspline,
            fmap_demean=fmap_demean,
            force_syn=force_syn,
            freesurfer=freesurfer,
            ignore=ignore,
            layout=layout,
            low_mem=low_mem,
            medial_surface_nan=medial_surface_nan,
            num_bold=len(subject_data['bold']),
            omp_nthreads=omp_nthreads,
            output_dir=output_dir,
            output_spaces=output_spaces,
            reportlets_dir=reportlets_dir,
            regressors_all_comps=regressors_all_comps,
            regressors_fd_th=regressors_fd_th,
            regressors_dvars_th=regressors_dvars_th,
            t2s_coreg=t2s_coreg,
            use_aroma=use_aroma,
            use_bbr=use_bbr,
            use_syn=use_syn,
        )

        workflow.connect([
            (
                anat_preproc_wf,
                func_preproc_wf,
                [
                    (('outputnode.t1w_preproc', _pop),
                     'inputnode.t1w_preproc'),
                    ('outputnode.t1w_brain', 'inputnode.t1w_brain'),
                    ('outputnode.t1w_mask', 'inputnode.t1w_mask'),
                    ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'),
                    ('outputnode.t1w_aseg', 'inputnode.t1w_aseg'),
                    ('outputnode.t1w_aparc', 'inputnode.t1w_aparc'),
                    ('outputnode.t1w_tpms', 'inputnode.t1w_tpms'),
                    ('outputnode.template', 'inputnode.template'),
                    ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
                    ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
                    ('outputnode.joint_template', 'inputnode.joint_template'),
                    ('outputnode.joint_anat2std_xfm',
                     'inputnode.joint_anat2std_xfm'),
                    ('outputnode.joint_std2anat_xfm',
                     'inputnode.joint_std2anat_xfm'),
                    # Undefined if --no-freesurfer, but this is safe
                    ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                    ('outputnode.subject_id', 'inputnode.subject_id'),
                    ('outputnode.t1w2fsnative_xfm',
                     'inputnode.t1w2fsnative_xfm'),
                    ('outputnode.fsnative2t1w_xfm',
                     'inputnode.fsnative2t1w_xfm')
                ]),
        ])

    return workflow
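
The connection ``(('subject', _prefix), 'inputnode.subject_id')`` above uses a helper that is not shown in this excerpt. A minimal sketch consistent with that usage, assuming its only job is to make sure the FreeSurfer subject ID carries the BIDS ``sub-`` prefix:

def _prefix(subid):
    """Ensure a subject label is prefixed with 'sub-' (illustrative sketch)."""
    return subid if subid.startswith('sub-') else 'sub-' + subid
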
Example 3
def init_syn_sdc_wf(omp_nthreads, bold_pe=None,
                    atlas_threshold=3, name='syn_sdc_wf'):
    """
    This workflow takes a skull-stripped T1w image and reference BOLD image and
    estimates a susceptibility distortion correction warp, using ANTs symmetric
    normalization (SyN) and the average fieldmap atlas described in
    [Treiber2016]_.

    SyN deformation is restricted to the phase-encoding (PE) direction.
    If no PE direction is specified, anterior-posterior PE is assumed.

    SyN deformation is also restricted to regions that are expected to have a
    >3mm (approximately 1 voxel) warp, based on the fieldmap atlas.

    This technique is a variation on those developed in [Huntenburg2014]_ and
    [Wang2017]_.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.fieldmap.syn import init_syn_sdc_wf
        wf = init_syn_sdc_wf(
            bold_pe='j',
            omp_nthreads=8)

    **Inputs**

        bold_ref
            reference image
        bold_ref_brain
            skull-stripped reference image
        template : str
            Name of template targeted by ``template`` output space
        t1_brain
            skull-stripped, bias-corrected structural image
        t1_2_mni_reverse_transform
            inverse registration transform of T1w image to MNI template

    **Outputs**

        out_reference
            the ``bold_ref`` image after unwarping
        out_reference_brain
            the ``bold_ref_brain`` image after unwarping
        out_warp
            the corresponding :abbr:`DFM (displacements field map)` compatible with
            ANTs
        out_mask
            mask of the unwarped input file

    """

    if bold_pe is None or bold_pe[0] not in ['i', 'j']:
        LOGGER.warning('Incorrect phase-encoding direction, assuming PA (posterior-to-anterior).')
        bold_pe = 'j'

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
A deformation field to correct for susceptibility distortions was estimated
based on *fMRIPrep*'s *fieldmap-less* approach.
The deformation field is that resulting from co-registering the BOLD reference
to the same-subject T1w-reference with its intensity inverted [@fieldmapless1;
@fieldmapless2].
Registration is performed with `antsRegistration` (ANTs {ants_ver}), and
the process regularized by constraining deformation to be nonzero only
along the phase-encoding direction, and modulated with an average fieldmap
template [@fieldmapless3].
""".format(ants_ver=Registration().version or '<ver>')
    inputnode = pe.Node(
        niu.IdentityInterface(['bold_ref', 'bold_ref_brain', 'template',
                               't1_brain', 't1_2_mni_reverse_transform']),
        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(['out_reference', 'out_reference_brain',
                               'out_mask', 'out_warp']),
        name='outputnode')

    # Collect predefined data
    # Atlas image and registration affine
    atlas_img = pkgr.resource_filename('fmriprep', 'data/fmap_atlas.nii.gz')
    # Registration specifications
    affine_transform = pkgr.resource_filename('fmriprep', 'data/affine.json')
    syn_transform = pkgr.resource_filename('fmriprep', 'data/susceptibility_syn.json')

    invert_t1w = pe.Node(Rescale(invert=True), name='invert_t1w',
                         mem_gb=0.3)

    ref_2_t1 = pe.Node(Registration(from_file=affine_transform),
                       name='ref_2_t1', n_procs=omp_nthreads)
    t1_2_ref = pe.Node(ApplyTransforms(invert_transform_flags=[True]),
                       name='t1_2_ref', n_procs=omp_nthreads)

    # 1) BOLD -> T1; 2) MNI -> T1; 3) ATLAS -> MNI
    transform_list = pe.Node(niu.Merge(3), name='transform_list',
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

    # Inverting (1), then applying in reverse order:
    #
    # ATLAS -> MNI -> T1 -> BOLD
    atlas_2_ref = pe.Node(
        ApplyTransforms(invert_transform_flags=[True, False, False]),
        name='atlas_2_ref', n_procs=omp_nthreads,
        mem_gb=0.3)
    atlas_2_ref.inputs.input_image = atlas_img

    threshold_atlas = pe.Node(
        fsl.maths.MathsCommand(args='-thr {:.8g} -bin'.format(atlas_threshold),
                               output_datatype='char'),
        name='threshold_atlas', mem_gb=0.3)

    fixed_image_masks = pe.Node(niu.Merge(2), name='fixed_image_masks',
                                mem_gb=DEFAULT_MEMORY_MIN_GB)
    # antsRegistration takes one fixed-image mask per registration stage;
    # 'NULL' leaves the first stage unmasked, while the thresholded atlas
    # (wired into 'in2' below) constrains the remaining stage.
    fixed_image_masks.inputs.in1 = 'NULL'

    # Restrict the SyN deformation to the in-plane PE axis: 'i' keeps only the
    # first (typically left-right) component, 'j' only the second (typically
    # anterior-posterior) one; the vector is repeated for each registration stage.
    restrict = [[int(bold_pe[0] == 'i'), int(bold_pe[0] == 'j'), 0]] * 2
    syn = pe.Node(
        Registration(from_file=syn_transform, restrict_deformation=restrict),
        name='syn', n_procs=omp_nthreads)

    unwarp_ref = pe.Node(ApplyTransforms(
        dimension=3, float=True, interpolation='LanczosWindowedSinc'),
        name='unwarp_ref')

    skullstrip_bold_wf = init_skullstrip_bold_wf()

    workflow.connect([
        (inputnode, invert_t1w, [('t1_brain', 'in_file'),
                                 ('bold_ref', 'ref_file')]),
        (inputnode, ref_2_t1, [('bold_ref_brain', 'moving_image')]),
        (invert_t1w, ref_2_t1, [('out_file', 'fixed_image')]),
        (inputnode, t1_2_ref, [('bold_ref', 'reference_image')]),
        (invert_t1w, t1_2_ref, [('out_file', 'input_image')]),
        (ref_2_t1, t1_2_ref, [('forward_transforms', 'transforms')]),
        (ref_2_t1, transform_list, [('forward_transforms', 'in1')]),
        (inputnode, transform_list, [
            ('t1_2_mni_reverse_transform', 'in2'),
            (('template', _prior_path), 'in3')]),
        (inputnode, atlas_2_ref, [('bold_ref', 'reference_image')]),
        (transform_list, atlas_2_ref, [('out', 'transforms')]),
        (atlas_2_ref, threshold_atlas, [('output_image', 'in_file')]),
        (threshold_atlas, fixed_image_masks, [('out_file', 'in2')]),
        (inputnode, syn, [('bold_ref_brain', 'moving_image')]),
        (t1_2_ref, syn, [('output_image', 'fixed_image')]),
        (fixed_image_masks, syn, [('out', 'fixed_image_masks')]),
        (syn, outputnode, [('forward_transforms', 'out_warp')]),
        (syn, unwarp_ref, [('forward_transforms', 'transforms')]),
        (inputnode, unwarp_ref, [('bold_ref', 'reference_image'),
                                 ('bold_ref', 'input_image')]),
        (unwarp_ref, skullstrip_bold_wf, [
            ('output_image', 'inputnode.in_file')]),
        (unwarp_ref, outputnode, [('output_image', 'out_reference')]),
        (skullstrip_bold_wf, outputnode, [
            ('outputnode.skull_stripped_file', 'out_reference_brain'),
            ('outputnode.mask_file', 'out_mask')]),
    ])

    return workflow
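
A usage sketch (not part of the function above): ``bold_pe`` would normally be read from the BOLD run's BIDS metadata; only the axis letter is inspected, so ``'j'`` and ``'j-'`` both restrict the deformation to the anterior-posterior axis. The metadata dictionary here is illustrative.

from fmriprep.workflows.fieldmap.syn import init_syn_sdc_wf

metadata = {'PhaseEncodingDirection': 'j-'}  # illustrative BIDS sidecar entry
syn_sdc_wf = init_syn_sdc_wf(
    omp_nthreads=4,
    bold_pe=metadata.get('PhaseEncodingDirection'),  # only bold_pe[0] is used
)
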
Example 4
def init_func_preproc_wf(bold_file, ignore, freesurfer,
                         use_bbr, t2s_coreg, bold2t1w_dof, reportlets_dir,
                         output_spaces, template, output_dir, omp_nthreads,
                         fmap_bspline, fmap_demean, use_syn, force_syn,
                         use_aroma, ignore_aroma_err, aroma_melodic_dim,
                         medial_surface_nan, cifti_output,
                         debug, low_mem, template_out_grid,
                         layout=None, num_bold=1):
    """
    This workflow controls the functional preprocessing stages of FMRIPREP.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold import init_func_preproc_wf
        wf = init_func_preproc_wf('/completely/made/up/path/sub-01_task-nback_bold.nii.gz',
                                  omp_nthreads=1,
                                  ignore=[],
                                  freesurfer=True,
                                  reportlets_dir='.',
                                  output_dir='.',
                                  template='MNI152NLin2009cAsym',
                                  output_spaces=['T1w', 'fsnative',
                                                 'template', 'fsaverage5'],
                                  debug=False,
                                  use_bbr=True,
                                  t2s_coreg=False,
                                  bold2t1w_dof=9,
                                  fmap_bspline=True,
                                  fmap_demean=True,
                                  use_syn=True,
                                  force_syn=True,
                                  low_mem=False,
                                  template_out_grid='native',
                                  medial_surface_nan=False,
                                  cifti_output=False,
                                  use_aroma=False,
                                  ignore_aroma_err=False,
                                  aroma_melodic_dim=-200,
                                  num_bold=1)

    **Parameters**

        bold_file : str
            BOLD series NIfTI file
        ignore : list
            Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
        freesurfer : bool
            Enable FreeSurfer functional registration (bbregister) and resampling
            BOLD series to FreeSurfer surface meshes.
        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
            When using ``t2s_coreg``, BBR will be enabled by default unless
            explicitly specified otherwise.
        t2s_coreg : bool
            For multiecho EPI, use the calculated T2*-map for T2*-driven coregistration
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        reportlets_dir : str
            Directory in which to save reportlets
        output_spaces : list
            List of output spaces to which the functional images will be resampled.
            Some parts of the pipeline will only be instantiated for certain output spaces.

            Valid spaces:

                - T1w
                - template
                - fsnative
                - fsaverage (or other pre-existing FreeSurfer templates)
        template : str
            Name of template targeted by ``template`` output space
        output_dir : str
            Directory in which to save derivatives
        omp_nthreads : int
            Maximum number of threads an individual process may use
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        use_syn : bool
            **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
            If fieldmaps are present and enabled, this is not run by default.
        force_syn : bool
            **Temporary**: Always run SyN-based SDC
        use_aroma : bool
            Perform ICA-AROMA on MNI-resampled functional series
        ignore_aroma_err : bool
            Do not fail on ICA-AROMA errors
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files
        cifti_output : bool
            Generate bold CIFTI file in output spaces
        debug : bool
            Enable debugging outputs
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        template_out_grid : str
            Keyword ('native', '1mm' or '2mm') or path of custom reference
            image for normalization
        layout : BIDSLayout
            BIDSLayout structure to enable metadata retrieval
        num_bold : int
            Total number of BOLD files that have been set for preprocessing
            (default is 1)

    **Inputs**

        bold_file
            BOLD series NIfTI file
        t1_preproc
            Bias-corrected structural template image
        t1_brain
            Skull-stripped ``t1_preproc``
        t1_mask
            Mask of the skull-stripped template image
        t1_seg
            Segmentation of preprocessed structural image, including
            gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
        t1_tpms
            List of tissue probability maps in T1w space
        t1_2_mni_forward_transform
            ANTs-compatible affine-and-warp transform file
        t1_2_mni_reverse_transform
            ANTs-compatible affine-and-warp transform file (inverse)
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID
        t1_2_fsnative_forward_transform
            LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
        t1_2_fsnative_reverse_transform
            LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w


    **Outputs**

        bold_t1
            BOLD series, resampled to T1w space
        bold_mask_t1
            BOLD series mask in T1w space
        bold_mni
            BOLD series, resampled to template space
        bold_mask_mni
            BOLD series mask in template space
        confounds
            TSV of confounds
        surfaces
            BOLD series, resampled to FreeSurfer surfaces
        aroma_noise_ics
            Noise components identified by ICA-AROMA
        melodic_mix
            FSL MELODIC mixing matrix
        bold_cifti
            BOLD CIFTI image
        cifti_variant
            combination of target spaces for `bold_cifti`


    **Subworkflows**

        * :py:func:`~fmriprep.workflows.bold.util.init_bold_reference_wf`
        * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
        * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
        * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
        * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confounds_wf`
        * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_mni_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.pepolar.init_pepolar_unwarp_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_fmap_estimator_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_sdc_unwarp_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_nonlinear_sdc_wf`

    """
    from ..fieldmap.base import init_sdc_wf  # Avoid circular dependency (#1066)

    ref_file = bold_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10
    multiecho = isinstance(bold_file, list)

    if multiecho:
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
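        # Use the echo with the shortest echo time as the reference file
        # (it drives metadata lookup, memory estimation, and workflow naming).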
        ref_file = dict(zip(tes, bold_file))[min(tes)]

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    LOGGER.log(25, ('Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
                    'Memory resampled/largemem=%.2f/%.2f GB.'),
               ref_file, mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # For doc building purposes
    if layout is None or bold_file == 'bold_preprocesing':
        LOGGER.log(25, 'No valid layout: building empty workflow.')
        metadata = {
            'RepetitionTime': 2.0,
            'SliceTiming': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
            'PhaseEncodingDirection': 'j',
        }
        fmaps = [{
            'type': 'phasediff',
            'phasediff': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_phasediff.nii.gz',
            'magnitude1': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude1.nii.gz',
            'magnitude2': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude2.nii.gz',
        }]
        run_stc = True
        multiecho = False
    else:
        # Find associated sbref, if possible
        entities = layout.parse_file_entities(ref_file)
        entities['type'] = 'sbref'
        files = layout.get(**entities, extensions=['nii', 'nii.gz'])
        refbase = os.path.basename(ref_file)
        if 'sbref' in ignore:
            LOGGER.info("Single-band reference files ignored.")
        elif files and multiecho:
            LOGGER.warning("Single-band reference found, but not supported in "
                           "multi-echo workflows at this time. Ignoring.")
        elif files:
            sbref_file = files[0].filename
            sbbase = os.path.basename(sbref_file)
            if len(files) > 1:
                LOGGER.warning(
                    "Multiple single-band reference files found for {}; using "
                    "{}".format(refbase, sbbase))
            else:
                LOGGER.log(25, "Using single-band reference file {}".format(sbbase))
        else:
            LOGGER.log(25, "No single-band-reference found for {}".format(refbase))

        metadata = layout.get_metadata(ref_file)

        # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
        fmaps = []
        if 'fieldmaps' not in ignore:
            fmaps = layout.get_fieldmap(ref_file, return_list=True)
            for fmap in fmaps:
                fmap['metadata'] = layout.get_metadata(fmap[fmap['type']])

        # Run SyN if forced or in the absence of fieldmap correction
        if force_syn or (use_syn and not fmaps):
            fmaps.append({'type': 'syn'})

        # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
        run_stc = ("SliceTiming" in metadata and
                   'slicetiming' not in ignore and
                   (_get_series_len(ref_file) > 4 or "TooShort"))

    # Check if MEEPI for T2* coregistration target
    if t2s_coreg and not multiecho:
        LOGGER.warning("No multiecho BOLD images found for T2* coregistration. "
                       "Using standard EPI-T1 coregistration.")
        t2s_coreg = False

    # By default, force-bbr for t2s_coreg unless user specifies otherwise
    if t2s_coreg and use_bbr is None:
        use_bbr = True

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__desc__ = """

Functional data preprocessing

: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=num_bold)

    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and template spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_file', 'sbref_file', 'subjects_dir', 'subject_id',
                't1_preproc', 't1_brain', 't1_mask', 't1_seg', 't1_tpms',
                't1_aseg', 't1_aparc',
                't1_2_mni_forward_transform', 't1_2_mni_reverse_transform',
                't1_2_fsnative_forward_transform', 't1_2_fsnative_reverse_transform']),
        name='inputnode')
    inputnode.inputs.bold_file = bold_file
    if sbref_file is not None:
        inputnode.inputs.sbref_file = sbref_file

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1', 'bold_aparc_t1',
                'bold_mni', 'bold_mni_ref', 'bold_mask_mni', 'bold_aseg_mni', 'bold_aparc_mni',
                'bold_cifti', 'cifti_variant', 'cifti_variant_key', 'confounds', 'surfaces',
                'aroma_noise_ics', 'melodic_mix', 'nonaggr_denoised_file']),
        name='outputnode')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='boldbuffer')

    summary = pe.Node(
        FunctionalSummary(output_spaces=output_spaces,
                          slice_timing=run_stc,
                          registration='FreeSurfer' if freesurfer else 'FSL',
                          registration_dof=bold2t1w_dof,
                          pe_direction=metadata.get("PhaseEncodingDirection")),
        name='summary', mem_gb=DEFAULT_MEMORY_MIN_GB, run_without_submitting=True)

    func_derivatives_wf = init_func_derivatives_wf(output_dir=output_dir,
                                                   output_spaces=output_spaces,
                                                   template=template,
                                                   freesurfer=freesurfer,
                                                   use_aroma=use_aroma,
                                                   cifti_output=cifti_output)

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_t1', 'inputnode.bold_t1'),
            ('bold_t1_ref', 'inputnode.bold_t1_ref'),
            ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
            ('bold_mask_t1', 'inputnode.bold_mask_t1'),
            ('bold_mni', 'inputnode.bold_mni'),
            ('bold_mni_ref', 'inputnode.bold_mni_ref'),
            ('bold_aseg_mni', 'inputnode.bold_aseg_mni'),
            ('bold_aparc_mni', 'inputnode.bold_aparc_mni'),
            ('bold_mask_mni', 'inputnode.bold_mask_mni'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surfaces'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_variant_key', 'inputnode.cifti_variant_key')
        ]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'), name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(name='bold_reg_wf',
                                   freesurfer=freesurfer,
                                   use_bbr=use_bbr,
                                   bold2t1w_dof=bold2t1w_dof,
                                   mem_gb=mem_gb['resampled'],
                                   omp_nthreads=omp_nthreads,
                                   use_compression=False)

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=(bool(fmaps) or use_syn),
                                             multiecho=multiecho,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not low_mem,
        use_fieldwarp=(bool(fmaps) or use_syn),
        name='bold_bold_trans_wf'
    )
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf, [
                ('outputnode.skip_vols', 'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([
                (bold_reference_wf, bold_stc_wf, [
                    ('outputnode.bold_file', 'inputnode.bold_file')])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([
                (meepi_echos, bold_stc_wf, [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([
            (bold_reference_wf, boldbuffer, [('outputnode.bold_file', 'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_wf(
        fmaps, metadata, omp_nthreads=omp_nthreads,
        debug=debug, fmap_demean=fmap_demean, fmap_bspline=fmap_bspline)
    bold_sdc_wf.inputs.inputnode.template = template

    if not fmaps:
        LOGGER.warning('SDC: no fieldmaps found or they were ignored (%s).',
                       ref_file)
    elif fmaps[0]['type'] == 'syn':
        LOGGER.warning(
            'SDC: no fieldmaps found or they were ignored. '
            'Using EXPERIMENTAL "fieldmap-less SyN" correction '
            'for dataset %s.', ref_file)
    else:
        LOGGER.log(25, 'SDC: fieldmap estimation of type "%s" intended for %s found.',
                   fmaps[0]['type'], ref_file)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from .util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        inputnode.inputs.bold_file = ref_file  # Replace reference w first echo

        # Rejoin the skull-stripped echoes into a single list; the join source
        # must match whichever node iterates over the echoes (meepi_echos when
        # slice-timing correction ran, boldbuffer otherwise).
        join_echos = pe.JoinNode(niu.IdentityInterface(fields=['bold_files']),
                                 joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
                                 joinfield=['bold_files'],
                                 name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       t2s_coreg=t2s_coreg,
                                       name='bold_t2smap_wf')

        workflow.connect([
            (skullstrip_bold_wf, join_echos, [
                ('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [
                ('bold_files', 'inputnode.bold_file')]),
        ])

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        # Generate early reference
        (inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file'),
                                        ('sbref_file', 'inputnode.sbref_file')]),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf, [
            ('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
            ('outputnode.bold_file', 'inputnode.bold_file')]),
        # EPI-T1 registration workflow
        (inputnode, bold_reg_wf, [
            ('t1_brain', 'inputnode.t1_brain'),
            ('t1_seg', 'inputnode.t1_seg'),
            # Undefined if --no-freesurfer, but this is safe
            ('subjects_dir', 'inputnode.subjects_dir'),
            ('subject_id', 'inputnode.subject_id'),
            ('t1_2_fsnative_reverse_transform', 'inputnode.t1_2_fsnative_reverse_transform')]),
        (inputnode, bold_t1_trans_wf, [
            ('bold_file', 'inputnode.name_source'),
            ('t1_brain', 'inputnode.t1_brain'),
            ('t1_mask', 'inputnode.t1_mask'),
            ('t1_aseg', 'inputnode.t1_aseg'),
            ('t1_aparc', 'inputnode.t1_aparc')]),
        # unused if multiecho, but this is safe
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms', 'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [
            ('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode, [('outputnode.bold_t1', 'bold_t1'),
                                        ('outputnode.bold_t1_ref', 'bold_t1_ref'),
                                        ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
                                        ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (inputnode, bold_sdc_wf, [
            ('t1_brain', 'inputnode.t1_brain'),
            ('t1_2_mni_reverse_transform', 'inputnode.t1_2_mni_reverse_transform')]),
        (bold_reference_wf, bold_sdc_wf, [
            ('outputnode.ref_image', 'inputnode.bold_ref'),
            ('outputnode.ref_image_brain', 'inputnode.bold_ref_brain'),
            ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        # For t2s_coreg, replace EPI-to-T1w registration inputs
        (bold_sdc_wf if not t2s_coreg else bold_t2s_wf, bold_reg_wf, [
            ('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain')]),
        (bold_sdc_wf if not t2s_coreg else bold_t2s_wf, bold_t1_trans_wf, [
            ('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
            ('outputnode.bold_mask', 'inputnode.ref_bold_mask')]),
        (bold_sdc_wf, bold_t1_trans_wf, [
            ('outputnode.out_warp', 'inputnode.fieldwarp')]),
        (bold_sdc_wf, bold_bold_trans_wf, [
            ('outputnode.out_warp', 'inputnode.fieldwarp'),
            ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('t1_tpms', 'inputnode.t1_tpms'),
                                        ('t1_mask', 'inputnode.t1_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [
            ('outputnode.movpar_file', 'inputnode.movpar_file')]),
        (bold_reg_wf, bold_confounds_wf, [
            ('outputnode.itk_t1_to_bold', 'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [
            ('outputnode.skip_vols', 'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [
            ('out_files', 'inputnode.bold_file')]),
        (bold_hmc_wf, bold_bold_trans_wf, [
            ('outputnode.xforms', 'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, func_derivatives_wf, [
                ('bold_file', 'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf, [
                ('outputnode.bold', 'inputnode.bold'),
                ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_split, bold_t1_trans_wf, [
                ('out_files', 'inputnode.bold_split')]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf, [
                (('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [
                ('outputnode.bold', 'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf, [
                ('outputnode.bold', 'inputnode.bold'),
                ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_t2s_wf, bold_t1_trans_wf, [
                ('outputnode.bold', 'inputnode.bold_split')]),
        ])

    if fmaps:
        from ..fieldmap.unwarp import init_fmap_unwarp_report_wf
        sdc_type = fmaps[0]['type']

        # Report on BOLD correction
        fmap_unwarp_report_wf = init_fmap_unwarp_report_wf(
            suffix='sdc_%s' % sdc_type)
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [
                ('t1_seg', 'inputnode.in_seg')]),
            (bold_reference_wf, fmap_unwarp_report_wf, [
                ('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [
                ('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [
                ('outputnode.bold_ref', 'inputnode.in_post')]),
        ])

        if force_syn and sdc_type != 'syn':
            syn_unwarp_report_wf = init_fmap_unwarp_report_wf(
                suffix='forcedsyn', name='syn_unwarp_report_wf')
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [
                    ('t1_seg', 'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf, [
                    ('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf, [
                    ('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf, [
                    ('outputnode.syn_bold_ref', 'inputnode.in_post')]),
            ])

    # Map final BOLD mask into T1w space (if required)
    if 'T1w' in output_spaces:
        from niworkflows.interfaces.fixes import (
            FixHeaderApplyTransforms as ApplyTransforms
        )

        boldmask_to_t1w = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', float=True),
            name='boldmask_to_t1w', mem_gb=0.1
        )
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [
                ('outputnode.itk_bold_to_t1', 'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [
                ('outputnode.bold_mask_t1', 'reference_image')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf, boldmask_to_t1w, [
                ('outputnode.bold_mask', 'input_image')]),
            (boldmask_to_t1w, outputnode, [
                ('output_image', 'bold_mask_t1')]),
        ])

    if 'template' in output_spaces:
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_mni_trans_wf = init_bold_mni_trans_wf(
            template=template,
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            template_out_grid=template_out_grid,
            use_compression=not low_mem,
            use_fieldwarp=fmaps is not None,
            name='bold_mni_trans_wf'
        )
        carpetplot_wf = init_carpetplot_wf(
            mem_gb=mem_gb['resampled'],
            metadata=metadata,
            name='carpetplot_wf')

        workflow.connect([
            (inputnode, bold_mni_trans_wf, [
                ('bold_file', 'inputnode.name_source'),
                ('t1_2_mni_forward_transform', 'inputnode.t1_2_mni_forward_transform'),
                ('t1_aseg', 'inputnode.bold_aseg'),
                ('t1_aparc', 'inputnode.bold_aparc')]),
            (bold_hmc_wf, bold_mni_trans_wf, [
                ('outputnode.xforms', 'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_mni_trans_wf, [
                ('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf, bold_mni_trans_wf, [
                ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_mni_trans_wf, [
                ('outputnode.out_warp', 'inputnode.fieldwarp')]),
            (bold_mni_trans_wf, outputnode, [('outputnode.bold_mni', 'bold_mni'),
                                             ('outputnode.bold_mni_ref', 'bold_mni_ref'),
                                             ('outputnode.bold_mask_mni', 'bold_mask_mni'),
                                             ('outputnode.bold_aseg_mni', 'bold_aseg_mni'),
                                             ('outputnode.bold_aparc_mni', 'bold_aparc_mni')]),
            (inputnode, carpetplot_wf, [
                ('t1_2_mni_reverse_transform', 'inputnode.t1_2_mni_reverse_transform')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf, carpetplot_wf, [
                ('outputnode.bold', 'inputnode.bold'),
                ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_reg_wf, carpetplot_wf, [
                ('outputnode.itk_t1_to_bold', 'inputnode.t1_bold_xform')]),
            (bold_confounds_wf, carpetplot_wf, [
                ('outputnode.confounds_file', 'inputnode.confounds_file')]),
        ])

        if not multiecho:
            workflow.connect([
                (bold_split, bold_mni_trans_wf, [
                    ('out_files', 'inputnode.bold_split')])
            ])
        else:
            split_opt_comb = bold_split.clone(name='split_opt_comb')
            workflow.connect([
                (bold_t2s_wf, split_opt_comb, [
                    ('outputnode.bold', 'in_file')]),
                (split_opt_comb, bold_mni_trans_wf, [
                    ('out_files', 'inputnode.bold_split')
                ])
            ])

        if use_aroma:
            # ICA-AROMA workflow
            # Internally resamples to MNI152 Linear (2006)
            from .confounds import init_ica_aroma_wf
            from niworkflows.interfaces.utils import JoinTSVColumns

            ica_aroma_wf = init_ica_aroma_wf(
                template=template,
                metadata=metadata,
                mem_gb=mem_gb['resampled'],
                omp_nthreads=omp_nthreads,
                use_fieldwarp=fmaps is not None,
                ignore_aroma_err=ignore_aroma_err,
                aroma_melodic_dim=aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(JoinTSVColumns(), name='aroma_confounds')

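            # Detach the plain confounds file from outputnode so that the
            # AROMA-augmented TSV produced by `join` can take its place.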
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf, [
                    ('bold_file', 'inputnode.name_source'),
                    ('t1_2_mni_forward_transform', 'inputnode.t1_2_mni_forward_transform')]),
                (bold_split, ica_aroma_wf, [
                    ('out_files', 'inputnode.bold_split')]),
                (bold_hmc_wf, ica_aroma_wf, [
                    ('outputnode.movpar_file', 'inputnode.movpar_file'),
                    ('outputnode.xforms', 'inputnode.hmc_xforms')]),
                (bold_reg_wf, ica_aroma_wf, [
                    ('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
                (bold_bold_trans_wf if not multiecho else bold_t2s_wf, ica_aroma_wf, [
                    ('outputnode.bold_mask', 'inputnode.bold_mask')]),
                (bold_sdc_wf, ica_aroma_wf, [
                    ('outputnode.out_warp', 'inputnode.fieldwarp')]),
                (bold_reference_wf, ica_aroma_wf, [
                    ('outputnode.skip_vols', 'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [
                    ('outputnode.confounds_file', 'in_file')]),
                (ica_aroma_wf, join,
                    [('outputnode.aroma_confounds', 'join_file')]),
                (ica_aroma_wf, outputnode,
                    [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                     ('outputnode.melodic_mix', 'melodic_mix'),
                     ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')]),
                (join, outputnode, [('out_file', 'confounds')]),
            ])

    # SURFACES ##################################################################################
    surface_spaces = [space for space in output_spaces if space.startswith('fs')]
    if freesurfer and surface_spaces:
        LOGGER.log(25, 'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(mem_gb=mem_gb['resampled'],
                                         output_spaces=surface_spaces,
                                         medial_surface_nan=medial_surface_nan,
                                         name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf, [
                ('t1_preproc', 'inputnode.t1_preproc'),
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('t1_2_fsnative_forward_transform', 'inputnode.t1_2_fsnative_forward_transform')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1', 'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
        ])

        # CIFTI output
        if cifti_output and surface_spaces:
            bold_surf_wf.__desc__ += """\
*Grayordinates* files [@hcppipelines], which combine surface-sampled
data and volume-sampled data, were also generated.
"""
            gen_cifti = pe.MapNode(GenerateCifti(), iterfield=["surface_target", "gifti_files"],
                                   name="gen_cifti")
            gen_cifti.inputs.TR = metadata.get("RepetitionTime")
            gen_cifti.inputs.surface_target = [s for s in surface_spaces
                                               if s.startswith('fsaverage')]

            workflow.connect([
                (bold_surf_wf, gen_cifti, [
                    ('outputnode.surfaces', 'gifti_files')]),
                (inputnode, gen_cifti, [('subjects_dir', 'subjects_dir')]),
                (bold_mni_trans_wf, gen_cifti, [('outputnode.bold_mni', 'bold_file')]),
                (gen_cifti, outputnode, [('out_file', 'bold_cifti'),
                                         ('variant', 'cifti_variant'),
                                         ('variant_key', 'cifti_variant_key')]),
            ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(
        DerivativesDataSink(suffix='summary'),
        name='ds_report_summary', run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(
        DerivativesDataSink(base_directory=reportlets_dir,
                            suffix='validation'),
        name='ds_report_validation', run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation, [
            ('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = reportlets_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
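
The examples on this page are built almost entirely from Nipype connection tuples. Below is a minimal, self-contained sketch of that convention; the toy workflow and its field names are invented for illustration and are not part of fMRIPrep.

# Each connection entry reads (source_node, destination_node, [(out_field, in_field), ...]).
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu

wf = pe.Workflow(name='toy_wf')

# IdentityInterface nodes play the same "pointer" role as the inputnode,
# outputnode and boldbuffer nodes used in the examples.
inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='inputnode')
boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='boldbuffer')
outputnode = pe.Node(niu.IdentityInterface(fields=['bold_out']), name='outputnode')

wf.connect([
    # straight pass-through: inputnode.bold_file -> boldbuffer.bold_file
    (inputnode, boldbuffer, [('bold_file', 'bold_file')]),
    # rename on the way out: boldbuffer.bold_file -> outputnode.bold_out
    (boldbuffer, outputnode, [('bold_file', 'bold_out')]),
])

# workflow.disconnect accepts the same structure and removes existing links;
# this is the pattern used above when ICA-AROMA replaces the confounds output.
wf.disconnect([(boldbuffer, outputnode, [('bold_file', 'bold_out')])])
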
Example No. 5
def init_func_preproc_wf(bold_file, has_fieldmap=False):
    """
    This workflow controls the functional preprocessing stages of *fMRIPrep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.tests import mock_config
            from fmriprep import config
            from fmriprep.workflows.bold.base import init_func_preproc_wf
            with mock_config():
                bold_file = config.execution.bids_dir / 'sub-01' / 'func' \
                    / 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz'
                wf = init_func_preproc_wf(str(bold_file))

    Parameters
    ----------
    bold_file
        BOLD series NIfTI file
    has_fieldmap
        Signals the workflow to use inputnode fieldmap files

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_aseg
        Segmentation of structural image, done with FreeSurfer.
    t1w_aparc
        Parcellation of structural image, done with FreeSurfer.
    t1w_tpms
        List of tissue probability maps in T1w space
    template
        List of templates to target
    anat2std_xfm
        List of transform files, collated with templates
    std2anat_xfm
        List of inverse transform files, collated with templates
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w
    bold_ref
        BOLD reference file
    bold_ref_xfm
        Transform file in LTA format from bold to reference
    n_dummy_scans
        Number of nonsteady states at the beginning of the BOLD run

    Outputs
    -------
    bold_t1
        BOLD series, resampled to T1w space
    bold_mask_t1
        BOLD series mask in T1w space
    bold_std
        BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    confounds
        TSV of confounds
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    aroma_noise_ics
        Noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix
    bold_cifti
        BOLD CIFTI image
    cifti_variant
        combination of target spaces for `bold_cifti`

    See Also
    --------

    * :py:func:`~niworkflows.func.util.init_bold_reference_wf`
    * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
    * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
    * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confs_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
    * :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
    * :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
    * :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
    * :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
    * :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.utility import DictMerge

    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10

    # Have some options handy
    omp_nthreads = config.nipype.omp_nthreads
    freesurfer = config.workflow.run_reconall
    spaces = config.workflow.spaces
    nibabies_dir = str(config.execution.nibabies_dir)

    # Extract BIDS entities and metadata from BOLD file(s)
    entities = extract_entities(bold_file)
    layout = config.execution.layout

    # Take first file as reference
    ref_file = pop_file(bold_file)
    metadata = layout.get_metadata(ref_file)
    # get original image orientation
    ref_orientation = get_img_orientation(ref_file)

    echo_idxs = listify(entities.get("echo", []))
    multiecho = len(echo_idxs) > 2
    if len(echo_idxs) == 1:
        config.loggers.workflow.warning(
            f"Running a single echo <{ref_file}> from a seemingly multi-echo dataset."
        )
        bold_file = ref_file  # Just in case - drop the list

    if len(echo_idxs) == 2:
        raise RuntimeError(
            "Multi-echo processing requires at least three different echos (found two)."
        )

    if multiecho:
        # Drop echo entity for future queries, have a boolean shorthand
        entities.pop("echo", None)
        # reorder echoes from shortest to largest
        tes, bold_file = zip(*sorted([(layout.get_metadata(bf)["EchoTime"], bf)
                                      for bf in bold_file]))
        ref_file = bold_file[0]  # Reset reference to be the shortest TE

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    config.loggers.workflow.debug(
        f'Creating bold processing workflow for <{ref_file}> ({mem_gb["filesize"]:.2f} GB '
        f'/ {bold_tlen} TRs). Memory resampled/largemem={mem_gb["resampled"]:.2f}'
        f'/{mem_gb["largemem"]:.2f} GB.')

    # Find associated sbref, if possible
    entities['suffix'] = 'sbref'
    entities['extension'] = ['.nii', '.nii.gz']  # Overwrite extensions
    sbref_files = layout.get(return_type='file', **entities)

    sbref_msg = f"No single-band-reference found for {os.path.basename(ref_file)}."
    if sbref_files and 'sbref' in config.workflow.ignore:
        sbref_msg = "Single-band reference file(s) found and ignored."
    elif sbref_files:
        sbref_msg = "Using single-band reference file(s) {}.".format(','.join(
            [os.path.basename(sbf) for sbf in sbref_files]))
    config.loggers.workflow.info(sbref_msg)

    if has_fieldmap:
        # Search for intended fieldmap
        from pathlib import Path
        import re
        from sdcflows.fieldmaps import get_identifier

        bold_rel = re.sub(r"^sub-[a-zA-Z0-9]*/", "",
                          str(Path(bold_file).relative_to(layout.root)))
        estimator_key = get_identifier(bold_rel)
        if not estimator_key:
            has_fieldmap = False
            config.loggers.workflow.critical(
                f"None of the available B0 fieldmaps are associated to <{bold_rel}>"
            )

    # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
    run_stc = (bool(metadata.get("SliceTiming"))
               and 'slicetiming' not in config.workflow.ignore
               and (_get_series_len(ref_file) > 4 or "TooShort"))

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'bold_file',
            # from smriprep
            'anat_preproc',
            'anat_brain',
            'anat_mask',
            'anat_dseg',
            'anat_tpms',
            'anat_aseg',
            'anat_aparc',
            'anat2std_xfm',
            'std2anat_xfm',
            'template',
            # from bold reference workflow
            'bold_ref',
            'bold_ref_xfm',
            'n_dummy_scans',
            # from sdcflows (optional)
            'fmap',
            'fmap_ref',
            'fmap_coeff',
            'fmap_mask',
            'fmap_id',
            # if reconstructing with FreeSurfer (optional)
            'anat2fsnative_xfm',
            'fsnative2anat_xfm',
            'subject_id',
            'subjects_dir',
        ]),
        name='inputnode')
    inputnode.inputs.bold_file = bold_file

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_anat', 'bold_anat_ref', 'bold2anat_xfm', 'anat2bold_xfm',
        'bold_mask_anat', 'bold_aseg_anat', 'bold_aparc_anat', 'bold_std',
        'bold_std_ref', 'bold_mask_std', 'bold_aseg_std', 'bold_aparc_std',
        'bold_native', 'bold_cifti', 'cifti_variant', 'cifti_metadata',
        'cifti_density', 'surfaces', 'confounds', 'aroma_noise_ics',
        'melodic_mix', 'nonaggr_denoised_file', 'confounds_metadata'
    ]),
                         name='outputnode')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                         name='boldbuffer')

    summary = pe.Node(FunctionalSummary(
        slice_timing=run_stc,
        registration=('FSL', 'FreeSurfer')[freesurfer],
        registration_dof=config.workflow.bold2t1w_dof,
        registration_init=config.workflow.bold2t1w_init,
        pe_direction=metadata.get("PhaseEncodingDirection"),
        echo_idx=echo_idxs,
        tr=metadata.get("RepetitionTime"),
        orientation=ref_orientation),
                      name='summary',
                      mem_gb=config.DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)
    summary.inputs.dummy_scans = config.workflow.dummy_scans
    # TODO: SDC: make dynamic
    summary.inputs.distortion_correction = 'None' if not has_fieldmap else 'TOPUP'

    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=config.workflow.cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=nibabies_dir,
        spaces=spaces,
        use_aroma=config.workflow.use_aroma,
        debug=config.execution.debug,
    )

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_anat', 'inputnode.bold_t1'),
            ('bold_anat_ref', 'inputnode.bold_t1_ref'),
            ('bold2anat_xfm', 'inputnode.bold2anat_xfm'),
            ('anat2bold_xfm', 'inputnode.anat2bold_xfm'),
            ('bold_aseg_anat', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_anat', 'inputnode.bold_aparc_t1'),
            ('bold_mask_anat', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surf_files'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_metadata', 'inputnode.cifti_metadata'),
            ('cifti_density', 'inputnode.cifti_density'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
            ('acompcor_masks', 'inputnode.acompcor_masks'),
            ('tcompcor_mask', 'inputnode.tcompcor_mask'),
        ]),
    ])

    # Extract BOLD validation from init_bold_reference_wf
    val_bold = pe.MapNode(
        ValidateImage(),
        name="val_bold",
        mem_gb=config.DEFAULT_MEMORY_MIN_GB,
        iterfield=["in_file"],
    )
    val_bold.inputs.in_file = listify(bold_file)

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'),
                         name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(
        bold2t1w_dof=config.workflow.bold2t1w_dof,
        bold2t1w_init=config.workflow.bold2t1w_init,
        freesurfer=freesurfer,
        mem_gb=mem_gb['resampled'],
        name='bold_reg_wf',
        omp_nthreads=omp_nthreads,
        sloppy=config.execution.sloppy,
        use_bbr=config.workflow.use_bbr,
    )

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)
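    # Without a fieldmap there is no SDC warp to compose into the resampling;
    # pass the literal 'identity' transform instead.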
    if not has_fieldmap:
        bold_t1_trans_wf.inputs.inputnode.fieldwarp = 'identity'

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        freesurfer=freesurfer,
        regressors_all_comps=config.workflow.regressors_all_comps,
        regressors_fd_th=config.workflow.regressors_fd_th,
        regressors_dvars_th=config.workflow.regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=False,  # TODO: Fieldwarp is already applied in new sdcflow
        name='bold_bold_trans_wf')
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (inputnode, bold_stc_wf, [('n_dummy_scans', 'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([(val_bold, bold_stc_wf, [
                (("out_file", pop_file), 'inputnode.bold_file')
            ])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([(meepi_echos, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([(val_bold, boldbuffer, [(("out_file", pop_file),
                                                   'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:  # instantiate relevant interfaces, imports
        from niworkflows.func.util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        split_opt_comb = bold_split.clone(name='split_opt_comb')

        inputnode.inputs.bold_file = ref_file  # Replace reference w first echo

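        # Gather the per-echo outputs (iterated through meepi_echos or boldbuffer)
        # back into lists before optimal combination.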
        join_echos = pe.JoinNode(
            niu.IdentityInterface(
                fields=['bold_files', 'skullstripped_bold_files']),
            joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
            joinfield=['bold_files', 'skullstripped_bold_files'],
            name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       name='bold_t2smap_wf')

    # Mask BOLD reference image
    final_boldref_masker = pe.Node(BrainExtraction(),
                                   name='final_boldref_masker')

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        # BOLD buffer holds the slice-time corrected series if STC was run, the original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (inputnode, bold_hmc_wf, [('bold_ref', 'inputnode.raw_ref_image')]),
        (inputnode, final_boldref_masker, [('bold_ref', 'in_file')]),
        (val_bold, bold_hmc_wf, [(("out_file", pop_file),
                                  'inputnode.bold_file')]),
        (inputnode, summary, [('n_dummy_scans', 'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (
            inputnode,
            bold_reg_wf,
            [
                ('anat_dseg', 'inputnode.t1w_dseg'),
                # Undefined if --fs-no-reconall, but this is safe
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('fsnative2anat_xfm', 'inputnode.fsnative2t1w_xfm')
            ]),
        (inputnode, bold_reg_wf, [('anat_brain', 'inputnode.t1w_brain')]),
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('anat_mask', 'inputnode.t1w_mask'),
                                       ('anat_brain', 'inputnode.t1w_brain'),
                                       ('anat_aseg', 'inputnode.t1w_aseg'),
                                       ('anat_aparc', 'inputnode.t1w_aparc')]),
        (bold_reg_wf, outputnode,
         [('outputnode.itk_bold_to_t1', 'bold2anat_xfm'),
          ('outputnode.itk_t1_to_bold', 'anat2bold_xfm')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.itk_bold_to_t1',
                                          'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode,
         [('outputnode.bold_t1', 'bold_anat'),
          ('outputnode.bold_t1_ref', 'bold_anat_ref'),
          ('outputnode.bold_aseg_t1', 'bold_aseg_anat'),
          ('outputnode.bold_aparc_t1', 'bold_aparc_anat')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('anat_tpms', 'inputnode.t1w_tpms'),
                                        ('anat_mask', 'inputnode.t1w_mask')]),
        (bold_hmc_wf, bold_confounds_wf,
         [('outputnode.movpar_file', 'inputnode.movpar_file'),
          ('outputnode.rmsd_file', 'inputnode.rmsd_file')]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
        (inputnode, bold_confounds_wf, [('n_dummy_scans',
                                         'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
            ('outputnode.confounds_metadata', 'confounds_metadata'),
            ('outputnode.acompcor_masks', 'acompcor_masks'),
            ('outputnode.tcompcor_mask', 'tcompcor_mask'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [('out_files', 'inputnode.bold_file')]),
        (bold_hmc_wf, bold_bold_trans_wf, [('outputnode.xforms',
                                            'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        # TODO: Add SDC
        workflow.connect([
            (inputnode, func_derivatives_wf, [('bold_file',
                                               'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf, [('outputnode.bold',
                                                      'inputnode.bold')]),
            # (bold_bold_trans_wf, final_boldref_wf, [
            #     ('outputnode.bold', 'inputnode.bold_file')]),
            (bold_split, bold_t1_trans_wf, [('out_files',
                                             'inputnode.bold_split')]),
            (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms',
                                              'inputnode.hmc_xforms')]),
            # (bold_sdc_wf, bold_t1_trans_wf, [
            #     ('outputnode.out_warp', 'inputnode.fieldwarp')])
        ])
    else:  # for meepi, use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf,
             [(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, join_echos, [('outputnode.bold', 'bold_files')]),
            # (join_echos, final_boldref_wf, [
            #     ('bold_files', 'inputnode.bold_file')]),
            # TODO: Check with multi-echo data
            (bold_bold_trans_wf, skullstrip_bold_wf, [('outputnode.bold',
                                                       'inputnode.in_file')]),
            (skullstrip_bold_wf, join_echos,
             [('outputnode.skull_stripped_file', 'skullstripped_bold_files')]),
            (join_echos, bold_t2s_wf, [('skullstripped_bold_files',
                                        'inputnode.bold_file')]),
            (bold_t2s_wf, bold_confounds_wf, [('outputnode.bold',
                                               'inputnode.bold')]),
            (bold_t2s_wf, split_opt_comb, [('outputnode.bold', 'in_file')]),
            (split_opt_comb, bold_t1_trans_wf, [('out_files',
                                                 'inputnode.bold_split')]),
        ])

        # Already applied in bold_bold_trans_wf, which inputs to bold_t2s_wf
        bold_t1_trans_wf.inputs.inputnode.fieldwarp = 'identity'
        bold_t1_trans_wf.inputs.inputnode.hmc_xforms = 'identity'

    # Map final BOLD mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(('T1w', 'anat')):
        from niworkflows.interfaces.fixes import (FixHeaderApplyTransforms as
                                                  ApplyTransforms)

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel'),
                                  name='boldmask_to_t1w',
                                  mem_gb=0.1)
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.itk_bold_to_t1',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_anat')]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        workflow.connect([
            (inputnode, func_derivatives_wf, [
                ('bold_ref', 'inputnode.bold_native_ref'),
            ]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf, outputnode,
             [('outputnode.bold', 'bold_native')])
        ])

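    # Volumetric (3D) standard spaces requested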
    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            name='bold_std_trans_wf',
            use_compression=not config.execution.low_mem,
        )
        if not has_fieldmap:
            bold_std_trans_wf.inputs.inputnode.fieldwarp = 'identity'

        workflow.connect([
            (inputnode, bold_std_trans_wf,
             [('template', 'inputnode.templates'),
              ('anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('bold_file', 'inputnode.name_source'),
              ('anat_aseg', 'inputnode.bold_aseg'),
              ('anat_aparc', 'inputnode.bold_aparc')]),
            (bold_reg_wf, bold_std_trans_wf, [('outputnode.itk_bold_to_t1',
                                               'inputnode.itk_bold_to_t1')]),
            (bold_std_trans_wf, outputnode,
             [('outputnode.bold_std', 'bold_std'),
              ('outputnode.bold_std_ref', 'bold_std_ref'),
              ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('outputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode,
                 [('outputnode.bold_aseg_std', 'bold_aseg_std'),
                  ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        if not multiecho:
            # TODO: Add SDC
            workflow.connect([
                (bold_split, bold_std_trans_wf, [('out_files',
                                                  'inputnode.bold_split')]),
                # (bold_sdc_wf, bold_std_trans_wf, [
                #     ('outputnode.out_warp', 'inputnode.fieldwarp')]),
                (bold_hmc_wf, bold_std_trans_wf, [('outputnode.xforms',
                                                   'inputnode.hmc_xforms')]),
            ])
        else:
            workflow.connect([(split_opt_comb, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])

            # Already applied in bold_bold_trans_wf, which inputs to bold_t2s_wf
            bold_std_trans_wf.inputs.inputnode.fieldwarp = 'identity'
            bold_std_trans_wf.inputs.inputnode.hmc_xforms = 'identity'

        # func_derivatives_wf internally parametrizes over snapshotted spaces.
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.spatial_reference',
                 'inputnode.spatial_reference'),
                ('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('outputnode.bold_std', 'inputnode.bold_std'),
                ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])

        if config.workflow.use_aroma:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf
            ica_aroma_wf = init_ica_aroma_wf(
                mem_gb=mem_gb['resampled'],
                metadata=metadata,
                omp_nthreads=omp_nthreads,
                err_on_aroma_warn=config.workflow.aroma_err_on_warn,
                aroma_melodic_dim=config.workflow.aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(niu.Function(output_names=["out_file"],
                                        function=_to_join),
                           name='aroma_confounds')

            mrg_conf_metadata = pe.Node(niu.Merge(2),
                                        name='merge_confound_metadata',
                                        run_without_submitting=True)
            mrg_conf_metadata2 = pe.Node(DictMerge(),
                                         name='merge_confound_metadata2',
                                         run_without_submitting=True)
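            # Detach the plain confounds file and metadata from outputnode so the
            # AROMA-joined TSV and the merged metadata can replace them.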
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf, [('bold_file',
                                            'inputnode.name_source')]),
                (bold_hmc_wf, ica_aroma_wf, [('outputnode.movpar_file',
                                              'inputnode.movpar_file')]),
                (inputnode, ica_aroma_wf, [('n_dummy_scans',
                                            'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [('outputnode.confounds_file',
                                            'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                 [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join, [('outputnode.aroma_confounds',
                                       'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                 [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                 [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                  ('outputnode.melodic_mix', 'melodic_mix'),
                  ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')
                  ]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict',
                                                   'confounds_metadata')]),
                (bold_std_trans_wf, ica_aroma_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
            ])

    # SURFACES ##################################################################################
    # Freesurfer
    freesurfer_spaces = spaces.get_fs_spaces()
    if freesurfer and freesurfer_spaces:
        config.loggers.workflow.debug(
            'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(
            mem_gb=mem_gb['resampled'],
            surface_spaces=freesurfer_spaces,
            medial_surface_nan=config.workflow.medial_surface_nan,
            name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf,
             [('subjects_dir', 'inputnode.subjects_dir'),
              ('subject_id', 'inputnode.subject_id'),
              ('anat2fsnative_xfm', 'inputnode.t1w2fsnative_xfm')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1',
                                               'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
            (bold_surf_wf, func_derivatives_wf, [('outputnode.target',
                                                  'inputnode.surf_refs')]),
        ])

        # CIFTI output
        if config.workflow.cifti_output:
            from .resampling import init_bold_grayords_wf
            bold_grayords_wf = init_bold_grayords_wf(
                grayord_density=config.workflow.cifti_output,
                mem_gb=mem_gb['resampled'],
                repetition_time=metadata['RepetitionTime'])

            workflow.connect([
                (inputnode, bold_grayords_wf, [('subjects_dir',
                                                'inputnode.subjects_dir')]),
                (bold_std_trans_wf, bold_grayords_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
                (bold_surf_wf, bold_grayords_wf, [
                    ('outputnode.surfaces', 'inputnode.surf_files'),
                    ('outputnode.target', 'inputnode.surf_refs'),
                ]),
                (bold_grayords_wf, outputnode,
                 [('outputnode.cifti_bold', 'bold_cifti'),
                  ('outputnode.cifti_variant', 'cifti_variant'),
                  ('outputnode.cifti_metadata', 'cifti_metadata'),
                  ('outputnode.cifti_density', 'cifti_density')]),
            ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        if not config.workflow.cifti_output:
            config.loggers.workflow.critical(
                "The carpetplot requires CIFTI outputs")
        else:
            carpetplot_wf = init_carpetplot_wf(
                mem_gb=mem_gb['resampled'],
                metadata=metadata,
                cifti_output=bool(config.workflow.cifti_output),
                name='carpetplot_wf')

            workflow.connect([
                (bold_grayords_wf, carpetplot_wf, [('outputnode.cifti_bold',
                                                    'inputnode.cifti_bold')]),
                (bold_confounds_wf, carpetplot_wf,
                 [('outputnode.confounds_file', 'inputnode.confounds_file')]),
            ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(DerivativesDataSink(
        desc='summary', datatype="figures", dismiss_entities=("echo", )),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(DerivativesDataSink(
        desc='validation', datatype="figures", dismiss_entities=("echo", )),
                                   name='ds_report_validation',
                                   run_without_submitting=True,
                                   mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (val_bold, ds_report_validation, [(("out_report", pop_file), 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = nibabies_dir
            workflow.get_node(node).inputs.source_file = ref_file

    # Distortion correction
    if not has_fieldmap:
        # fmt: off
        # Finalize workflow with fieldmap-less connections
        workflow.connect([
            (final_boldref_masker, bold_t1_trans_wf, [
                ('out_mask', 'inputnode.ref_bold_mask'),
                ('out_file', 'inputnode.ref_bold_brain'),
            ]),
            (final_boldref_masker, bold_reg_wf, [
                ('out_file', 'inputnode.ref_bold_brain'),
            ]),
            (final_boldref_masker, bold_confounds_wf,
             [('out_mask', 'inputnode.bold_mask')]),
        ])

        if nonstd_spaces.intersection(('T1w', 'anat')):
            workflow.connect([
                (final_boldref_masker, boldmask_to_t1w, [('out_mask',
                                                          'input_image')]),
            ])
        #         (final_boldref_wf, boldmask_to_t1w, [('outputnode.bold_mask', 'input_image')]),
        #     ])

        if nonstd_spaces.intersection(
            ('func', 'run', 'bold', 'boldref', 'sbref')):
            workflow.connect([
                (final_boldref_masker, func_derivatives_wf,
                 [('out_file', 'inputnode.bold_native_ref'),
                  ('out_mask', 'inputnode.bold_mask_native')]),
            ])
        #         (final_boldref_wf, func_derivatives_wf, [
        #             ('outputnode.ref_image', 'inputnode.bold_native_ref'),
        #             ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        #     ])

        if spaces.get_spaces(nonstandard=False, dim=(3, )):
            workflow.connect([
                (final_boldref_masker, bold_std_trans_wf,
                 [('out_mask', 'inputnode.bold_mask')]),
            ])
        #         (final_boldref_wf, bold_std_trans_wf, [
        #             ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        #     ])

        # fmt: on
        return workflow

    from niworkflows.interfaces.reportlets.registration import (
        SimpleBeforeAfterRPT as SimpleBeforeAfter, )
    from niworkflows.interfaces.utility import KeySelect
    from sdcflows.workflows.apply.registration import init_coeff2epi_wf
    from sdcflows.workflows.apply.correction import init_unwarp_wf

    coeff2epi_wf = init_coeff2epi_wf(
        debug="fieldmaps" in config.execution.debug,
        omp_nthreads=config.nipype.omp_nthreads,
        write_coeff=True,
    )
    unwarp_wf = init_unwarp_wf(debug="fieldmaps" in config.execution.debug,
                               omp_nthreads=config.nipype.omp_nthreads)
    unwarp_wf.inputs.inputnode.metadata = layout.get_metadata(str(bold_file))

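    # Select the fieldmap estimation matching this BOLD run from the lists
    # available at the inputnode (entries are keyed by fmap_id).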
    output_select = pe.Node(
        KeySelect(fields=["fmap", "fmap_ref", "fmap_coeff", "fmap_mask"]),
        name="output_select",
        run_without_submitting=True,
    )
    output_select.inputs.key = estimator_key[0]
    if len(estimator_key) > 1:
        config.loggers.workflow.warning(
            f"Several fieldmaps <{', '.join(estimator_key)}> are "
            f"'IntendedFor' <{bold_file}>, using {estimator_key[0]}")

    sdc_report = pe.Node(
        SimpleBeforeAfter(before_label="Distorted", after_label="Corrected"),
        name="sdc_report",
        mem_gb=0.1,
    )

    ds_report_sdc = pe.Node(
        DerivativesDataSink(base_directory=nibabies_dir,
                            desc="sdc",
                            suffix="bold",
                            datatype="figures",
                            dismiss_entities=("echo", )),
        name="ds_report_sdc",
        run_without_submitting=True,
    )

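    # Re-extract the brain mask from the distortion-corrected BOLD reference.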
    unwarp_masker = pe.Node(BrainExtraction(), name='unwarp_masker')

    # fmt: off
    workflow.connect([
        (inputnode, output_select, [("fmap", "fmap"), ("fmap_ref", "fmap_ref"),
                                    ("fmap_coeff", "fmap_coeff"),
                                    ("fmap_mask", "fmap_mask"),
                                    ("fmap_id", "keys")]),
        (output_select, coeff2epi_wf, [("fmap_ref", "inputnode.fmap_ref"),
                                       ("fmap_coeff", "inputnode.fmap_coeff"),
                                       ("fmap_mask", "inputnode.fmap_mask")]),
        (inputnode, coeff2epi_wf, [("bold_ref", "inputnode.target_ref")]),
        (final_boldref_masker, coeff2epi_wf, [("out_file",
                                               "inputnode.target_mask")]),
        (inputnode, unwarp_wf, [("bold_ref", "inputnode.distorted")]),
        (coeff2epi_wf, unwarp_wf, [("outputnode.fmap_coeff",
                                    "inputnode.fmap_coeff")]),
        (inputnode, sdc_report, [("bold_ref", "before")]),
        (unwarp_wf, sdc_report, [("outputnode.corrected", "after"),
                                 ("outputnode.corrected_mask", "wm_seg")]),
        (inputnode, ds_report_sdc, [("bold_file", "source_file")]),
        (sdc_report, ds_report_sdc, [("out_report", "in_file")]),
        # remaining workflow connections
        (unwarp_wf, unwarp_masker, [('outputnode.corrected', 'in_file')]),
        (unwarp_masker, bold_confounds_wf, [('out_mask', 'inputnode.bold_mask')]),
        (unwarp_masker, bold_t1_trans_wf,
         [('out_mask', 'inputnode.ref_bold_mask'),
          ('out_file', 'inputnode.ref_bold_brain')]),
        # (unwarp_masker, bold_bold_trans_wf, [
        #     ('out_mask', 'inputnode.bold_mask')]),  # Not used within workflow
        (unwarp_masker, bold_reg_wf, [('out_file', 'inputnode.ref_bold_brain')]),
        # TODO: Add distortion correction method to sdcflow outputs?
        # (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')]),
    ])

    if nonstd_spaces.intersection(('T1w', 'anat')):
        workflow.connect([
            (unwarp_masker, boldmask_to_t1w, [('out_mask', 'input_image')]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        workflow.connect([
            (unwarp_masker, func_derivatives_wf,
             [('out_file', 'inputnode.bold_native_ref'),
              ('out_mask', 'inputnode.bold_mask_native')]),
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        workflow.connect([
            (unwarp_masker, bold_std_trans_wf, [('out_mask',
                                                 'inputnode.bold_mask')]),
        ])
    # fmt: on

    # if not multiecho:
    #     (bold_sdc_wf, bold_t1_trans_wf, [
    #             ('outputnode.out_warp', 'inputnode.fieldwarp')])
    #     (bold_sdc_wf, bold_std_trans_wf, [
    #         ('outputnode.out_warp', 'inputnode.fieldwarp')]),
    # ])
    return workflow
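
The slice-timing decision near the top of this example relies on Python's short-circuit semantics to yield one of three values. The stand-alone sketch below reproduces that behaviour with a hypothetical helper: a plain dict stands in for the BIDS metadata, a list for config.workflow.ignore, and an integer for _get_series_len.

# `run_stc` evaluates to True, False, or the sentinel string "TooShort",
# which is why the workflow checks `run_stc is True` before building STC.
def decide_stc(metadata, ignore, n_vols):
    return (bool(metadata.get("SliceTiming"))
            and 'slicetiming' not in ignore
            and (n_vols > 4 or "TooShort"))

assert decide_stc({"SliceTiming": [0.0, 0.5]}, ignore=[], n_vols=200) is True
assert decide_stc({}, ignore=[], n_vols=200) is False
assert decide_stc({"SliceTiming": [0.0, 0.5]}, ignore=[], n_vols=3) == "TooShort"
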
Example No. 6
def init_func_derivatives_wf(
    bids_root,
    cifti_output,
    freesurfer,
    metadata,
    output_dir,
    spaces,
    use_aroma,
    name='func_derivatives_wf',
):
    """
    Set up a battery of datasinks to store derivatives in the right location.

    Parameters
    ----------
    bids_root : :obj:`str`
        Original BIDS dataset path.
    cifti_output : :obj:`bool`
        Whether the ``--cifti-output`` flag was set.
    freesurfer : :obj:`bool`
        Whether FreeSurfer anatomical processing was run.
    metadata : :obj:`dict`
        Metadata dictionary associated to the BOLD run.
    output_dir : :obj:`str`
        Where derivatives should be written out to.
    spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
        A container for storing, organizing, and parsing spatial normalizations. Composed of
        :py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
        Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
        (e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
        (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
        the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
        dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
        would lead to resampling on a 2mm resolution of the space).
    use_aroma : :obj:`bool`
        Whether ``--use-aroma`` flag was set.
    name : :obj:`str`
        This workflow's identifier (default: ``func_derivatives_wf``).

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.utility import KeySelect
    from smriprep.workflows.outputs import _bids_relative

    nonstd_spaces = set(spaces.get_nonstandard())
    workflow = Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'aroma_noise_ics', 'bold_aparc_std', 'bold_aparc_t1', 'bold_aseg_std',
        'bold_aseg_t1', 'bold_cifti', 'bold_mask_std', 'bold_mask_t1', 'bold_std',
        'bold_std_ref', 'bold_t1', 'bold_t1_ref', 'bold_native', 'bold_native_ref',
        'bold_mask_native', 'cifti_variant', 'cifti_metadata', 'cifti_density',
        'confounds', 'confounds_metadata', 'melodic_mix', 'nonaggr_denoised_file',
        'source_file', 'surf_files', 'surf_refs', 'template', 'spatial_reference']),
        name='inputnode')

    raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources')
    raw_sources.inputs.bids_root = bids_root

    ds_confounds = pe.Node(DerivativesDataSink(
        base_directory=output_dir, desc='confounds', suffix='regressors',
        dismiss_entities=("echo",)),
        name="ds_confounds", run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB)
    workflow.connect([
        (inputnode, raw_sources, [('source_file', 'in_files')]),
        (inputnode, ds_confounds, [('source_file', 'source_file'),
                                   ('confounds', 'in_file'),
                                   ('confounds_metadata', 'meta_dict')]),
    ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        ds_bold_native = pe.Node(
            DerivativesDataSink(
                base_directory=output_dir, desc='preproc', compress=True, SkullStripped=False,
                RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName'),
                dismiss_entities=("echo",)),
            name='ds_bold_native', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_bold_native_ref = pe.Node(
            DerivativesDataSink(base_directory=output_dir, suffix='boldref', compress=True,
                                dismiss_entities=("echo",)),
            name='ds_bold_native_ref', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_bold_mask_native = pe.Node(
            DerivativesDataSink(base_directory=output_dir, desc='brain', suffix='mask',
                                compress=True, dismiss_entities=("echo",)),
            name='ds_bold_mask_native', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, ds_bold_native, [('source_file', 'source_file'),
                                         ('bold_native', 'in_file')]),
            (inputnode, ds_bold_native_ref, [('source_file', 'source_file'),
                                             ('bold_native_ref', 'in_file')]),
            (inputnode, ds_bold_mask_native, [('source_file', 'source_file'),
                                              ('bold_mask_native', 'in_file')]),
            (raw_sources, ds_bold_mask_native, [('out', 'RawSources')]),
        ])

    # Resample to T1w space
    if nonstd_spaces.intersection(('T1w', 'anat')):
        ds_bold_t1 = pe.Node(
            DerivativesDataSink(
                base_directory=output_dir, space='T1w', desc='preproc', compress=True,
                SkullStripped=False, RepetitionTime=metadata.get('RepetitionTime'),
                TaskName=metadata.get('TaskName'), dismiss_entities=("echo",)),
            name='ds_bold_t1', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_bold_t1_ref = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space='T1w', suffix='boldref',
                                compress=True, dismiss_entities=("echo",)),
            name='ds_bold_t1_ref', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)

        ds_bold_mask_t1 = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space='T1w', desc='brain',
                                suffix='mask', compress=True, dismiss_entities=("echo",)),
            name='ds_bold_mask_t1', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        workflow.connect([
            (inputnode, ds_bold_t1, [('source_file', 'source_file'),
                                     ('bold_t1', 'in_file')]),
            (inputnode, ds_bold_t1_ref, [('source_file', 'source_file'),
                                         ('bold_t1_ref', 'in_file')]),
            (inputnode, ds_bold_mask_t1, [('source_file', 'source_file'),
                                          ('bold_mask_t1', 'in_file')]),
            (raw_sources, ds_bold_mask_t1, [('out', 'RawSources')]),
        ])
        if freesurfer:
            ds_bold_aseg_t1 = pe.Node(DerivativesDataSink(
                base_directory=output_dir, space='T1w', desc='aseg', suffix='dseg',
                compress=True, dismiss_entities=("echo",)),
                name='ds_bold_aseg_t1', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            ds_bold_aparc_t1 = pe.Node(DerivativesDataSink(
                base_directory=output_dir, space='T1w', desc='aparcaseg', suffix='dseg',
                compress=True, dismiss_entities=("echo",)),
                name='ds_bold_aparc_t1', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            workflow.connect([
                (inputnode, ds_bold_aseg_t1, [('source_file', 'source_file'),
                                              ('bold_aseg_t1', 'in_file')]),
                (inputnode, ds_bold_aparc_t1, [('source_file', 'source_file'),
                                               ('bold_aparc_t1', 'in_file')]),
            ])

    if use_aroma:
        ds_aroma_noise_ics = pe.Node(DerivativesDataSink(
            base_directory=output_dir, suffix='AROMAnoiseICs', dismiss_entities=("echo",)),
            name="ds_aroma_noise_ics", run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_melodic_mix = pe.Node(DerivativesDataSink(
            base_directory=output_dir, desc='MELODIC', suffix='mixing',
            dismiss_entities=("echo",)),
            name="ds_melodic_mix", run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_aroma_std = pe.Node(
            DerivativesDataSink(
                base_directory=output_dir, space='MNI152NLin6Asym', desc='smoothAROMAnonaggr',
                compress=True),
            name='ds_aroma_std', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, ds_aroma_noise_ics, [('source_file', 'source_file'),
                                             ('aroma_noise_ics', 'in_file')]),
            (inputnode, ds_melodic_mix, [('source_file', 'source_file'),
                                         ('melodic_mix', 'in_file')]),
            (inputnode, ds_aroma_std, [('source_file', 'source_file'),
                                       ('nonaggr_denoised_file', 'in_file')]),
        ])

    if getattr(spaces, '_cached') is None:
        return workflow

    # Store resamplings in standard spaces when listed in --output-spaces
    if spaces.cached.references:
        from niworkflows.interfaces.space import SpaceDataSource

        spacesource = pe.Node(SpaceDataSource(),
                              name='spacesource', run_without_submitting=True)
        spacesource.iterables = ('in_tuple', [
            (s.fullname, s.spec) for s in spaces.cached.get_standard(dim=(3,))
        ])

        select_std = pe.Node(KeySelect(
            fields=['template', 'bold_std', 'bold_std_ref', 'bold_mask_std']),
            name='select_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)

        ds_bold_std = pe.Node(
            DerivativesDataSink(
                base_directory=output_dir, desc='preproc', compress=True, SkullStripped=False,
                RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName'),
                dismiss_entities=("echo",)),
            name='ds_bold_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_bold_std_ref = pe.Node(
            DerivativesDataSink(base_directory=output_dir, suffix='boldref', compress=True,
                                dismiss_entities=("echo",)),
            name='ds_bold_std_ref', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_bold_mask_std = pe.Node(
            DerivativesDataSink(base_directory=output_dir, desc='brain', suffix='mask',
                                compress=True, dismiss_entities=("echo",)),
            name='ds_bold_mask_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, ds_bold_std, [('source_file', 'source_file')]),
            (inputnode, ds_bold_std_ref, [('source_file', 'source_file')]),
            (inputnode, ds_bold_mask_std, [('source_file', 'source_file')]),
            (inputnode, select_std, [('bold_std', 'bold_std'),
                                     ('bold_std_ref', 'bold_std_ref'),
                                     ('bold_mask_std', 'bold_mask_std'),
                                     ('template', 'template'),
                                     ('spatial_reference', 'keys')]),
            (spacesource, select_std, [('uid', 'key')]),
            (select_std, ds_bold_std, [('bold_std', 'in_file')]),
            (spacesource, ds_bold_std, [('space', 'space'),
                                        ('cohort', 'cohort'),
                                        ('resolution', 'resolution'),
                                        ('density', 'density')]),
            (select_std, ds_bold_std_ref, [('bold_std_ref', 'in_file')]),
            (spacesource, ds_bold_std_ref, [('space', 'space'),
                                            ('cohort', 'cohort'),
                                            ('resolution', 'resolution'),
                                            ('density', 'density')]),
            (select_std, ds_bold_mask_std, [('bold_mask_std', 'in_file')]),
            (spacesource, ds_bold_mask_std, [('space', 'space'),
                                             ('cohort', 'cohort'),
                                             ('resolution', 'resolution'),
                                             ('density', 'density')]),
            (raw_sources, ds_bold_mask_std, [('out', 'RawSources')]),
        ])

        if freesurfer:
            select_fs_std = pe.Node(KeySelect(
                fields=['bold_aseg_std', 'bold_aparc_std', 'template']),
                name='select_fs_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
            ds_bold_aseg_std = pe.Node(DerivativesDataSink(
                base_directory=output_dir, desc='aseg', suffix='dseg', compress=True,
                dismiss_entities=("echo",)),
                name='ds_bold_aseg_std', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            ds_bold_aparc_std = pe.Node(DerivativesDataSink(
                base_directory=output_dir, desc='aparcaseg', suffix='dseg', compress=True,
                dismiss_entities=("echo",)),
                name='ds_bold_aparc_std', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            workflow.connect([
                (spacesource, select_fs_std, [('uid', 'key')]),
                (inputnode, select_fs_std, [('bold_aseg_std', 'bold_aseg_std'),
                                            ('bold_aparc_std', 'bold_aparc_std'),
                                            ('template', 'template'),
                                            ('spatial_reference', 'keys')]),
                (select_fs_std, ds_bold_aseg_std, [('bold_aseg_std', 'in_file')]),
                (spacesource, ds_bold_aseg_std, [('space', 'space'),
                                                 ('cohort', 'cohort'),
                                                 ('resolution', 'resolution'),
                                                 ('density', 'density')]),
                (select_fs_std, ds_bold_aparc_std, [('bold_aparc_std', 'in_file')]),
                (spacesource, ds_bold_aparc_std, [('space', 'space'),
                                                  ('cohort', 'cohort'),
                                                  ('resolution', 'resolution'),
                                                  ('density', 'density')]),
                (inputnode, ds_bold_aseg_std, [('source_file', 'source_file')]),
                (inputnode, ds_bold_aparc_std, [('source_file', 'source_file')])
            ])

    fs_outputs = spaces.cached.get_fs_spaces()
    if freesurfer and fs_outputs:
        from niworkflows.interfaces.surf import Path2BIDS

        select_fs_surf = pe.Node(KeySelect(
            fields=['surfaces', 'surf_kwargs']), name='select_fs_surf',
            run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
        select_fs_surf.iterables = [('key', fs_outputs)]
        select_fs_surf.inputs.surf_kwargs = [{'space': s} for s in fs_outputs]

        name_surfs = pe.MapNode(Path2BIDS(pattern=r'(?P<hemi>[lr])h.\w+'),
                                iterfield='in_file', name='name_surfs',
                                run_without_submitting=True)

        ds_bold_surfs = pe.MapNode(DerivativesDataSink(
            base_directory=output_dir, extension="func.gii", dismiss_entities=("echo",)),
            iterfield=['in_file', 'hemi'], name='ds_bold_surfs',
            run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, select_fs_surf, [
                ('surf_files', 'surfaces'),
                ('surf_refs', 'keys')]),
            (select_fs_surf, name_surfs, [('surfaces', 'in_file')]),
            (inputnode, ds_bold_surfs, [('source_file', 'source_file')]),
            (select_fs_surf, ds_bold_surfs, [('surfaces', 'in_file'),
                                             ('key', 'space')]),
            (name_surfs, ds_bold_surfs, [('hemi', 'hemi')]),
        ])

    # CIFTI output
    if cifti_output:
        ds_bold_cifti = pe.Node(DerivativesDataSink(
            base_directory=output_dir, suffix='bold', compress=False, dismiss_entities=("echo",)),
            name='ds_bold_cifti', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        workflow.connect([
            (inputnode, ds_bold_cifti, [(('bold_cifti', _unlist), 'in_file'),
                                        ('source_file', 'source_file'),
                                        (('cifti_metadata', _get_surface), 'space'),
                                        ('cifti_density', 'density'),
                                        (('cifti_metadata', _read_json), 'meta_dict')])
        ])

    return workflow
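
# --- Usage sketch (added for illustration; not part of the original example) ---
# A minimal, hypothetical call to the workflow factory above. It assumes
# niworkflows' ``Reference``/``SpatialReferences`` API (including a
# ``checkpoint()`` call to populate ``spaces.cached``); all paths and metadata
# values are placeholders.
from niworkflows.utils.spaces import Reference, SpatialReferences

_spaces = SpatialReferences([
    Reference('MNI152NLin2009cAsym', {'resolution': 2}),  # standard space + spec
    Reference('T1w'),                                      # nonstandard (anatomical) output
])
_spaces.checkpoint()  # cache the references so the standard-space datasinks get wired

func_derivatives_wf = init_func_derivatives_wf(
    bids_root='/data/bids',                      # placeholder paths
    cifti_output=False,
    freesurfer=True,
    metadata={'RepetitionTime': 2.0, 'TaskName': 'rest'},
    output_dir='/data/derivatives',
    spaces=_spaces,
    use_aroma=False,
)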
Example #7
def init_bold_t1_trans_wf(freesurfer, mem_gb, omp_nthreads, multiecho=False, use_fieldwarp=False,
                          use_compression=True, name='bold_t1_trans_wf'):
    """
    This workflow registers the reference BOLD image to T1-space, using a
    boundary-based registration (BBR) cost function.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.registration import init_bold_t1_trans_wf
        wf = init_bold_t1_trans_wf(freesurfer=True,
                                   mem_gb=3,
                                   omp_nthreads=1)

    **Parameters**

        freesurfer : bool
            Enable FreeSurfer functional registration (bbregister)
        use_fieldwarp : bool
            Include SDC warp in single-shot transform from BOLD to T1
        multiecho : bool
            If multiecho data was supplied, HMC already performed
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        use_compression : bool
            Save registered BOLD series as ``.nii.gz``
        name : str
            Name of workflow (default: ``bold_t1_trans_wf``)

    **Inputs**

        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing
        ref_bold_brain
            Reference image to which BOLD series is aligned
            If ``fieldwarp == True``, ``ref_bold_brain`` should be unwarped
        ref_bold_mask
            Skull-stripping mask of reference image
        t1_brain
            Skull-stripped bias-corrected structural template image
        t1_mask
            Mask of the skull-stripped template image
        t1_aseg
            FreeSurfer's ``aseg.mgz`` atlas projected into the T1w reference
            (only if ``recon-all`` was run).
        t1_aparc
            FreeSurfer's ``aparc+aseg.mgz`` atlas projected into the T1w reference
            (only if ``recon-all`` was run).
        bold_split
            Individual 3D BOLD volumes, not motion corrected
        hmc_xforms
            List of affine transforms aligning each volume to ``ref_image`` in ITK format
        itk_bold_to_t1
            Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
        fieldwarp
            a :abbr:`DFM (displacements field map)` in ITK format

    **Outputs**

        bold_t1
            Motion-corrected BOLD series in T1 space
        bold_t1_ref
            Reference, contrast-enhanced summary of the motion-corrected BOLD series in T1w space
        bold_mask_t1
            BOLD mask in T1 space
        bold_aseg_t1
            FreeSurfer's ``aseg.mgz`` atlas, in T1w-space at the BOLD resolution
            (only if ``recon-all`` was run).
        bold_aparc_t1
            FreeSurfer's ``aparc+aseg.mgz`` atlas, in T1w-space at the BOLD resolution
            (only if ``recon-all`` was run).


    **Subworkflows**

        * :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`

    """
    from .util import init_bold_reference_wf
    workflow = Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=['name_source', 'ref_bold_brain', 'ref_bold_mask',
                    't1_brain', 't1_mask', 't1_aseg', 't1_aparc',
                    'bold_split', 'fieldwarp', 'hmc_xforms',
                    'itk_bold_to_t1']),
        name='inputnode'
    )

    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'bold_t1', 'bold_t1_ref', 'bold_mask_t1',
            'bold_aseg_t1', 'bold_aparc_t1']),
        name='outputnode'
    )

    gen_ref = pe.Node(GenerateSamplingReference(), name='gen_ref',
                      mem_gb=0.3)  # 256x256x256 * 64 / 8 ~ 150MB

    mask_t1w_tfm = pe.Node(
        ApplyTransforms(interpolation='MultiLabel', float=True),
        name='mask_t1w_tfm', mem_gb=0.1
    )

    workflow.connect([
        (inputnode, gen_ref, [('ref_bold_brain', 'moving_image'),
                              ('t1_brain', 'fixed_image'),
                              ('t1_mask', 'fov_mask')]),
        (inputnode, mask_t1w_tfm, [('ref_bold_mask', 'input_image')]),
        (gen_ref, mask_t1w_tfm, [('out_file', 'reference_image')]),
        (inputnode, mask_t1w_tfm, [('itk_bold_to_t1', 'transforms')]),
        (mask_t1w_tfm, outputnode, [('output_image', 'bold_mask_t1')]),
    ])

    if freesurfer:
        # Resample aseg and aparc in T1w space (no transforms needed)
        aseg_t1w_tfm = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', transforms='identity', float=True),
            name='aseg_t1w_tfm', mem_gb=0.1)
        aparc_t1w_tfm = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', transforms='identity', float=True),
            name='aparc_t1w_tfm', mem_gb=0.1)

        workflow.connect([
            (inputnode, aseg_t1w_tfm, [('t1_aseg', 'input_image')]),
            (inputnode, aparc_t1w_tfm, [('t1_aparc', 'input_image')]),
            (gen_ref, aseg_t1w_tfm, [('out_file', 'reference_image')]),
            (gen_ref, aparc_t1w_tfm, [('out_file', 'reference_image')]),
            (aseg_t1w_tfm, outputnode, [('output_image', 'bold_aseg_t1')]),
            (aparc_t1w_tfm, outputnode, [('output_image', 'bold_aparc_t1')]),
        ])

    bold_to_t1w_transform = pe.Node(
        MultiApplyTransforms(interpolation="LanczosWindowedSinc", float=True, copy_dtype=True),
        name='bold_to_t1w_transform', mem_gb=mem_gb * 3 * omp_nthreads, n_procs=omp_nthreads)

    # merge 3D volumes into 4D timeseries
    merge = pe.Node(Merge(compress=use_compression), name='merge', mem_gb=mem_gb)

    # Generate a reference on the target T1w space
    gen_final_ref = init_bold_reference_wf(omp_nthreads, pre_mask=True)

    if not multiecho:
        # Merge transforms placing the head motion correction last
        nforms = 2 + int(use_fieldwarp)
        merge_xforms = pe.Node(niu.Merge(nforms), name='merge_xforms',
                               run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
        if use_fieldwarp:
            workflow.connect([
                (inputnode, merge_xforms, [('fieldwarp', 'in2')])
            ])

        workflow.connect([
            # merge transforms
            (inputnode, merge_xforms, [
                ('hmc_xforms', 'in%d' % nforms),
                ('itk_bold_to_t1', 'in1')]),
            (merge_xforms, bold_to_t1w_transform, [('out', 'transforms')]),
            (inputnode, bold_to_t1w_transform, [('bold_split', 'input_image')]),
        ])

    else:
        from nipype.interfaces.fsl import Split as FSLSplit
        bold_split = pe.Node(FSLSplit(dimension='t'), name='bold_split',
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, bold_split, [('bold_split', 'in_file')]),
            (bold_split, bold_to_t1w_transform, [('out_files', 'input_image')]),
            (inputnode, bold_to_t1w_transform, [('itk_bold_to_t1', 'transforms')]),
        ])

    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (gen_ref, bold_to_t1w_transform, [('out_file', 'reference_image')]),
        (bold_to_t1w_transform, merge, [('out_files', 'in_files')]),
        (merge, gen_final_ref, [('out_file', 'inputnode.bold_file')]),
        (mask_t1w_tfm, gen_final_ref, [('output_image', 'inputnode.bold_mask')]),
        (merge, outputnode, [('out_file', 'bold_t1')]),
        (gen_final_ref, outputnode, [('outputnode.ref_image', 'bold_t1_ref')]),
    ])

    return workflow
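
# --- Usage sketch (added for illustration; not part of the original example) ---
# Hypothetical single-echo configuration of the factory above. With
# ``use_fieldwarp=True`` the SDC warp is merged between the BOLD->T1w affine
# (``in1``) and the per-volume head-motion transforms (see ``merge_xforms``).
bold_t1_trans_wf = init_bold_t1_trans_wf(
    freesurfer=True,
    mem_gb=3.0,          # approximate size of one BOLD run, in GB
    omp_nthreads=4,
    multiecho=False,
    use_fieldwarp=True,
)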
Example #8
def init_fmap_postproc_wf(omp_nthreads,
                          fmap_bspline,
                          median_kernel_size=5,
                          name='fmap_postproc_wf'):
    """
    Postprocess a B0 map estimated elsewhere.

    This workflow denoises (mostly via smoothing) a B0 fieldmap.

    Workflow Graph
        .. workflow ::
            :graph2use: orig
            :simple_form: yes

            from sdcflows.workflows.fmap import init_fmap_postproc_wf
            wf = init_fmap_postproc_wf(omp_nthreads=6, fmap_bspline=False)

    Parameters
    ----------
    omp_nthreads : int
        Maximum number of threads an individual process may use
    fmap_bspline : bool
        Whether the fieldmap should be smoothed and extrapolated to off-brain regions
        using B-Spline basis.
    median_kernel_size : int
        Size of the kernel when smoothing is done with a median filter.
    name : str
        Name of workflow (default: ``fmap_postproc_wf``)

    Inputs
    ------
    fmap_mask : pathlike
        A brain binary mask corresponding to this fieldmap.
    fmap_ref : pathlike
        A preprocessed magnitude/reference image for the fieldmap.
    fmap : pathlike
        A B0-field nonuniformity map (aka fieldmap) estimated elsewhere.

    Outputs
    -------
    out_fmap : pathlike
        Postprocessed fieldmap.

    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['fmap_mask', 'fmap_ref', 'fmap', 'metadata']),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['out_fmap', 'metadata']),
        name='outputnode')
    if fmap_bspline:
        from ..interfaces.fmap import FieldEnhance
        # despike_threshold=1.0, mask_erode=1),
        fmapenh = pe.Node(FieldEnhance(unwrap=False, despike=False),
                          name='fmapenh',
                          mem_gb=4,
                          n_procs=omp_nthreads)

        workflow.connect([
            (inputnode, fmapenh, [('fmap_mask', 'in_mask'),
                                  ('fmap_ref', 'in_magnitude'),
                                  (('fmap', _pop), 'in_file')]),
            (fmapenh, outputnode, [('out_file', 'out_fmap')]),
        ])

    else:
        recenter = pe.Node(niu.Function(function=_recenter),
                           name='recenter',
                           run_without_submitting=True)
        denoise = pe.Node(fsl.SpatialFilter(operation='median',
                                            kernel_shape='sphere',
                                            kernel_size=median_kernel_size),
                          name='denoise')
        demean = pe.Node(niu.Function(function=_demean), name='demean')
        cleanup_wf = cleanup_edge_pipeline(name="cleanup_wf")

        workflow.connect([
            (inputnode, cleanup_wf, [('fmap_mask', 'inputnode.in_mask')]),
            (inputnode, recenter, [(('fmap', _pop), 'in_file')]),
            (recenter, denoise, [('out', 'in_file')]),
            (denoise, demean, [('out_file', 'in_file')]),
            (demean, cleanup_wf, [('out', 'inputnode.in_file')]),
            (cleanup_wf, outputnode, [('outputnode.out_file', 'out_fmap')]),
            (inputnode, outputnode, [(('metadata', _pop), 'metadata')]),
        ])

    return workflow
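
# --- Usage sketch (added for illustration; not part of the original example) ---
# Hypothetical instantiation taking the median-filter branch above
# (``fmap_bspline=False``): the fieldmap is recentered, median-smoothed,
# demeaned and edge-cleaned before being exposed as ``out_fmap``.
fmap_postproc_wf = init_fmap_postproc_wf(
    omp_nthreads=4,
    fmap_bspline=False,
    median_kernel_size=5,  # kernel size (in voxels) for fsl.SpatialFilter
)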
Example #9
def init_bold_reference_wf(omp_nthreads, bold_file=None, pre_mask=False,
                           name='bold_reference_wf', gen_report=False):
    """
    This workflow generates reference BOLD images for a series

    The raw reference image is the target of :abbr:`HMC (head motion correction)`, and a
    contrast-enhanced reference is the subject of distortion correction, as well as
    boundary-based registration to T1w and template spaces.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_reference_wf
        wf = init_bold_reference_wf(omp_nthreads=1)

    **Parameters**

        bold_file : str
            BOLD series NIfTI file
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``bold_reference_wf``)
        gen_report : bool
            Whether a mask report node should be appended in the end
        pre_mask : bool
            Whether the ``bold_mask`` input will be provided, skipping the tentative
            mask estimation inside the enhance-and-skullstrip subworkflow

    **Inputs**

        bold_file
            BOLD series NIfTI file
        bold_mask
            A tentative brain mask to initialize the workflow (requires ``pre_mask``
            parameter set ``True``).

    **Outputs**

        bold_file
            Validated BOLD series NIfTI file
        raw_ref_image
            Reference image to which BOLD series is motion corrected
        skip_vols
            Number of non-steady-state volumes detected at beginning of ``bold_file``
        ref_image
            Contrast-enhanced reference image
        ref_image_brain
            Skull-stripped reference image
        bold_mask
            Skull-stripping mask of reference image
        validation_report
            HTML reportlet indicating whether ``bold_file`` had a valid affine


    **Subworkflows**

        * :py:func:`~fmriprep.workflows.bold.util.init_enhance_and_skullstrip_wf`

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
First, a reference volume and its skull-stripped version were generated
using a custom methodology of *fMRIPrep*.
"""
    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file', 'sbref_file', 'bold_mask']),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['bold_file', 'raw_ref_image', 'skip_vols', 'ref_image',
                                      'ref_image_brain', 'bold_mask', 'validation_report',
                                      'mask_report']),
        name='outputnode')

    # Simplify manually setting input image
    if bold_file is not None:
        inputnode.inputs.bold_file = bold_file

    validate = pe.Node(ValidateImage(), name='validate', mem_gb=DEFAULT_MEMORY_MIN_GB)

    gen_ref = pe.Node(EstimateReferenceImage(), name="gen_ref",
                      mem_gb=1)  # OE: 128x128x128x50 * 64 / 8 ~ 900MB.
    # Re-run validation; no effect if no sbref; otherwise apply same validation to sbref as bold
    validate_ref = pe.Node(ValidateImage(), name='validate_ref', mem_gb=DEFAULT_MEMORY_MIN_GB)
    enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf(
        omp_nthreads=omp_nthreads, pre_mask=pre_mask)

    workflow.connect([
        (inputnode, enhance_and_skullstrip_bold_wf, [('bold_mask', 'inputnode.pre_mask')]),
        (inputnode, validate, [('bold_file', 'in_file')]),
        (inputnode, gen_ref, [('sbref_file', 'sbref_file')]),
        (validate, gen_ref, [('out_file', 'in_file')]),
        (gen_ref, validate_ref, [('ref_image', 'in_file')]),
        (validate_ref, enhance_and_skullstrip_bold_wf, [('out_file', 'inputnode.in_file')]),
        (validate, outputnode, [('out_file', 'bold_file'),
                                ('out_report', 'validation_report')]),
        (gen_ref, outputnode, [('n_volumes_to_discard', 'skip_vols')]),
        (validate_ref, outputnode, [('out_file', 'raw_ref_image')]),
        (enhance_and_skullstrip_bold_wf, outputnode, [
            ('outputnode.bias_corrected_file', 'ref_image'),
            ('outputnode.mask_file', 'bold_mask'),
            ('outputnode.skull_stripped_file', 'ref_image_brain')]),
    ])

    if gen_report:
        mask_reportlet = pe.Node(SimpleShowMaskRPT(), name='mask_reportlet')
        workflow.connect([
            (enhance_and_skullstrip_bold_wf, mask_reportlet, [
                ('outputnode.bias_corrected_file', 'background_file'),
                ('outputnode.mask_file', 'mask_file'),
            ]),
        ])

    return workflow
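
# --- Usage sketch (added for illustration; not part of the original example) ---
# Hypothetical call with an externally computed tentative mask: ``pre_mask=True``
# makes the enhance-and-skullstrip subworkflow expect ``inputnode.bold_mask``
# instead of estimating one by template registration. Paths are placeholders.
bold_reference_wf = init_bold_reference_wf(omp_nthreads=4, pre_mask=True, gen_report=True)
bold_reference_wf.inputs.inputnode.bold_file = 'sub-01_task-rest_bold.nii.gz'              # placeholder
bold_reference_wf.inputs.inputnode.bold_mask = 'sub-01_task-rest_desc-brain_mask.nii.gz'   # placeholder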
Example #10
def init_skullstrip_bold_wf(name='skullstrip_bold_wf'):
    """
    This workflow applies skull-stripping to a BOLD image.

    It is intended to be used on an image that has previously been
    bias-corrected with
    :py:func:`~fmriprep.workflows.bold.util.init_enhance_and_skullstrip_bold_wf`

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.util import init_skullstrip_bold_wf
        wf = init_skullstrip_bold_wf()


    Inputs

        in_file
            BOLD image (single volume)


    Outputs

        skull_stripped_file
            the ``in_file`` after skull-stripping
        mask_file
            mask of the skull-stripped input file
        out_report
            reportlet for the skull-stripping

    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['mask_file',
                                                       'skull_stripped_file',
                                                       'out_report']),
                         name='outputnode')
    skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True),
                                    name='skullstrip_first_pass')
    skullstrip_second_pass = pe.Node(afni.Automask(dilate=1, outputtype='NIFTI_GZ'),
                                     name='skullstrip_second_pass')
    combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'), name='combine_masks')
    apply_mask = pe.Node(fsl.ApplyMask(), name='apply_mask')
    mask_reportlet = pe.Node(SimpleShowMaskRPT(), name='mask_reportlet')

    workflow.connect([
        (inputnode, skullstrip_first_pass, [('in_file', 'in_file')]),
        (skullstrip_first_pass, skullstrip_second_pass, [('out_file', 'in_file')]),
        (skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]),
        (skullstrip_second_pass, combine_masks, [('out_file', 'operand_file')]),
        (combine_masks, outputnode, [('out_file', 'mask_file')]),
        # Masked file
        (inputnode, apply_mask, [('in_file', 'in_file')]),
        (combine_masks, apply_mask, [('out_file', 'mask_file')]),
        (apply_mask, outputnode, [('out_file', 'skull_stripped_file')]),
        # Reportlet
        (inputnode, mask_reportlet, [('in_file', 'background_file')]),
        (combine_masks, mask_reportlet, [('out_file', 'mask_file')]),
        (mask_reportlet, outputnode, [('out_report', 'out_report')]),
    ])

    return workflow
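
# --- Illustration (added; not part of the original example) ---
# The ``combine_masks`` node above multiplies the BET mask by the AFNI automask
# (``fsl.BinaryMaths(operation='mul')``); for binary masks that product is simply
# their intersection, as this small numpy check shows.
import numpy as np

_bet_mask = np.array([1, 1, 0, 0], dtype=np.uint8)
_automask = np.array([1, 0, 1, 0], dtype=np.uint8)
assert np.array_equal(_bet_mask * _automask, np.array([1, 0, 0, 0], dtype=np.uint8))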
Example #11
def init_enhance_and_skullstrip_bold_wf(
        name='enhance_and_skullstrip_bold_wf',
        pre_mask=False,
        omp_nthreads=1):
    """
    This workflow takes in a :abbr:`BOLD (blood-oxygen level-dependent)`
    :abbr:`fMRI (functional MRI)` average/summary (e.g. a reference image
    averaging non-steady-state timepoints), and sharpens the histogram
    with the application of the N4 algorithm for removing the
    :abbr:`INU (intensity non-uniformity)` bias field and calculates a signal
    mask.

    Steps of this workflow are:

      1. Calculate a tentative mask by registering (9-parameters) to *fMRIPrep*'s
         :abbr:`EPI (echo-planar imaging)` -*boldref* template, which
         is in MNI space.
         The tentative mask is obtained by resampling the MNI template's
         brainmask into *boldref*-space.
      2. Binary dilation of the tentative mask with a sphere of 3mm diameter.
      3. Run ANTs' ``N4BiasFieldCorrection`` on the input
         :abbr:`BOLD (blood-oxygen level-dependent)` average, using the
         mask generated in 1) (after the dilation of 2)) instead of the
         internal Otsu thresholding.
      4. Calculate a loose mask using FSL's ``bet``, followed by one iteration
         of binary dilation with a sphere of 6mm as structuring element.
      5. Mask the :abbr:`INU (intensity non-uniformity)`-corrected image
         with the latest mask calculated in 4), then use AFNI's ``3dUnifize``
         to *standardize* the T2* contrast distribution.
      6. Calculate a mask using AFNI's ``3dAutomask`` after the contrast
         enhancement of 5).
      7. Calculate a final mask as the intersection of 4) and 6).
      8. Apply final mask on the enhanced reference.

    Step 1 can be skipped if the ``pre_mask`` argument is set to ``True`` and
    a tentative mask is passed to the workflow through the ``pre_mask``
    Nipype input.


    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.util import init_enhance_and_skullstrip_bold_wf
        wf = init_enhance_and_skullstrip_bold_wf(omp_nthreads=1)

    **Parameters**
        name : str
            Name of workflow (default: ``enhance_and_skullstrip_bold_wf``)
        pre_mask : bool
            Indicates whether the ``pre_mask`` input will be set (and thus, step 1
            should be skipped).
        omp_nthreads : int
            number of threads available to parallel nodes

    **Inputs**

        in_file
            BOLD image (single volume)
        pre_mask
            A tentative brain mask to initialize the workflow (requires ``pre_mask``
            parameter set ``True``).


    **Outputs**

        bias_corrected_file
            the ``in_file`` after `N4BiasFieldCorrection`_
        skull_stripped_file
            the ``bias_corrected_file`` after skull-stripping
        mask_file
            mask of the skull-stripped input file
        out_report
            reportlet for the skull-stripping

    .. _N4BiasFieldCorrection: https://hdl.handle.net/10380/3053
    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'pre_mask']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'mask_file', 'skull_stripped_file', 'bias_corrected_file']), name='outputnode')

    # Dilate pre_mask
    pre_dilate = pe.Node(fsl.DilateImage(
        operation='max', kernel_shape='sphere', kernel_size=3.0,
        internal_datatype='char'), name='pre_mask_dilate')

    # Ensure mask's header matches reference's
    check_hdr = pe.Node(MatchHeader(), name='check_hdr',
                        run_without_submitting=True)

    # Run N4 normally, force num_threads=1 for stability (images are small, no need for >1)
    n4_correct = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
                         name='n4_correct', n_procs=1)

    # Create a generous BET mask out of the bias-corrected EPI
    skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True),
                                    name='skullstrip_first_pass')
    bet_dilate = pe.Node(fsl.DilateImage(
        operation='max', kernel_shape='sphere', kernel_size=6.0,
        internal_datatype='char'), name='skullstrip_first_dilate')
    bet_mask = pe.Node(fsl.ApplyMask(), name='skullstrip_first_mask')

    # Use AFNI's unifize for T2 contrast & fix header
    unifize = pe.Node(afni.Unifize(
        t2=True, outputtype='NIFTI_GZ',
        # Default -clfrac is 0.1, 0.4 was too conservative
        # -rbt because I'm a Jedi AFNI Master (see 3dUnifize's documentation)
        args='-clfrac 0.2 -rbt 18.3 65.0 90.0',
        out_file="uni.nii.gz"), name='unifize')
    fixhdr_unifize = pe.Node(CopyXForm(), name='fixhdr_unifize', mem_gb=0.1)

    # Run AFNI's 3dAutomask to extract a refined brain mask
    skullstrip_second_pass = pe.Node(afni.Automask(dilate=1,
                                                   outputtype='NIFTI_GZ'),
                                     name='skullstrip_second_pass')
    fixhdr_skullstrip2 = pe.Node(CopyXForm(), name='fixhdr_skullstrip2', mem_gb=0.1)

    # Take intersection of both masks
    combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'),
                            name='combine_masks')

    # Compute masked brain
    apply_mask = pe.Node(fsl.ApplyMask(), name='apply_mask')

    if not pre_mask:
        bold_template = get_template('fMRIPrep') / 'tpl-fMRIPrep_space-MNI_res-02_boldref.nii.gz'
        brain_mask = get_template('MNI152NLin2009cAsym') / \
            'tpl-MNI152NLin2009cAsym_space-MNI_res-02_brainmask.nii.gz'

        # Initialize transforms with antsAI
        init_aff = pe.Node(AI(
            fixed_image=str(bold_template),
            fixed_image_mask=str(brain_mask),
            metric=('Mattes', 32, 'Regular', 0.2),
            transform=('Affine', 0.1),
            search_factor=(20, 0.12),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            verbose=True),
            name='init_aff',
            n_procs=omp_nthreads)

        # Registration().version may be None
        if parseversion(Registration().version or '0.0.0') > Version('2.2.0'):
            init_aff.inputs.search_grid = (40, (0, 40, 40))

        # Set up spatial normalization
        norm = pe.Node(Registration(
            from_file=pkgr_fn(
                'fmriprep.data',
                'epi_atlasbased_brainmask.json')),
            name='norm',
            n_procs=omp_nthreads)
        norm.inputs.fixed_image = str(bold_template)
        map_brainmask = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', float=True, input_image=str(brain_mask)),
            name='map_brainmask'
        )
        workflow.connect([
            (inputnode, init_aff, [('in_file', 'moving_image')]),
            (inputnode, map_brainmask, [('in_file', 'reference_image')]),
            (inputnode, norm, [('in_file', 'moving_image')]),
            (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
            (norm, map_brainmask, [
                ('reverse_invert_flags', 'invert_transform_flags'),
                ('reverse_transforms', 'transforms')]),
            (map_brainmask, pre_dilate, [('output_image', 'in_file')]),
        ])
    else:
        workflow.connect([
            (inputnode, pre_dilate, [('pre_mask', 'in_file')]),
        ])

    workflow.connect([
        (inputnode, check_hdr, [('in_file', 'reference')]),
        (pre_dilate, check_hdr, [('out_file', 'in_file')]),
        (check_hdr, n4_correct, [('out_file', 'mask_image')]),
        (inputnode, n4_correct, [('in_file', 'input_image')]),
        (inputnode, fixhdr_unifize, [('in_file', 'hdr_file')]),
        (inputnode, fixhdr_skullstrip2, [('in_file', 'hdr_file')]),
        (n4_correct, skullstrip_first_pass, [('output_image', 'in_file')]),
        (skullstrip_first_pass, bet_dilate, [('mask_file', 'in_file')]),
        (bet_dilate, bet_mask, [('out_file', 'mask_file')]),
        (skullstrip_first_pass, bet_mask, [('out_file', 'in_file')]),
        (bet_mask, unifize, [('out_file', 'in_file')]),
        (unifize, fixhdr_unifize, [('out_file', 'in_file')]),
        (fixhdr_unifize, skullstrip_second_pass, [('out_file', 'in_file')]),
        (skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]),
        (skullstrip_second_pass, fixhdr_skullstrip2, [('out_file', 'in_file')]),
        (fixhdr_skullstrip2, combine_masks, [('out_file', 'operand_file')]),
        (fixhdr_unifize, apply_mask, [('out_file', 'in_file')]),
        (combine_masks, apply_mask, [('out_file', 'mask_file')]),
        (combine_masks, outputnode, [('out_file', 'mask_file')]),
        (apply_mask, outputnode, [('out_file', 'skull_stripped_file')]),
        (n4_correct, outputnode, [('output_image', 'bias_corrected_file')]),
    ])

    return workflow
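
# --- Usage sketch (added for illustration; not part of the original example) ---
# Two hypothetical configurations of the factory above: with ``pre_mask=True``
# step 1 (template-based tentative masking) is skipped and the caller must feed
# ``inputnode.pre_mask``; with the default ``pre_mask=False`` the antsAI/ANTs
# registration branch provides the tentative mask.
enhance_wf_with_mask = init_enhance_and_skullstrip_bold_wf(
    omp_nthreads=4, pre_mask=True, name='enhance_premask_wf')
enhance_wf_template = init_enhance_and_skullstrip_bold_wf(omp_nthreads=4)  # pre_mask=False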
Example #12
def init_bold_stc_wf(metadata, name='bold_stc_wf'):
    """
    This workflow performs :abbr:`STC (slice-timing correction)` over the input
    :abbr:`BOLD (blood-oxygen-level dependent)` image.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_stc_wf
        wf = init_bold_stc_wf(
            metadata={"RepetitionTime": 2.0,
                      "SliceTiming": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]},
            )

    **Parameters**

        metadata : dict
            BIDS metadata for BOLD file
        name : str
            Name of workflow (default: ``bold_stc_wf``)

    **Inputs**

        bold_file
            BOLD series NIfTI file
        skip_vols
            Number of non-steady-state volumes detected at beginning of ``bold_file``

    **Outputs**

        stc_file
            Slice-timing corrected BOLD series NIfTI file

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
BOLD runs were slice-time corrected using `3dTshift` from
AFNI {afni_ver} [@afni, RRID:SCR_005927].
""".format(afni_ver=''.join(['%02d' % v for v in afni.Info().version() or []]))
    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file', 'skip_vols']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['stc_file']), name='outputnode')

    LOGGER.log(25, 'Slice-timing correction will be included.')

    # It would be good to fingerprint memory use of afni.TShift
    slice_timing_correction = pe.Node(
        afni.TShift(outputtype='NIFTI_GZ',
                    tr='{}s'.format(metadata["RepetitionTime"]),
                    slice_timing=metadata['SliceTiming'],
                    slice_encoding_direction=metadata.get('SliceEncodingDirection', 'k')),
        name='slice_timing_correction')

    copy_xform = pe.Node(CopyXForm(), name='copy_xform', mem_gb=0.1)

    workflow.connect([
        (inputnode, slice_timing_correction, [('bold_file', 'in_file'),
                                              ('skip_vols', 'ignore')]),
        (slice_timing_correction, copy_xform, [('out_file', 'in_file')]),
        (inputnode, copy_xform, [('bold_file', 'hdr_file')]),
        (copy_xform, outputnode, [('out_file', 'stc_file')]),
    ])

    return workflow
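
# --- Usage sketch (added for illustration; not part of the original example) ---
# Hypothetical metadata: ``RepetitionTime`` and ``SliceTiming`` are the only
# keys this factory requires; ``skip_vols`` (non-steady-state volumes) is
# forwarded to 3dTshift's ``ignore`` input (see the connections above).
stc_wf = init_bold_stc_wf(metadata={
    'RepetitionTime': 2.0,
    'SliceTiming': [0.0, 1.0, 0.5, 1.5],  # interleaved, 4 slices (toy example)
})
stc_wf.inputs.inputnode.skip_vols = 4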
Example #13
def init_func_derivatives_wf(output_dir, output_spaces, template, freesurfer,
                             use_aroma, cifti_output, name='func_derivatives_wf'):
    """
    Set up a battery of datasinks to store derivatives in the right location
    """
    workflow = Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=['source_file',
                    'bold_t1', 'bold_t1_ref', 'bold_mask_t1',
                    'bold_mni', 'bold_mni_ref', 'bold_mask_mni',
                    'bold_aseg_t1', 'bold_aparc_t1', 'bold_aseg_mni',
                    'bold_aparc_mni', 'cifti_variant_key',
                    'confounds', 'surfaces', 'aroma_noise_ics', 'melodic_mix',
                    'nonaggr_denoised_file', 'bold_cifti', 'cifti_variant']),
        name='inputnode')

    ds_confounds = pe.Node(DerivativesDataSink(
        base_directory=output_dir, desc='confounds', suffix='regressors'),
        name="ds_confounds", run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB)
    workflow.connect([
        (inputnode, ds_confounds, [('source_file', 'source_file'),
                                   ('confounds', 'in_file')]),
    ])

    # Resample to T1w space
    if 'T1w' in output_spaces:
        ds_bold_t1 = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space='T1w', desc='preproc',
                                keep_dtype=True, compress=True),
            name='ds_bold_t1', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_bold_t1_ref = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space='T1w', suffix='boldref'),
            name='ds_bold_t1_ref', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)

        ds_bold_mask_t1 = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space='T1w', desc='brain',
                                suffix='mask'),
            name='ds_bold_mask_t1', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        workflow.connect([
            (inputnode, ds_bold_t1, [('source_file', 'source_file'),
                                     ('bold_t1', 'in_file')]),
            (inputnode, ds_bold_t1_ref, [('source_file', 'source_file'),
                                         ('bold_t1_ref', 'in_file')]),
            (inputnode, ds_bold_mask_t1, [('source_file', 'source_file'),
                                          ('bold_mask_t1', 'in_file')]),
        ])
        if freesurfer:
            ds_bold_aseg_t1 = pe.Node(DerivativesDataSink(
                base_directory=output_dir, space='T1w', desc='aseg', suffix='dseg'),
                name='ds_bold_aseg_t1', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            ds_bold_aparc_t1 = pe.Node(DerivativesDataSink(
                base_directory=output_dir,  space='T1w', desc='aparcaseg', suffix='dseg'),
                name='ds_bold_aparc_t1', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            workflow.connect([
                (inputnode, ds_bold_aseg_t1, [('source_file', 'source_file'),
                                              ('bold_aseg_t1', 'in_file')]),
                (inputnode, ds_bold_aparc_t1, [('source_file', 'source_file'),
                                               ('bold_aparc_t1', 'in_file')]),
            ])

    # Resample to template (default: MNI)
    if 'template' in output_spaces:
        ds_bold_mni = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space=template, desc='preproc',
                                keep_dtype=True, compress=True),
            name='ds_bold_mni', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_bold_mni_ref = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space=template, suffix='boldref'),
            name='ds_bold_mni_ref', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)

        ds_bold_mask_mni = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space=template, desc='brain',
                                suffix='mask'),
            name='ds_bold_mask_mni', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        workflow.connect([
            (inputnode, ds_bold_mni, [('source_file', 'source_file'),
                                      ('bold_mni', 'in_file')]),
            (inputnode, ds_bold_mni_ref, [('source_file', 'source_file'),
                                          ('bold_mni_ref', 'in_file')]),
            (inputnode, ds_bold_mask_mni, [('source_file', 'source_file'),
                                           ('bold_mask_mni', 'in_file')]),
        ])

        if freesurfer:
            ds_bold_aseg_mni = pe.Node(DerivativesDataSink(
                base_directory=output_dir, space=template, desc='aseg', suffix='dseg'),
                name='ds_bold_aseg_mni', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            ds_bold_aparc_mni = pe.Node(DerivativesDataSink(
                base_directory=output_dir,  space=template, desc='aparcaseg', suffix='dseg'),
                name='ds_bold_aparc_mni', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            workflow.connect([
                (inputnode, ds_bold_aseg_mni, [('source_file', 'source_file'),
                                               ('bold_aseg_mni', 'in_file')]),
                (inputnode, ds_bold_aparc_mni, [('source_file', 'source_file'),
                                                ('bold_aparc_mni', 'in_file')]),
            ])

    # fsaverage space
    if freesurfer and any(space.startswith('fs') for space in output_spaces):
        name_surfs = pe.MapNode(GiftiNameSource(
            pattern=r'(?P<LR>[lr])h.(?P<space>\w+).gii', template='space-{space}_hemi-{LR}.func'),
            iterfield='in_file', name='name_surfs', mem_gb=DEFAULT_MEMORY_MIN_GB,
            run_without_submitting=True)
        ds_bold_surfs = pe.MapNode(DerivativesDataSink(base_directory=output_dir),
                                   iterfield=['in_file', 'suffix'], name='ds_bold_surfs',
                                   run_without_submitting=True,
                                   mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, name_surfs, [('surfaces', 'in_file')]),
            (inputnode, ds_bold_surfs, [('source_file', 'source_file'),
                                        ('surfaces', 'in_file')]),
            (name_surfs, ds_bold_surfs, [('out_name', 'suffix')]),
        ])

        # CIFTI output
        if cifti_output and 'template' in output_spaces:
            name_cifti = pe.MapNode(
                CiftiNameSource(), iterfield=['variant'], name='name_cifti',
                mem_gb=DEFAULT_MEMORY_MIN_GB, run_without_submitting=True)
            cifti_bolds = pe.MapNode(
                DerivativesDataSink(base_directory=output_dir, compress=False),
                iterfield=['in_file', 'suffix'], name='cifti_bolds',
                run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
            cifti_key = pe.MapNode(DerivativesDataSink(
                base_directory=output_dir), iterfield=['in_file', 'suffix'],
                name='cifti_key', run_without_submitting=True,
                mem_gb=DEFAULT_MEMORY_MIN_GB)
            workflow.connect([
                (inputnode, name_cifti, [('cifti_variant', 'variant')]),
                (inputnode, cifti_bolds, [('bold_cifti', 'in_file'),
                                          ('source_file', 'source_file')]),
                (name_cifti, cifti_bolds, [('out_name', 'suffix')]),
                (name_cifti, cifti_key, [('out_name', 'suffix')]),
                (inputnode, cifti_key, [('source_file', 'source_file'),
                                        ('cifti_variant_key', 'in_file')]),
            ])

    if use_aroma:
        ds_aroma_noise_ics = pe.Node(DerivativesDataSink(
            base_directory=output_dir, suffix='AROMAnoiseICs'),
            name="ds_aroma_noise_ics", run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_melodic_mix = pe.Node(DerivativesDataSink(
            base_directory=output_dir, desc='MELODIC', suffix='mixing'),
            name="ds_melodic_mix", run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)
        ds_aroma_mni = pe.Node(
            DerivativesDataSink(base_directory=output_dir, space=template,
                                desc='smoothAROMAnonaggr', keep_dtype=True),
            name='ds_aroma_mni', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, ds_aroma_noise_ics, [('source_file', 'source_file'),
                                             ('aroma_noise_ics', 'in_file')]),
            (inputnode, ds_melodic_mix, [('source_file', 'source_file'),
                                         ('melodic_mix', 'in_file')]),
            (inputnode, ds_aroma_mni, [('source_file', 'source_file'),
                                       ('nonaggr_denoised_file', 'in_file')]),
        ])

    return workflow
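
# --- Usage sketch (added for illustration; not part of the original example) ---
# Hypothetical call to the legacy variant above, which still takes a plain list
# of output spaces and a single template name instead of a SpatialReferences
# object. Paths are placeholders.
legacy_func_derivatives_wf = init_func_derivatives_wf(
    output_dir='/data/derivatives',
    output_spaces=['T1w', 'template', 'fsaverage5'],
    template='MNI152NLin2009cAsym',
    freesurfer=True,
    use_aroma=False,
    cifti_output=False,
)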
Example #14
def init_sdc_wf(boldref, omp_nthreads=1, debug=False, ignore=None):
    """
    This workflow implements the heuristics to choose a
    :abbr:`SDC (susceptibility distortion correction)` strategy.
    When no field map information is present within the BIDS inputs,
    the EXPERIMENTAL "fieldmap-less SyN" method can be performed using
    the ``--use-syn`` argument. When ``--force-syn`` is specified,
    the "fieldmap-less SyN" method is always executed and reported,
    even if other fieldmaps with higher priority are available.
    In the latter case (some fieldmap is available and ``--force-syn``
    is requested), the :abbr:`SDC (susceptibility distortion correction)`
    method actually applied is the one with the highest priority.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from sdcflows.workflows.base import init_sdc_wf
        wf = init_sdc_wf(
            fmaps=[{
                'suffix': 'phasediff',
                'phasediff': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_phasediff.nii.gz',
                'magnitude1': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude1.nii.gz',
                'magnitude2': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude2.nii.gz',
            }],
            bold_meta={
                'RepetitionTime': 2.0,
                'SliceTiming': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
                'PhaseEncodingDirection': 'j',
            },
        )

    **Parameters**

        boldref : pybids.BIDSFile
            A BIDSFile object with suffix ``bold``, ``sbref`` or ``dwi``.
        omp_nthreads : int
            Maximum number of threads an individual process may use
        debug : bool
            Enable debugging outputs
        ignore : list or tuple, optional
            Correction strategies to skip; include ``'fieldmaps'`` to bypass
            :abbr:`SDC (susceptibility distortion correction)` altogether

    **Inputs**
        bold_ref
            A BOLD reference calculated at a previous stage
        bold_ref_brain
            Same as above, but brain-masked
        bold_mask
            Brain mask for the BOLD run
        t1_brain
            T1w image, brain-masked, for the fieldmap-less SyN method
        std2anat_xfm
            List of standard-to-T1w transforms generated during spatial
            normalization (only for the fieldmap-less SyN method).
        template : str
            Name of template from which prior knowledge will be mapped
            into the subject's T1w reference
            (only for the fieldmap-less SyN method)
        templates : str
            Name of templates that index the ``std2anat_xfm`` input list
            (only for the fieldmap-less SyN method).


    **Outputs**
        bold_ref
            An unwarped BOLD reference
        bold_mask
            The corresponding new mask after unwarping
        bold_ref_brain
            Brain-extracted, unwarped BOLD reference
        out_warp
            The deformation field to unwarp the susceptibility distortions
        syn_bold_ref
            If ``--force-syn``, an unwarped BOLD reference with this
            method (for reporting purposes)

    """

    if ignore is None:
        ignore = tuple()

    if not isinstance(ignore, (list, tuple)):
        ignore = tuple(ignore)

    fmaps = defaultdict(list, [])
    for associated in boldref.get_associations(kind='InformedBy'):
        if associated.suffix == 'epi':
            fmaps[associated.suffix].append(associated)
        # elif associated.suffix in ('phase', 'phasediff', 'fieldmap'):
        #     fmaps['fieldmap'].append(associated)
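    # (Added note; hypothetical filenames.) After the loop above, ``fmaps`` groups
    # the associated fieldmap files by suffix, e.g.:
    #   {'epi': [<BIDSFile ..._dir-AP_epi.nii.gz>, <BIDSFile ..._dir-PA_epi.nii.gz>]}
    # Only the 'epi' (PEPOLAR) suffix is currently collected; the other estimators
    # remain commented out below.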

    workflow = Workflow(name='sdc_wf' if boldref else 'sdc_bypass_wf')
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_ref', 'bold_ref_brain', 'bold_mask',
                't1_brain', 'std2anat_xfm', 'template', 'templates']),
        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_ref', 'bold_mask', 'bold_ref_brain',
                'out_warp', 'syn_bold_ref', 'method']),
        name='outputnode')

    # No fieldmaps - forward inputs to outputs
    if not fmaps or 'fieldmaps' in ignore:
        workflow.__postdesc__ = """\
Susceptibility distortion correction (SDC) has been skipped because the
dataset does not contain extra field map acquisitions correctly described
with metadata, and the experimental SDC-SyN method was not explicitly selected.
"""
        outputnode.inputs.method = 'None'
        workflow.connect([
            (inputnode, outputnode, [('bold_ref', 'bold_ref'),
                                     ('bold_mask', 'bold_mask'),
                                     ('bold_ref_brain', 'bold_ref_brain')]),
        ])
        return workflow

    workflow.__postdesc__ = """\
Based on the estimated susceptibility distortion, an
unwarped BOLD reference was calculated for a more accurate
co-registration with the anatomical reference.
"""

    # PEPOLAR path
    if 'epi' in fmaps:
        outputnode.inputs.method = 'PEB/PEPOLAR (phase-encoding based / PE-POLARity)'
        # Get EPI polarities and their metadata
        sdc_unwarp_wf = init_pepolar_unwarp_wf(
            bold_meta=boldref.get_metadata(),
            epi_fmaps=[(fmap, fmap.get_metadata()["PhaseEncodingDirection"])
                       for fmap in fmaps['epi']],
            omp_nthreads=omp_nthreads,
            name='pepolar_unwarp_wf')

        workflow.connect([
            (inputnode, sdc_unwarp_wf, [
                ('bold_ref', 'inputnode.in_reference'),
                ('bold_mask', 'inputnode.in_mask'),
                ('bold_ref_brain', 'inputnode.in_reference_brain')]),
        ])

    # FIELDMAP path
    # elif 'fieldmap' in fmaps:
    #     # Import specific workflows here, so we don't break everything with one
    #     # unused workflow.
    #     suffices = {f.suffix for f in fmaps['fieldmap']}
    #     if 'fieldmap' in suffices:
    #         from .fmap import init_fmap_wf
    #         outputnode.inputs.method = 'FMB (fieldmap-based)'
    #         fmap_estimator_wf = init_fmap_wf(
    #             omp_nthreads=omp_nthreads,
    #             fmap_bspline=False)
    #         # set inputs
    #         fmap_estimator_wf.inputs.inputnode.fieldmap = fmap['fieldmap']
    #         fmap_estimator_wf.inputs.inputnode.magnitude = fmap['magnitude']

    #     if fmap['suffix'] == 'phasediff':
    #         from .phdiff import init_phdiff_wf
    #         fmap_estimator_wf = init_phdiff_wf(omp_nthreads=omp_nthreads)
    #         # set inputs
    #         fmap_estimator_wf.inputs.inputnode.phasediff = fmap['phasediff']
    #         fmap_estimator_wf.inputs.inputnode.magnitude = [
    #             fmap_ for key, fmap_ in sorted(fmap.items())
    #             if key.startswith("magnitude")
    #         ]

    #     sdc_unwarp_wf = init_sdc_unwarp_wf(
    #         omp_nthreads=omp_nthreads,
    #         fmap_demean=fmap_demean,
    #         debug=debug,
    #         name='sdc_unwarp_wf')
    #     sdc_unwarp_wf.inputs.inputnode.metadata = bold_meta

    #     workflow.connect([
    #         (inputnode, sdc_unwarp_wf, [
    #             ('bold_ref', 'inputnode.in_reference'),
    #             ('bold_ref_brain', 'inputnode.in_reference_brain'),
    #             ('bold_mask', 'inputnode.in_mask')]),
    #         (fmap_estimator_wf, sdc_unwarp_wf, [
    #             ('outputnode.fmap', 'inputnode.fmap'),
    #             ('outputnode.fmap_ref', 'inputnode.fmap_ref'),
    #             ('outputnode.fmap_mask', 'inputnode.fmap_mask')]),
    #     ])

    # # FIELDMAP-less path
    # if any(fm['suffix'] == 'syn' for fm in fmaps):
    #     # Select template
    #     sdc_select_std = pe.Node(KeySelect(
    #         fields=['std2anat_xfm']),
    #         name='sdc_select_std', run_without_submitting=True)

    #     syn_sdc_wf = init_syn_sdc_wf(
    #         bold_pe=bold_meta.get('PhaseEncodingDirection', None),
    #         omp_nthreads=omp_nthreads)

    #     workflow.connect([
    #         (inputnode, sdc_select_std, [
    #             ('template', 'key'),
    #             ('templates', 'keys'),
    #             ('std2anat_xfm', 'std2anat_xfm')]),
    #         (sdc_select_std, syn_sdc_wf, [
    #             ('std2anat_xfm', 'inputnode.std2anat_xfm')]),
    #         (inputnode, syn_sdc_wf, [
    #             ('t1_brain', 'inputnode.t1_brain'),
    #             ('bold_ref', 'inputnode.bold_ref'),
    #             ('bold_ref_brain', 'inputnode.bold_ref_brain'),
    #             ('template', 'inputnode.template')]),
    #     ])

    #     # XXX Eliminate branch when forcing isn't an option
    #     if fmap['suffix'] == 'syn':  # No fieldmaps, but --use-syn
    #         outputnode.inputs.method = 'FLB ("fieldmap-less", SyN-based)'
    #         sdc_unwarp_wf = syn_sdc_wf
    #     else:  # --force-syn was called when other fieldmap was present
    #         sdc_unwarp_wf.__desc__ = None
    #         workflow.connect([
    #             (syn_sdc_wf, outputnode, [
    #                 ('outputnode.out_reference', 'syn_bold_ref')]),
    #         ])

    workflow.connect([
        (sdc_unwarp_wf, outputnode, [
            ('outputnode.out_warp', 'out_warp'),
            ('outputnode.out_reference', 'bold_ref'),
            ('outputnode.out_reference_brain', 'bold_ref_brain'),
            ('outputnode.out_mask', 'bold_mask')]),
    ])

    return workflow
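
A minimal usage sketch for the workflow above (hypothetical paths; the signature assumed here, init_sdc_wf(boldref, omp_nthreads=1, debug=False, ignore=None), is not shown in this excerpt):

# Hypothetical usage sketch -- assumes a BIDS dataset at /data/bids_root and
# the signature described in the lead-in above.
from bids import BIDSLayout

layout = BIDSLayout('/data/bids_root')
boldref = layout.get(suffix='bold', extension='.nii.gz')[0]

sdc_wf = init_sdc_wf(boldref, omp_nthreads=4, debug=False, ignore=None)
sdc_wf.inputs.inputnode.bold_ref = 'sub-01_task-rest_boldref.nii.gz'
sdc_wf.inputs.inputnode.bold_ref_brain = 'sub-01_task-rest_desc-brain_boldref.nii.gz'
sdc_wf.inputs.inputnode.bold_mask = 'sub-01_task-rest_desc-brain_mask.nii.gz'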
Example No. 15
def init_bbreg_wf(use_bbr, bold2t1w_dof, omp_nthreads, name='bbreg_wf'):
    """
    This workflow uses FreeSurfer's ``bbregister`` to register a BOLD image to
    a T1-weighted structural image.

    It is a counterpart to :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`,
    which performs the same task using FSL's FLIRT with a BBR cost function.

    The ``use_bbr`` option permits a high degree of control over registration.
    If ``False``, standard, affine coregistration will be performed using
    FreeSurfer's ``mri_coreg`` tool.
    If ``True``, ``bbregister`` will be seeded with the initial transform found
    by ``mri_coreg`` (equivalent to running ``bbregister --init-coreg``).
    If ``None``, after ``bbregister`` is run, the resulting affine transform
    will be compared to the initial transform found by ``mri_coreg``.
    Excessive deviation will result in rejecting the BBR refinement and
    accepting the original, affine registration.

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.registration import init_bbreg_wf
        wf = init_bbreg_wf(use_bbr=True, bold2t1w_dof=9, omp_nthreads=1)


    Parameters

        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        name : str, optional
            Workflow name (default: bbreg_wf)


    Inputs

        in_file
            Reference BOLD image to be registered
        t1_2_fsnative_reverse_transform
            FSL-style affine matrix translating from FreeSurfer T1.mgz to T1w
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID (must have folder in SUBJECTS_DIR)
        t1_brain
            Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`)
        t1_seg
            Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`)


    Outputs

        itk_bold_to_t1
            Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
        itk_t1_to_bold
            Affine transform from T1 space to BOLD space (ITK format)
        out_report
            Reportlet for assessing registration quality
        fallback
            Boolean indicating whether BBR was rejected (mri_coreg registration returned)

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD reference was then co-registered to the T1w reference using
`bbregister` (FreeSurfer) which implements boundary-based registration [@bbr].
Co-registration was configured with nine degrees of freedom to account
for distortions remaining in the BOLD reference.
"""

    inputnode = pe.Node(
        niu.IdentityInterface([
            'in_file',
            't1_2_fsnative_reverse_transform', 'subjects_dir', 'subject_id',  # BBRegister
            't1_seg', 't1_brain']),  # FLIRT BBR
        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(['itk_bold_to_t1', 'itk_t1_to_bold', 'out_report', 'fallback']),
        name='outputnode')

    mri_coreg = pe.Node(
        MRICoregRPT(dof=bold2t1w_dof, sep=[4], ftol=0.0001, linmintol=0.01,
                    generate_report=not use_bbr),
        name='mri_coreg', n_procs=omp_nthreads, mem_gb=5)

    lta_concat = pe.Node(ConcatenateLTA(out_file='out.lta'), name='lta_concat')
    # XXX LTA-FSL-ITK may ultimately be able to be replaced with a straightforward
    # LTA-ITK transform, but right now the translation parameters are off.
    lta2fsl_fwd = pe.Node(LTAConvert(out_fsl=True), name='lta2fsl_fwd')
    lta2fsl_inv = pe.Node(LTAConvert(out_fsl=True, invert=True), name='lta2fsl_inv')
    fsl2itk_fwd = pe.Node(c3.C3dAffineTool(fsl2ras=True, itk_transform=True),
                          name='fsl2itk_fwd', mem_gb=DEFAULT_MEMORY_MIN_GB)
    fsl2itk_inv = pe.Node(c3.C3dAffineTool(fsl2ras=True, itk_transform=True),
                          name='fsl2itk_inv', mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, mri_coreg, [('subjects_dir', 'subjects_dir'),
                                ('subject_id', 'subject_id'),
                                ('in_file', 'source_file')]),
        # Output ITK transforms
        (inputnode, lta_concat, [('t1_2_fsnative_reverse_transform', 'in_lta2')]),
        (lta_concat, lta2fsl_fwd, [('out_file', 'in_lta')]),
        (lta_concat, lta2fsl_inv, [('out_file', 'in_lta')]),
        (inputnode, fsl2itk_fwd, [('t1_brain', 'reference_file'),
                                  ('in_file', 'source_file')]),
        (inputnode, fsl2itk_inv, [('in_file', 'reference_file'),
                                  ('t1_brain', 'source_file')]),
        (lta2fsl_fwd, fsl2itk_fwd, [('out_fsl', 'transform_file')]),
        (lta2fsl_inv, fsl2itk_inv, [('out_fsl', 'transform_file')]),
        (fsl2itk_fwd, outputnode, [('itk_transform', 'itk_bold_to_t1')]),
        (fsl2itk_inv, outputnode, [('itk_transform', 'itk_t1_to_bold')]),
    ])

    # Short-circuit workflow building, use initial registration
    if use_bbr is False:
        workflow.connect([
            (mri_coreg, outputnode, [('out_report', 'out_report')]),
            (mri_coreg, lta_concat, [('out_lta_file', 'in_lta1')])])
        outputnode.inputs.fallback = True

        return workflow

    bbregister = pe.Node(
        BBRegisterRPT(dof=bold2t1w_dof, contrast_type='t2', registered_file=True,
                      out_lta_file=True, generate_report=True),
        name='bbregister', mem_gb=12)

    workflow.connect([
        (inputnode, bbregister, [('subjects_dir', 'subjects_dir'),
                                 ('subject_id', 'subject_id'),
                                 ('in_file', 'source_file')]),
        (mri_coreg, bbregister, [('out_lta_file', 'init_reg_file')]),
    ])

    # Short-circuit workflow building, use boundary-based registration
    if use_bbr is True:
        workflow.connect([
            (bbregister, outputnode, [('out_report', 'out_report')]),
            (bbregister, lta_concat, [('out_lta_file', 'in_lta1')])])
        outputnode.inputs.fallback = False

        return workflow

    transforms = pe.Node(niu.Merge(2), run_without_submitting=True, name='transforms')
    reports = pe.Node(niu.Merge(2), run_without_submitting=True, name='reports')

    lta_ras2ras = pe.MapNode(LTAConvert(out_lta=True), iterfield=['in_lta'],
                             name='lta_ras2ras', mem_gb=2)
    compare_transforms = pe.Node(niu.Function(function=compare_xforms), name='compare_transforms')

    select_transform = pe.Node(niu.Select(), run_without_submitting=True, name='select_transform')
    select_report = pe.Node(niu.Select(), run_without_submitting=True, name='select_report')

    workflow.connect([
        (bbregister, transforms, [('out_lta_file', 'in1')]),
        (mri_coreg, transforms, [('out_lta_file', 'in2')]),
        # Normalize LTA transforms to RAS2RAS (inputs are VOX2VOX) and compare
        (transforms, lta_ras2ras, [('out', 'in_lta')]),
        (lta_ras2ras, compare_transforms, [('out_lta', 'lta_list')]),
        (compare_transforms, outputnode, [('out', 'fallback')]),
        # Select output transform
        (transforms, select_transform, [('out', 'inlist')]),
        (compare_transforms, select_transform, [('out', 'index')]),
        (select_transform, lta_concat, [('out', 'in_lta1')]),
        # Select output report
        (bbregister, reports, [('out_report', 'in1')]),
        (mri_coreg, reports, [('out_report', 'in2')]),
        (reports, select_report, [('out', 'inlist')]),
        (compare_transforms, select_report, [('out', 'index')]),
        (select_report, outputnode, [('out', 'out_report')]),
    ])

    return workflow
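
The ``use_bbr=None`` branch above delegates the fallback decision to ``compare_xforms``, whose implementation is not part of this excerpt. A toy sketch of the underlying idea (not fmriprep's actual helper) follows: compare the BBR and mri_coreg affines and fall back when they disagree by more than a displacement threshold.

# Illustrative sketch only -- fmriprep ships its own compare_xforms helper;
# this toy version just shows the decision: if the BBR affine deviates too
# much from the mri_coreg affine, report a fallback (True selects index 1,
# i.e. the mri_coreg transform, in the Select nodes above).
import numpy as np

def naive_compare_xforms(affine_list, displacement_threshold_mm=15.0, radius_mm=50.0):
    """Return True (fallback) when two 4x4 RAS2RAS affines differ too much."""
    bbr_affine, coreg_affine = (np.asarray(aff, dtype=float) for aff in affine_list)
    residual = bbr_affine @ np.linalg.inv(coreg_affine)
    rotation_residual = residual[:3, :3] - np.eye(3)
    translation_residual = residual[:3, 3]
    # Rough bound on how far a point within radius_mm of the origin can move
    # under the residual transform.
    max_displacement = (np.linalg.norm(translation_residual)
                        + radius_mm * np.linalg.norm(rotation_residual, 2))
    return bool(max_displacement > displacement_threshold_mm)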
Example No. 16
def init_brainextraction_wf(name="brainextraction_wf"):
    """
    Remove nonbrain tissue from images.

    Parameters
    ----------
    name : :obj:`str`, optional
        Workflow name (default: ``"brainextraction_wf"``)

    Inputs
    ------
    in_file : :obj:`str`
        the GRE magnitude or EPI reference to be brain-extracted
    bspline_dist : :obj:`int`, optional
        Override the default b-spline fitting distance (control-point spacing) for N4

    Outputs
    -------
    out_file : :obj:`str`
        the input file after N4 and smart clipping
    out_brain : :obj:`str`
        the output file, just the brain extracted
    out_mask : :obj:`str`
        the calculated mask
    out_probseg : :obj:`str`
        a probability map of the random walker having reached
        each voxel (a "soft" brain mask)

    """
    from nipype.interfaces.ants import N4BiasFieldCorrection
    from niworkflows.interfaces.nibabel import IntensityClip
    from ..interfaces.brainmask import BrainExtraction

    wf = Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(fields=("in_file",
                                                      "bspline_dist")),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=(
            "out_file",
            "out_brain",
            "out_mask",
            "out_probseg",
        )),
        name="outputnode",
    )
    clipper_pre = pe.Node(IntensityClip(), name="clipper_pre")

    # de-gradient the fields ("bias/illumination artifact")
    n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
        ),
        n_procs=8,
        name="n4",
    )
    clipper_post = pe.Node(IntensityClip(p_min=0.01, p_max=99.9),
                           name="clipper_post")
    masker = pe.Node(BrainExtraction(), name="masker")

    # fmt:off
    wf.connect([
        (inputnode, clipper_pre, [("in_file", "in_file")]),
        (inputnode, n4, [("bspline_dist", "bspline_fitting_distance")]),
        (clipper_pre, n4, [("out_file", "input_image")]),
        (n4, clipper_post, [("output_image", "in_file")]),
        (clipper_post, masker, [("out_file", "in_file")]),
        (clipper_post, outputnode, [("out_file", "out_file")]),
        (masker, outputnode, [("out_file", "out_brain"),
                              ("out_mask", "out_mask"),
                              ("out_probseg", "out_probseg")]),
    ])
    # fmt:on

    return wf
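
A minimal usage sketch (the file name below is a placeholder):

# Hypothetical usage sketch: brain-extract a GRE magnitude image.
brainextraction_wf = init_brainextraction_wf()
brainextraction_wf.inputs.inputnode.in_file = 'sub-01_magnitude1.nii.gz'
brainextraction_wf.inputs.inputnode.bspline_dist = 200  # optional N4 tuning
results = brainextraction_wf.run()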
Example No. 17
def init_bold_reg_wf(freesurfer, use_bbr, bold2t1w_dof, mem_gb, omp_nthreads,
                     use_compression=True, write_report=True, name='bold_reg_wf'):
    """
    Calculates the registration between a reference BOLD image and T1-space
    using a boundary-based registration (BBR) cost function.

    If FreeSurfer-based preprocessing is enabled, the ``bbregister`` utility
    is used to align the BOLD images to the reconstructed subject, and the
    resulting transform is adjusted to target the T1 space.
    If FreeSurfer-based preprocessing is disabled, FSL FLIRT is used with the
    BBR cost function to directly target the T1 space.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.registration import init_bold_reg_wf
        wf = init_bold_reg_wf(freesurfer=True,
                              mem_gb=3,
                              omp_nthreads=1,
                              use_bbr=True,
                              bold2t1w_dof=9)

    **Parameters**

        freesurfer : bool
            Enable FreeSurfer functional registration (bbregister)
        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``bold_reg_wf``)
        use_compression : bool
            Save registered BOLD series as ``.nii.gz``
        write_report : bool
            Whether a reportlet should be stored

    **Inputs**

        ref_bold_brain
            Reference image to which BOLD series is aligned
            If susceptibility distortion correction has been applied, this should be the unwarped BOLD reference
        t1_brain
            Skull-stripped ``t1_preproc``
        t1_seg
            Segmentation of preprocessed structural image, including
            gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID
        t1_2_fsnative_reverse_transform
            LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w

    **Outputs**

        itk_bold_to_t1
            Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
        itk_t1_to_bold
            Affine transform from T1 space to BOLD space (ITK format)
        fallback
            Boolean indicating whether BBR was rejected (mri_coreg registration returned)


    **Subworkflows**

        * :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`

    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=['ref_bold_brain', 't1_brain', 't1_seg',
                    'subjects_dir', 'subject_id', 't1_2_fsnative_reverse_transform']),
        name='inputnode'
    )

    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'itk_bold_to_t1', 'itk_t1_to_bold', 'fallback']),
        name='outputnode'
    )

    if freesurfer:
        bbr_wf = init_bbreg_wf(use_bbr=use_bbr, bold2t1w_dof=bold2t1w_dof,
                               omp_nthreads=omp_nthreads)
    else:
        bbr_wf = init_fsl_bbr_wf(use_bbr=use_bbr, bold2t1w_dof=bold2t1w_dof)

    workflow.connect([
        (inputnode, bbr_wf, [
            ('ref_bold_brain', 'inputnode.in_file'),
            ('t1_2_fsnative_reverse_transform', 'inputnode.t1_2_fsnative_reverse_transform'),
            ('subjects_dir', 'inputnode.subjects_dir'),
            ('subject_id', 'inputnode.subject_id'),
            ('t1_seg', 'inputnode.t1_seg'),
            ('t1_brain', 'inputnode.t1_brain')]),
        (bbr_wf, outputnode, [('outputnode.itk_bold_to_t1', 'itk_bold_to_t1'),
                              ('outputnode.itk_t1_to_bold', 'itk_t1_to_bold'),
                              ('outputnode.fallback', 'fallback')]),
    ])

    if write_report:
        ds_report_reg = pe.Node(
            DerivativesDataSink(),
            name='ds_report_reg', run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB)

        def _bold_reg_suffix(fallback, freesurfer):
            if fallback:
                return 'coreg' if freesurfer else 'flirtnobbr'
            return 'bbregister' if freesurfer else 'flirtbbr'

        workflow.connect([
            (bbr_wf, ds_report_reg, [
                ('outputnode.out_report', 'in_file'),
                (('outputnode.fallback', _bold_reg_suffix, freesurfer), 'suffix')]),
        ])

    return workflow
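
A minimal usage sketch showing how the ``freesurfer`` flag selects between the two subworkflows (file names are placeholders):

# Hypothetical usage sketch: freesurfer=False builds the FLIRT-BBR subworkflow,
# freesurfer=True would build the bbregister-based one instead.
bold_reg_wf = init_bold_reg_wf(
    freesurfer=False,
    use_bbr=None,        # compare BBR against the rigid initialization
    bold2t1w_dof=6,
    mem_gb=3,
    omp_nthreads=4,
)
bold_reg_wf.inputs.inputnode.ref_bold_brain = 'sub-01_task-rest_desc-brain_boldref.nii.gz'
bold_reg_wf.inputs.inputnode.t1_brain = 'sub-01_desc-brain_T1w.nii.gz'
bold_reg_wf.inputs.inputnode.t1_seg = 'sub-01_dseg.nii.gz'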
Example No. 18
def init_anat_derivatives_wf(
    *,
    bids_root,
    freesurfer,
    num_t1w,
    output_dir,
    spaces,
    name="anat_derivatives_wf",
    tpm_labels=BIDS_TISSUE_ORDER,
):
    """
    Set up a battery of datasinks to store derivatives in the right location.

    Parameters
    ----------
    bids_root : :obj:`str`
        Root path of BIDS dataset
    freesurfer : :obj:`bool`
        FreeSurfer was enabled
    num_t1w : :obj:`int`
        Number of T1w images
    output_dir : :obj:`str`
        Directory in which to save derivatives
    spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
        Object containing standard and nonstandard space specifications,
        used to determine which template-space derivatives are written
    name : :obj:`str`
        Workflow name (default: anat_derivatives_wf)
    tpm_labels : :obj:`tuple`
        Tissue probability maps in order

    Inputs
    ------
    template
        Template space and specifications
    source_files
        List of input T1w images
    t1w_ref_xfms
        List of affine transforms to realign input T1w images
    t1w_preproc
        The T1w reference map, which is calculated as the average of bias-corrected
        and preprocessed T1w images, defining the anatomical space.
    t1w_mask
        Mask of the ``t1w_preproc``
    t1w_dseg
        Segmentation in T1w space
    t1w_tpms
        Tissue probability maps in T1w space
    anat2std_xfm
        Nonlinear spatial transform to resample imaging data given in anatomical space
        into standard space.
    std2anat_xfm
        Inverse transform of ``anat2std_xfm``
    std_t1w
        T1w reference resampled in one or more standard spaces.
    std_mask
        Mask of skull-stripped template, in standard space
    std_dseg
        Segmentation, resampled into standard space
    std_tpms
        Tissue probability maps in standard space
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to
        FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed
        subject space to T1w
    surfaces
        GIFTI surfaces (gray/white boundary, midthickness, pial, inflated)
    t1w_fs_aseg
        FreeSurfer's aseg segmentation, in native T1w space
    t1w_fs_aparc
        FreeSurfer's aparc+aseg segmentation, in native T1w space

    """
    from niworkflows.interfaces.utility import KeySelect

    workflow = Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "template",
            "source_files",
            "t1w_ref_xfms",
            "t1w_preproc",
            "t1w_mask",
            "t1w_dseg",
            "t1w_tpms",
            "anat2std_xfm",
            "std2anat_xfm",
            "t1w2fsnative_xfm",
            "fsnative2t1w_xfm",
            "surfaces",
            "t1w_fs_aseg",
            "t1w_fs_aparc",
        ]),
        name="inputnode",
    )

    raw_sources = pe.Node(niu.Function(function=_bids_relative),
                          name="raw_sources")
    raw_sources.inputs.bids_root = bids_root

    ds_t1w_preproc = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            desc="preproc",
                            compress=True),
        name="ds_t1w_preproc",
        run_without_submitting=True,
    )
    ds_t1w_preproc.inputs.SkullStripped = False

    ds_t1w_mask = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            desc="brain",
                            suffix="mask",
                            compress=True),
        name="ds_t1w_mask",
        run_without_submitting=True,
    )
    ds_t1w_mask.inputs.Type = "Brain"

    ds_t1w_dseg = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            suffix="dseg",
                            compress=True),
        name="ds_t1w_dseg",
        run_without_submitting=True,
    )

    ds_t1w_tpms = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            suffix="probseg",
                            compress=True),
        name="ds_t1w_tpms",
        run_without_submitting=True,
    )
    ds_t1w_tpms.inputs.label = tpm_labels

    # fmt:off
    workflow.connect([
        (inputnode, raw_sources, [('source_files', 'in_files')]),
        (inputnode, ds_t1w_preproc, [('t1w_preproc', 'in_file'),
                                     ('source_files', 'source_file')]),
        (inputnode, ds_t1w_mask, [('t1w_mask', 'in_file'),
                                  ('source_files', 'source_file')]),
        (inputnode, ds_t1w_tpms, [('t1w_tpms', 'in_file'),
                                  ('source_files', 'source_file')]),
        (inputnode, ds_t1w_dseg, [('t1w_dseg', 'in_file'),
                                  ('source_files', 'source_file')]),
        (raw_sources, ds_t1w_mask, [('out', 'RawSources')]),
    ])
    # fmt:on

    # Transforms
    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        ds_std2t1w_xfm = pe.MapNode(
            DerivativesDataSink(base_directory=output_dir,
                                to="T1w",
                                mode="image",
                                suffix="xfm"),
            iterfield=("in_file", "from"),
            name="ds_std2t1w_xfm",
            run_without_submitting=True,
        )

        ds_t1w2std_xfm = pe.MapNode(
            DerivativesDataSink(base_directory=output_dir,
                                mode="image",
                                suffix="xfm",
                                **{"from": "T1w"}),
            iterfield=("in_file", "to"),
            name="ds_t1w2std_xfm",
            run_without_submitting=True,
        )

        # fmt:off
        workflow.connect([
            (inputnode, ds_t1w2std_xfm, [('anat2std_xfm', 'in_file'),
                                         (('template', _combine_cohort), 'to'),
                                         ('source_files', 'source_file')]),
            (inputnode, ds_std2t1w_xfm, [('std2anat_xfm', 'in_file'),
                                         (('template', _combine_cohort),
                                          'from'),
                                         ('source_files', 'source_file')]),
        ])
        # fmt:on

    if num_t1w > 1:
        # Note the dictionary unpacking used to provide the ``from`` argument:
        # it is necessary because ``from`` is a reserved Python keyword
        # (not allowed as an argument name).
        ds_t1w_ref_xfms = pe.MapNode(
            DerivativesDataSink(
                base_directory=output_dir,
                to="T1w",
                mode="image",
                suffix="xfm",
                extension="txt",
                **{"from": "orig"},
            ),
            iterfield=["source_file", "in_file"],
            name="ds_t1w_ref_xfms",
            run_without_submitting=True,
        )
        # fmt:off
        workflow.connect([
            (inputnode, ds_t1w_ref_xfms, [('source_files', 'source_file'),
                                          ('t1w_ref_xfms', 'in_file')]),
        ])
        # fmt:on

    # Write derivatives in standard spaces specified by --output-spaces
    if getattr(spaces, "_cached") is not None and spaces.cached.references:
        from niworkflows.interfaces.space import SpaceDataSource
        from niworkflows.interfaces.nibabel import GenerateSamplingReference
        from niworkflows.interfaces.fixes import (
            FixHeaderApplyTransforms as ApplyTransforms, )

        from ..interfaces.templateflow import TemplateFlowSelect

        spacesource = pe.Node(SpaceDataSource(),
                              name="spacesource",
                              run_without_submitting=True)
        spacesource.iterables = (
            "in_tuple",
            [(s.fullname, s.spec)
             for s in spaces.cached.get_standard(dim=(3, ))],
        )

        gen_tplid = pe.Node(
            niu.Function(function=_fmt_cohort),
            name="gen_tplid",
            run_without_submitting=True,
        )

        select_xfm = pe.Node(
            KeySelect(fields=["anat2std_xfm"]),
            name="select_xfm",
            run_without_submitting=True,
        )
        select_tpl = pe.Node(TemplateFlowSelect(),
                             name="select_tpl",
                             run_without_submitting=True)

        gen_ref = pe.Node(GenerateSamplingReference(),
                          name="gen_ref",
                          mem_gb=0.01)

        # Mask T1w preproc images
        mask_t1w = pe.Node(ApplyMask(), name='mask_t1w')

        # Resample T1w-space inputs
        anat2std_t1w = pe.Node(
            ApplyTransforms(
                dimension=3,
                default_value=0,
                float=True,
                interpolation="LanczosWindowedSinc",
            ),
            name="anat2std_t1w",
        )

        anat2std_mask = pe.Node(ApplyTransforms(interpolation="MultiLabel"),
                                name="anat2std_mask")
        anat2std_dseg = pe.Node(ApplyTransforms(interpolation="MultiLabel"),
                                name="anat2std_dseg")
        anat2std_tpms = pe.MapNode(
            ApplyTransforms(dimension=3,
                            default_value=0,
                            float=True,
                            interpolation="Gaussian"),
            iterfield=["input_image"],
            name="anat2std_tpms",
        )

        ds_std_t1w = pe.Node(
            DerivativesDataSink(
                base_directory=output_dir,
                desc="preproc",
                compress=True,
            ),
            name="ds_std_t1w",
            run_without_submitting=True,
        )
        ds_std_t1w.inputs.SkullStripped = True

        ds_std_mask = pe.Node(
            DerivativesDataSink(base_directory=output_dir,
                                desc="brain",
                                suffix="mask",
                                compress=True),
            name="ds_std_mask",
            run_without_submitting=True,
        )
        ds_std_mask.inputs.Type = "Brain"

        ds_std_dseg = pe.Node(
            DerivativesDataSink(base_directory=output_dir,
                                suffix="dseg",
                                compress=True),
            name="ds_std_dseg",
            run_without_submitting=True,
        )

        ds_std_tpms = pe.Node(
            DerivativesDataSink(base_directory=output_dir,
                                suffix="probseg",
                                compress=True),
            name="ds_std_tpms",
            run_without_submitting=True,
        )

        # CRITICAL: the sequence of labels here (CSF-GM-WM) is that of the output of FSL-FAST
        #           (intensity mean, per tissue). This order HAS to be matched also by the ``tpms``
        #           output in the data/io_spec.json file.
        ds_std_tpms.inputs.label = tpm_labels
        # fmt:off
        workflow.connect([
            (inputnode, mask_t1w, [('t1w_preproc', 'in_file'),
                                   ('t1w_mask', 'in_mask')]),
            (mask_t1w, anat2std_t1w, [('out_file', 'input_image')]),
            (inputnode, anat2std_mask, [('t1w_mask', 'input_image')]),
            (inputnode, anat2std_dseg, [('t1w_dseg', 'input_image')]),
            (inputnode, anat2std_tpms, [('t1w_tpms', 'input_image')]),
            (inputnode, gen_ref, [('t1w_preproc', 'moving_image')]),
            (inputnode, select_xfm, [('anat2std_xfm', 'anat2std_xfm'),
                                     ('template', 'keys')]),
            (spacesource, gen_tplid, [('space', 'template'),
                                      ('cohort', 'cohort')]),
            (gen_tplid, select_xfm, [('out', 'key')]),
            (spacesource, select_tpl, [('space', 'template'),
                                       ('cohort', 'cohort'),
                                       (('resolution', _no_native),
                                        'resolution')]),
            (spacesource, gen_ref, [(('resolution', _is_native), 'keep_native')
                                    ]),
            (select_tpl, gen_ref, [('t1w_file', 'fixed_image')]),
            (anat2std_t1w, ds_std_t1w, [('output_image', 'in_file')]),
            (anat2std_mask, ds_std_mask, [('output_image', 'in_file')]),
            (anat2std_dseg, ds_std_dseg, [('output_image', 'in_file')]),
            (anat2std_tpms, ds_std_tpms, [('output_image', 'in_file')]),
            (select_tpl, ds_std_mask, [(('brain_mask', _drop_path),
                                        'RawSources')]),
        ])

        workflow.connect(
            # Connect apply transforms nodes
            [(gen_ref, n, [('out_file', 'reference_image')])
             for n in (anat2std_t1w, anat2std_mask, anat2std_dseg,
                       anat2std_tpms)] +
            [(select_xfm, n, [('anat2std_xfm', 'transforms')])
             for n in (anat2std_t1w, anat2std_mask, anat2std_dseg,
                       anat2std_tpms)]
            # Connect the source_file input of these datasinks
            + [(inputnode, n, [('source_files', 'source_file')])
               for n in (ds_std_t1w, ds_std_mask, ds_std_dseg, ds_std_tpms)]
            # Connect the space input of these datasinks
            + [(spacesource, n, [('space', 'space'), ('cohort', 'cohort'),
                                 ('resolution', 'resolution')])
               for n in (ds_std_t1w, ds_std_mask, ds_std_dseg, ds_std_tpms)])
        # fmt:on

    if not freesurfer:
        return workflow

    from niworkflows.interfaces.nitransforms import ConcatenateXFMs
    from niworkflows.interfaces.surf import Path2BIDS

    # FS native space transforms
    lta2itk_fwd = pe.Node(ConcatenateXFMs(),
                          name="lta2itk_fwd",
                          run_without_submitting=True)
    lta2itk_inv = pe.Node(ConcatenateXFMs(),
                          name="lta2itk_inv",
                          run_without_submitting=True)
    ds_t1w_fsnative = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            mode="image",
            to="fsnative",
            suffix="xfm",
            extension="txt",
            **{"from": "T1w"},
        ),
        name="ds_t1w_fsnative",
        run_without_submitting=True,
    )
    ds_fsnative_t1w = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            mode="image",
            to="T1w",
            suffix="xfm",
            extension="txt",
            **{"from": "fsnative"},
        ),
        name="ds_fsnative_t1w",
        run_without_submitting=True,
    )
    # Surfaces
    name_surfs = pe.MapNode(Path2BIDS(),
                            iterfield="in_file",
                            name="name_surfs",
                            run_without_submitting=True)
    ds_surfs = pe.MapNode(
        DerivativesDataSink(base_directory=output_dir, extension=".surf.gii"),
        iterfield=["in_file", "hemi", "suffix"],
        name="ds_surfs",
        run_without_submitting=True,
    )
    # Parcellations
    ds_t1w_fsaseg = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            desc="aseg",
                            suffix="dseg",
                            compress=True),
        name="ds_t1w_fsaseg",
        run_without_submitting=True,
    )
    ds_t1w_fsparc = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            desc="aparcaseg",
                            suffix="dseg",
                            compress=True),
        name="ds_t1w_fsparc",
        run_without_submitting=True,
    )

    # fmt:off
    workflow.connect([
        (inputnode, lta2itk_fwd, [('t1w2fsnative_xfm', 'in_xfms')]),
        (inputnode, lta2itk_inv, [('fsnative2t1w_xfm', 'in_xfms')]),
        (inputnode, ds_t1w_fsnative, [('source_files', 'source_file')]),
        (lta2itk_fwd, ds_t1w_fsnative, [('out_xfm', 'in_file')]),
        (inputnode, ds_fsnative_t1w, [('source_files', 'source_file')]),
        (lta2itk_inv, ds_fsnative_t1w, [('out_xfm', 'in_file')]),
        (inputnode, name_surfs, [('surfaces', 'in_file')]),
        (inputnode, ds_surfs, [('surfaces', 'in_file'),
                               ('source_files', 'source_file')]),
        (name_surfs, ds_surfs, [('hemi', 'hemi'), ('suffix', 'suffix')]),
        (inputnode, ds_t1w_fsaseg, [('t1w_fs_aseg', 'in_file'),
                                    ('source_files', 'source_file')]),
        (inputnode, ds_t1w_fsparc, [('t1w_fs_aparc', 'in_file'),
                                    ('source_files', 'source_file')]),
    ])
    # fmt:on
    return workflow
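
A small self-contained illustration of the ``**{"from": ...}`` unpacking trick used by the datasinks above (the ``describe_xfm`` helper is made up for the example):

def describe_xfm(**entities):
    # Join BIDS-like entities into a filename fragment.
    return '_'.join(f'{key}-{value}' for key, value in entities.items())

# describe_xfm(from='orig', to='T1w')      # SyntaxError: 'from' is a keyword
print(describe_xfm(**{'from': 'orig'}, to='T1w', suffix='xfm'))
# prints: from-orig_to-T1w_suffix-xfm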
Example No. 19
def init_fsl_bbr_wf(use_bbr, bold2t1w_dof, name='fsl_bbr_wf'):
    """
    This workflow uses FSL FLIRT to register a BOLD image to a T1-weighted
    structural image, using a boundary-based registration (BBR) cost function.

    It is a counterpart to :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`,
    which performs the same task using FreeSurfer's ``bbregister``.

    The ``use_bbr`` option permits a high degree of control over registration.
    If ``False``, standard, rigid coregistration will be performed by FLIRT.
    If ``True``, FLIRT-BBR will be seeded with the initial transform found by
    the rigid coregistration.
    If ``None``, after FLIRT-BBR is run, the resulting affine transform
    will be compared to the initial transform found by FLIRT.
    Excessive deviation will result in rejecting the BBR refinement and
    accepting the original, affine registration.

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.registration import init_fsl_bbr_wf
        wf = init_fsl_bbr_wf(use_bbr=True, bold2t1w_dof=9)


    Parameters

        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        name : str, optional
            Workflow name (default: fsl_bbr_wf)


    Inputs

        in_file
            Reference BOLD image to be registered
        t1_brain
            Skull-stripped T1-weighted structural image
        t1_seg
            FAST segmentation of ``t1_brain``
        t1_2_fsnative_reverse_transform
            Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)
        subjects_dir
            Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)
        subject_id
            Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)


    Outputs

        itk_bold_to_t1
            Affine transform from ``ref_bold_brain`` to T1w space (ITK format)
        itk_t1_to_bold
            Affine transform from T1 space to BOLD space (ITK format)
        out_report
            Reportlet for assessing registration quality
        fallback
            Boolean indicating whether BBR was rejected (rigid FLIRT registration returned)

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD reference was then co-registered to the T1w reference using
`flirt` [FSL {fsl_ver}, @flirt] with the boundary-based registration [@bbr]
cost-function.
Co-registration was configured with nine degrees of freedom to account
for distortions remaining in the BOLD reference.
""".format(fsl_ver=FLIRTRPT().version or '<ver>')

    inputnode = pe.Node(
        niu.IdentityInterface([
            'in_file',
            't1_2_fsnative_reverse_transform', 'subjects_dir', 'subject_id',  # BBRegister
            't1_seg', 't1_brain']),  # FLIRT BBR
        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(['itk_bold_to_t1', 'itk_t1_to_bold', 'out_report', 'fallback']),
        name='outputnode')

    wm_mask = pe.Node(niu.Function(function=extract_wm), name='wm_mask')
    flt_bbr_init = pe.Node(FLIRTRPT(dof=6, generate_report=not use_bbr,
                                    uses_qform=True), name='flt_bbr_init')

    invt_bbr = pe.Node(fsl.ConvertXFM(invert_xfm=True), name='invt_bbr',
                       mem_gb=DEFAULT_MEMORY_MIN_GB)

    #  The BOLD-to-T1w transform matrix comes from FSL; use the C3d affine tool
    #  to convert it into an ITK transform that ANTs can consume.
    fsl2itk_fwd = pe.Node(c3.C3dAffineTool(fsl2ras=True, itk_transform=True),
                          name='fsl2itk_fwd', mem_gb=DEFAULT_MEMORY_MIN_GB)
    fsl2itk_inv = pe.Node(c3.C3dAffineTool(fsl2ras=True, itk_transform=True),
                          name='fsl2itk_inv', mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, flt_bbr_init, [('in_file', 'in_file'),
                                   ('t1_brain', 'reference')]),
        (inputnode, fsl2itk_fwd, [('t1_brain', 'reference_file'),
                                  ('in_file', 'source_file')]),
        (inputnode, fsl2itk_inv, [('in_file', 'reference_file'),
                                  ('t1_brain', 'source_file')]),
        (invt_bbr, fsl2itk_inv, [('out_file', 'transform_file')]),
        (fsl2itk_fwd, outputnode, [('itk_transform', 'itk_bold_to_t1')]),
        (fsl2itk_inv, outputnode, [('itk_transform', 'itk_t1_to_bold')]),
    ])

    # Short-circuit workflow building, use rigid registration
    if use_bbr is False:
        workflow.connect([
            (flt_bbr_init, invt_bbr, [('out_matrix_file', 'in_file')]),
            (flt_bbr_init, fsl2itk_fwd, [('out_matrix_file', 'transform_file')]),
            (flt_bbr_init, outputnode, [('out_report', 'out_report')]),
        ])
        outputnode.inputs.fallback = True

        return workflow

    flt_bbr = pe.Node(
        FLIRTRPT(cost_func='bbr', dof=bold2t1w_dof, generate_report=True),
        name='flt_bbr')

    FSLDIR = os.getenv('FSLDIR')
    if FSLDIR:
        flt_bbr.inputs.schedule = op.join(FSLDIR, 'etc/flirtsch/bbr.sch')
    else:
        # Should mostly be hit while building docs
        LOGGER.warning("FSLDIR unset - using packaged BBR schedule")
        flt_bbr.inputs.schedule = pkgr.resource_filename('fmriprep', 'data/flirtsch/bbr.sch')

    workflow.connect([
        (inputnode, wm_mask, [('t1_seg', 'in_seg')]),
        (inputnode, flt_bbr, [('in_file', 'in_file'),
                              ('t1_brain', 'reference')]),
        (flt_bbr_init, flt_bbr, [('out_matrix_file', 'in_matrix_file')]),
        (wm_mask, flt_bbr, [('out', 'wm_seg')]),
    ])

    # Short-circuit workflow building, use boundary-based registration
    if use_bbr is True:
        workflow.connect([
            (flt_bbr, invt_bbr, [('out_matrix_file', 'in_file')]),
            (flt_bbr, fsl2itk_fwd, [('out_matrix_file', 'transform_file')]),
            (flt_bbr, outputnode, [('out_report', 'out_report')]),
        ])
        outputnode.inputs.fallback = False

        return workflow

    transforms = pe.Node(niu.Merge(2), run_without_submitting=True, name='transforms')
    reports = pe.Node(niu.Merge(2), run_without_submitting=True, name='reports')

    compare_transforms = pe.Node(niu.Function(function=compare_xforms), name='compare_transforms')

    select_transform = pe.Node(niu.Select(), run_without_submitting=True, name='select_transform')
    select_report = pe.Node(niu.Select(), run_without_submitting=True, name='select_report')

    fsl_to_lta = pe.MapNode(LTAConvert(out_lta=True), iterfield=['in_fsl'],
                            name='fsl_to_lta')

    workflow.connect([
        (flt_bbr, transforms, [('out_matrix_file', 'in1')]),
        (flt_bbr_init, transforms, [('out_matrix_file', 'in2')]),
        # Convert FSL transforms to LTA (RAS2RAS) transforms and compare
        (inputnode, fsl_to_lta, [('in_file', 'source_file'),
                                 ('t1_brain', 'target_file')]),
        (transforms, fsl_to_lta, [('out', 'in_fsl')]),
        (fsl_to_lta, compare_transforms, [('out_lta', 'lta_list')]),
        (compare_transforms, outputnode, [('out', 'fallback')]),
        # Select output transform
        (transforms, select_transform, [('out', 'inlist')]),
        (compare_transforms, select_transform, [('out', 'index')]),
        (select_transform, invt_bbr, [('out', 'in_file')]),
        (select_transform, fsl2itk_fwd, [('out', 'transform_file')]),
        (flt_bbr, reports, [('out_report', 'in1')]),
        (flt_bbr_init, reports, [('out_report', 'in2')]),
        (reports, select_report, [('out', 'inlist')]),
        (compare_transforms, select_report, [('out', 'index')]),
        (select_report, outputnode, [('out', 'out_report')]),
    ])

    return workflow
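
The ``extract_wm`` helper wired into ``wm_mask`` above is not shown in this excerpt. An illustrative (not fmriprep's actual) version could binarize the white-matter label of the FAST segmentation, for example:

# Illustrative sketch only: binarize the white-matter class (label 3 in FSL
# FAST's dseg convention) of the input segmentation.
import os
import nibabel as nb
import numpy as np

def naive_extract_wm(in_seg, wm_label=3, out_file=None):
    seg_img = nb.load(in_seg)
    wm_mask = (np.asanyarray(seg_img.dataobj) == wm_label).astype('uint8')
    header = seg_img.header.copy()
    header.set_data_dtype('uint8')
    out_file = out_file or os.path.abspath('wm_mask.nii.gz')
    nb.Nifti1Image(wm_mask, seg_img.affine, header).to_filename(out_file)
    return out_file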
Example No. 20
def init_carpetplot_wf(standard_spaces,
                       mem_gb,
                       metadata,
                       name="bold_carpet_wf"):
    """

    Resamples the MNI parcellation (ad-hoc parcellation derived from the
    Harvard-Oxford template and others).

    **Parameters**

        standard_spaces : dict
            Ordered dictionary of standard spaces, keyed by template
            identifier; must include ``MNI152NLin2009cAsym``, which indexes
            the ``std2anat_xfm`` input
        mem_gb : float
            Size of BOLD file in GB - please note that this size
            should be calculated after resamplings that may extend
            the FoV
        metadata : dict
            BIDS metadata for BOLD file
        name : str
            Name of workflow (default: ``bold_carpet_wf``)

    **Inputs**

        bold
            BOLD image, after the prescribed corrections (STC, HMC and SDC)
            when available.
        bold_mask
            BOLD series mask
        confounds_file
            TSV of all aggregated confounds
        t1_bold_xform
            Affine matrix that maps the T1w space into alignment with
            the native BOLD space
        std2anat_xfm
            ANTs-compatible affine-and-warp transform file

    **Outputs**

        out_carpetplot
            Path of the generated SVG file

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold', 'bold_mask', 'confounds_file', 't1_bold_xform', 'std2anat_xfm'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=['out_carpetplot']),
                         name='outputnode')

    select_std = pe.Node(KeySelect(keys=list(standard_spaces.keys()),
                                   fields=['std2anat_xfm']),
                         name='select_std',
                         run_without_submitting=True)
    select_std.inputs.key = 'MNI152NLin2009cAsym'

    # List transforms
    mrg_xfms = pe.Node(niu.Merge(2), name='mrg_xfms')

    # Warp segmentation into EPI space
    resample_parc = pe.Node(ApplyTransforms(
        float=True,
        input_image=str(
            get_template('MNI152NLin2009cAsym',
                         resolution=1,
                         desc='carpet',
                         suffix='dseg',
                         extensions=['.nii', '.nii.gz'])),
        dimension=3,
        default_value=0,
        interpolation='MultiLabel'),
                            name='resample_parc')

    # Carpetplot and confounds plot
    conf_plot = pe.Node(FMRISummary(tr=metadata['RepetitionTime'],
                                    confounds_list=[
                                        ('global_signal', None, 'GS'),
                                        ('csf', None, 'GSCSF'),
                                        ('white_matter', None, 'GSWM'),
                                        ('std_dvars', None, 'DVARS'),
                                        ('framewise_displacement', 'mm', 'FD')
                                    ]),
                        name='conf_plot',
                        mem_gb=mem_gb)
    ds_report_bold_conf = pe.Node(DerivativesDataSink(desc='carpetplot',
                                                      keep_dtype=True),
                                  name='ds_report_bold_conf',
                                  run_without_submitting=True,
                                  mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow = Workflow(name=name)
    workflow.connect([
        (inputnode, select_std, [('std2anat_xfm', 'std2anat_xfm')]),
        (inputnode, mrg_xfms, [('t1_bold_xform', 'in1')]),
        (inputnode, resample_parc, [('bold_mask', 'reference_image')]),
        (select_std, mrg_xfms, [('std2anat_xfm', 'in2')]),
        (mrg_xfms, resample_parc, [('out', 'transforms')]),
        # Carpetplot
        (inputnode, conf_plot, [('bold', 'in_func'), ('bold_mask', 'in_mask'),
                                ('confounds_file', 'confounds_file')]),
        (resample_parc, conf_plot, [('output_image', 'in_segm')]),
        (conf_plot, ds_report_bold_conf, [('out_file', 'in_file')]),
        (conf_plot, outputnode, [('out_file', 'out_carpetplot')]),
    ])
    return workflow
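
A minimal usage sketch (placeholder file names; ``standard_spaces`` only needs to contain the ``MNI152NLin2009cAsym`` key that the workflow selects internally):

# Hypothetical usage sketch.
from collections import OrderedDict

carpet_wf = init_carpetplot_wf(
    standard_spaces=OrderedDict([('MNI152NLin2009cAsym', {})]),
    mem_gb=1,
    metadata={'RepetitionTime': 2.0},
)
carpet_wf.inputs.inputnode.bold = 'sub-01_task-rest_desc-preproc_bold.nii.gz'
carpet_wf.inputs.inputnode.bold_mask = 'sub-01_task-rest_desc-brain_mask.nii.gz'
carpet_wf.inputs.inputnode.confounds_file = 'sub-01_desc-confounds_regressors.tsv'
carpet_wf.inputs.inputnode.t1_bold_xform = 'identity'  # ANTs accepts 'identity'
carpet_wf.inputs.inputnode.std2anat_xfm = ['sub-01_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5']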
Example No. 21
def init_sdc_wf(fmaps, bold_meta, omp_nthreads=1,
                debug=False, fmap_bspline=False, fmap_demean=True):
    """
    This workflow implements the heuristics to choose an
    :abbr:`SDC (susceptibility distortion correction)` strategy.
    When no fieldmap information is present within the BIDS inputs,
    the EXPERIMENTAL "fieldmap-less SyN" correction can be performed, using
    the ``--use-syn`` argument. When ``--force-syn`` is specified,
    the "fieldmap-less SyN" correction is always executed and reported,
    even if other fieldmaps with higher priority are available.
    In that case (some fieldmap is available and ``--force-syn`` is
    requested), the :abbr:`SDC (susceptibility distortion correction)`
    method actually applied is the one with the highest priority.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.fieldmap import init_sdc_wf
        wf = init_sdc_wf(
            fmaps=[{
                'type': 'phasediff',
                'phasediff': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_phasediff.nii.gz',
                'magnitude1': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude1.nii.gz',
                'magnitude2': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude2.nii.gz',
            }],
            bold_meta={
                'RepetitionTime': 2.0,
                'SliceTiming': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
                'PhaseEncodingDirection': 'j',
            },
        )

    **Parameters**

        fmaps : list of pybids dicts
            A list of dictionaries with the available fieldmaps
            (and their metadata using the key ``'metadata'`` for the
            case of *epi* fieldmaps)
        bold_meta : dict
            BIDS metadata dictionary corresponding to the BOLD run
        omp_nthreads : int
            Maximum number of threads an individual process may use
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        debug : bool
            Enable debugging outputs

    **Inputs**
        bold_ref
            A BOLD reference calculated at a previous stage
        bold_ref_brain
            Same as above, but brain-masked
        bold_mask
            Brain mask for the BOLD run
        t1_brain
            T1w image, brain-masked, for the fieldmap-less SyN method
        t1_2_mni_reverse_transform
            MNI-to-T1w transform to map prior knowledge to the T1w space
            for the fieldmap-less SyN method
        template : str
            Name of template targeted by ``template`` output space


    **Outputs**
        bold_ref
            An unwarped BOLD reference
        bold_mask
            The corresponding new mask after unwarping
        bold_ref_brain
            Brain-extracted, unwarped BOLD reference
        out_warp
            The deformation field to unwarp the susceptibility distortions
        syn_bold_ref
            If ``--force-syn``, an unwarped BOLD reference with this
            method (for reporting purposes)

    """

    # TODO: To be removed (filter out unsupported fieldmaps):
    fmaps = [fmap for fmap in fmaps if fmap['type'] in FMAP_PRIORITY]

    workflow = Workflow(name='sdc_wf' if fmaps else 'sdc_bypass_wf')
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_ref', 'bold_ref_brain', 'bold_mask',
                't1_brain', 't1_2_mni_reverse_transform', 'template']),
        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_ref', 'bold_mask', 'bold_ref_brain',
                'out_warp', 'syn_bold_ref', 'method']),
        name='outputnode')

    # No fieldmaps - forward inputs to outputs
    if not fmaps:
        outputnode.inputs.method = 'None'
        workflow.connect([
            (inputnode, outputnode, [('bold_ref', 'bold_ref'),
                                     ('bold_mask', 'bold_mask'),
                                     ('bold_ref_brain', 'bold_ref_brain')]),
        ])
        return workflow

    workflow.__postdesc__ = """\
Based on the estimated susceptibility distortion, an
unwarped BOLD reference was calculated for a more accurate
co-registration with the anatomical reference.
"""

    # In case there are multiple fieldmaps prefer EPI
    fmaps.sort(key=lambda fmap: FMAP_PRIORITY[fmap['type']])
    fmap = fmaps[0]

    # PEPOLAR path
    if fmap['type'] == 'epi':
        outputnode.inputs.method = 'PEB/PEPOLAR (phase-encoding based / PE-POLARity)'
        # Get EPI polarities and their metadata
        epi_fmaps = [(fmap_['epi'], fmap_['metadata']["PhaseEncodingDirection"])
                     for fmap_ in fmaps if fmap_['type'] == 'epi']
        sdc_unwarp_wf = init_pepolar_unwarp_wf(
            bold_meta=bold_meta,
            epi_fmaps=epi_fmaps,
            omp_nthreads=omp_nthreads,
            name='pepolar_unwarp_wf')

        workflow.connect([
            (inputnode, sdc_unwarp_wf, [
                ('bold_ref', 'inputnode.in_reference'),
                ('bold_mask', 'inputnode.in_mask'),
                ('bold_ref_brain', 'inputnode.in_reference_brain')]),
        ])

    # FIELDMAP path
    if fmap['type'] in ['fieldmap', 'phasediff']:
        outputnode.inputs.method = 'FMB (%s-based)' % fmap['type']
        # Import specific workflows here, so we don't break everything with one
        # unused workflow.
        if fmap['type'] == 'fieldmap':
            from .fmap import init_fmap_wf
            fmap_estimator_wf = init_fmap_wf(
                omp_nthreads=omp_nthreads,
                fmap_bspline=fmap_bspline)
            # set inputs
            fmap_estimator_wf.inputs.inputnode.fieldmap = fmap['fieldmap']
            fmap_estimator_wf.inputs.inputnode.magnitude = fmap['magnitude']

        if fmap['type'] == 'phasediff':
            from .phdiff import init_phdiff_wf
            fmap_estimator_wf = init_phdiff_wf(omp_nthreads=omp_nthreads)
            # set inputs
            fmap_estimator_wf.inputs.inputnode.phasediff = fmap['phasediff']
            fmap_estimator_wf.inputs.inputnode.magnitude = [
                fmap_ for key, fmap_ in sorted(fmap.items())
                if key.startswith("magnitude")
            ]

        sdc_unwarp_wf = init_sdc_unwarp_wf(
            omp_nthreads=omp_nthreads,
            fmap_demean=fmap_demean,
            debug=debug,
            name='sdc_unwarp_wf')
        sdc_unwarp_wf.inputs.inputnode.metadata = bold_meta

        workflow.connect([
            (inputnode, sdc_unwarp_wf, [
                ('bold_ref', 'inputnode.in_reference'),
                ('bold_ref_brain', 'inputnode.in_reference_brain'),
                ('bold_mask', 'inputnode.in_mask')]),
            (fmap_estimator_wf, sdc_unwarp_wf, [
                ('outputnode.fmap', 'inputnode.fmap'),
                ('outputnode.fmap_ref', 'inputnode.fmap_ref'),
                ('outputnode.fmap_mask', 'inputnode.fmap_mask')]),
        ])

    # FIELDMAP-less path
    if any(fm['type'] == 'syn' for fm in fmaps):
        syn_sdc_wf = init_syn_sdc_wf(
            bold_pe=bold_meta.get('PhaseEncodingDirection', None),
            omp_nthreads=omp_nthreads)

        workflow.connect([
            (inputnode, syn_sdc_wf, [
                ('t1_brain', 'inputnode.t1_brain'),
                ('t1_2_mni_reverse_transform', 'inputnode.t1_2_mni_reverse_transform'),
                ('bold_ref', 'inputnode.bold_ref'),
                ('bold_ref_brain', 'inputnode.bold_ref_brain'),
                ('template', 'inputnode.template')]),
        ])

        # XXX Eliminate branch when forcing isn't an option
        if fmap['type'] == 'syn':  # No fieldmaps, but --use-syn
            outputnode.inputs.method = 'FLB ("fieldmap-less", SyN-based)'
            sdc_unwarp_wf = syn_sdc_wf
        else:  # --force-syn was called when other fieldmap was present
            sdc_unwarp_wf.__desc__ = None
            workflow.connect([
                (syn_sdc_wf, outputnode, [
                    ('outputnode.out_reference', 'syn_bold_ref')]),
            ])

    workflow.connect([
        (sdc_unwarp_wf, outputnode, [
            ('outputnode.out_warp', 'out_warp'),
            ('outputnode.out_reference', 'bold_ref'),
            ('outputnode.out_reference_brain', 'bold_ref_brain'),
            ('outputnode.out_mask', 'bold_mask')]),
    ])

    return workflow
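
The branching above keys entirely on the ``type`` field of each entry in ``fmaps``. A minimal, hypothetical sketch of the data structure this selection logic assumes (field names are taken from the code above; the paths, metadata, and priority mapping are illustrative only):

# Hypothetical illustration of the ``fmaps`` list consumed by the SDC estimator
# selection above; the 'type' key routes each entry to the PEPOLAR,
# fieldmap/phasediff, or SyN ("fieldmap-less") branch.
fmaps = [
    {'type': 'epi',
     'epi': '/data/sub-01_dir-AP_epi.nii.gz',
     'metadata': {'PhaseEncodingDirection': 'j-'}},
    {'type': 'phasediff',
     'phasediff': '/data/sub-01_phasediff.nii.gz',
     'magnitude1': '/data/sub-01_magnitude1.nii.gz',
     'magnitude2': '/data/sub-01_magnitude2.nii.gz'},
    {'type': 'syn'},  # fieldmap-less estimation (--use-syn / --force-syn)
]

# With an assumed priority such as the one below, sorting by
# FMAP_PRIORITY[fmap['type']] makes the PEPOLAR (EPI) entry win, as in the code above.
FMAP_PRIORITY = {'epi': 0, 'fieldmap': 1, 'phasediff': 2, 'syn': 3}
fmaps.sort(key=lambda fmap: FMAP_PRIORITY[fmap['type']])
assert fmaps[0]['type'] == 'epi'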
Example #22
def init_bold_confs_wf(mem_gb, metadata, name="bold_confs_wf"):
    """
    This workflow calculates confounds for a BOLD series, and aggregates them
    into a :abbr:`TSV (tab-separated value)` file, for use as nuisance
    regressors in a :abbr:`GLM (general linear model)`.

    The following confounds are calculated, with column headings in parentheses:

    #. Region-wise average signal (``csf``, ``white_matter``, ``global_signal``)
    #. DVARS - original and standardized variants (``dvars``, ``std_dvars``)
    #. Framewise displacement, based on head-motion parameters
       (``framewise_displacement``)
    #. Temporal CompCor (``t_comp_cor_XX``)
    #. Anatomical CompCor (``a_comp_cor_XX``)
    #. Cosine basis set for high-pass filtering w/ 0.008 Hz cut-off
       (``cosine_XX``)
    #. Non-steady-state volumes (``non_steady_state_XX``)
    #. Estimated head-motion parameters, in mm and rad
       (``trans_x``, ``trans_y``, ``trans_z``, ``rot_x``, ``rot_y``, ``rot_z``)


    Prior to estimating aCompCor and tCompCor, non-steady-state volumes are
    censored and high-pass filtered using a :abbr:`DCT (discrete cosine
    transform)` basis.
    The cosine basis, as well as one regressor per censored volume, are included
    for convenience.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.confounds import init_bold_confs_wf
        wf = init_bold_confs_wf(
            mem_gb=1,
            metadata={})

    **Parameters**

        mem_gb : float
            Size of BOLD file in GB - please note that this size
            should be calculated after resamplings that may extend
            the FoV
        metadata : dict
            BIDS metadata for BOLD file
        name : str
            Name of workflow (default: ``bold_confs_wf``)

    **Inputs**

        bold
            BOLD image, after the prescribed corrections (STC, HMC and SDC)
            when available.
        bold_mask
            BOLD series mask
        movpar_file
            SPM-formatted motion parameters file
        skip_vols
            Number of non-steady-state volumes
        t1_mask
            Mask of the skull-stripped template image
        t1_tpms
            List of tissue probability maps in T1w space
        t1_bold_xform
            Affine matrix that maps the T1w space into alignment with
            the native BOLD space

    **Outputs**

        confounds_file
            TSV of all aggregated confounds
        rois_report
            Reportlet visualizing white-matter/CSF mask used for aCompCor,
            the ROI for tCompCor and the BOLD brain mask.

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
Several confounding time-series were calculated based on the
*preprocessed BOLD*: framewise displacement (FD), DVARS and
three region-wise global signals.
FD and DVARS are calculated for each functional run, both using their
implementations in *Nipype* [following the definitions by @power_fd_dvars].
The three global signals are extracted within the CSF, the WM, and
the whole-brain masks.
Additionally, a set of physiological regressors were extracted to
allow for component-based noise correction [*CompCor*, @compcor].
Principal components are estimated after high-pass filtering the
*preprocessed BOLD* time-series (using a discrete cosine filter with
128s cut-off) for the two *CompCor* variants: temporal (tCompCor)
and anatomical (aCompCor).
Six tCompCor components are then calculated from the top 5% variable
voxels within a mask covering the subcortical regions.
This subcortical mask is obtained by heavily eroding the brain mask,
which ensures it does not include cortical GM regions.
For aCompCor, six components are calculated within the intersection of
the aforementioned mask and the union of CSF and WM masks calculated
in T1w space, after their projection to the native space of each
functional run (using the inverse BOLD-to-T1w transformation).
The head-motion estimates calculated in the correction step were also
placed within the corresponding confounds file.
"""
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold', 'bold_mask', 'movpar_file', 'skip_vols',
                't1_mask', 't1_tpms', 't1_bold_xform']),
        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['confounds_file']),
        name='outputnode')

    # Get masks ready in T1w space
    acc_tpm = pe.Node(AddTPMs(indices=[0, 2]), name='tpms_add_csf_wm')  # acc stands for aCompCor
    csf_roi = pe.Node(TPM2ROI(erode_mm=0, mask_erode_mm=30), name='csf_roi')
    wm_roi = pe.Node(TPM2ROI(
        erode_prop=0.6, mask_erode_prop=0.6**3),  # 0.6 = radius; 0.6^3 = volume
        name='wm_roi')
    acc_roi = pe.Node(TPM2ROI(
        erode_prop=0.6, mask_erode_prop=0.6**3),  # 0.6 = radius; 0.6^3 = volume
        name='acc_roi')

    # Map ROIs in T1w space into BOLD space
    csf_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
                      name='csf_tfm', mem_gb=0.1)
    wm_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
                     name='wm_tfm', mem_gb=0.1)
    acc_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
                      name='acc_tfm', mem_gb=0.1)
    tcc_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
                      name='tcc_tfm', mem_gb=0.1)

    # Ensure ROIs don't go off-limits (reduced FoV)
    csf_msk = pe.Node(niu.Function(function=_maskroi), name='csf_msk')
    wm_msk = pe.Node(niu.Function(function=_maskroi), name='wm_msk')
    acc_msk = pe.Node(niu.Function(function=_maskroi), name='acc_msk')
    tcc_msk = pe.Node(niu.Function(function=_maskroi), name='tcc_msk')

    # DVARS
    dvars = pe.Node(nac.ComputeDVARS(save_nstd=True, save_std=True, remove_zerovariance=True),
                    name="dvars", mem_gb=mem_gb)

    # Frame displacement
    fdisp = pe.Node(nac.FramewiseDisplacement(parameter_source="SPM"),
                    name="fdisp", mem_gb=mem_gb)

    # a/t-CompCor
    tcompcor = pe.Node(
        TCompCor(components_file='tcompcor.tsv', header_prefix='t_comp_cor_', pre_filter='cosine',
                 save_pre_filter=True, percentile_threshold=.05),
        name="tcompcor", mem_gb=mem_gb)

    acompcor = pe.Node(
        ACompCor(components_file='acompcor.tsv', header_prefix='a_comp_cor_', pre_filter='cosine',
                 save_pre_filter=True),
        name="acompcor", mem_gb=mem_gb)

    # Set TR if present
    if 'RepetitionTime' in metadata:
        tcompcor.inputs.repetition_time = metadata['RepetitionTime']
        acompcor.inputs.repetition_time = metadata['RepetitionTime']

    # Global and segment regressors
    mrg_lbl = pe.Node(niu.Merge(3), name='merge_rois', run_without_submitting=True)
    signals = pe.Node(SignalExtraction(class_labels=["csf", "white_matter", "global_signal"]),
                      name="signals", mem_gb=mem_gb)

    # Arrange confounds
    add_dvars_header = pe.Node(
        AddTSVHeader(columns=["dvars"]),
        name="add_dvars_header", mem_gb=0.01, run_without_submitting=True)
    add_std_dvars_header = pe.Node(
        AddTSVHeader(columns=["std_dvars"]),
        name="add_std_dvars_header", mem_gb=0.01, run_without_submitting=True)
    add_motion_headers = pe.Node(
        AddTSVHeader(columns=["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]),
        name="add_motion_headers", mem_gb=0.01, run_without_submitting=True)
    concat = pe.Node(GatherConfounds(), name="concat", mem_gb=0.01, run_without_submitting=True)

    # Generate reportlet
    mrg_compcor = pe.Node(niu.Merge(2), name='merge_compcor', run_without_submitting=True)
    rois_plot = pe.Node(ROIsPlot(colors=['b', 'magenta'], generate_report=True),
                        name='rois_plot', mem_gb=mem_gb)

    ds_report_bold_rois = pe.Node(
        DerivativesDataSink(suffix='rois'),
        name='ds_report_bold_rois', run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB)

    def _pick_csf(files):
        return files[0]

    def _pick_wm(files):
        return files[-1]

    workflow.connect([
        # Massage ROIs (in T1w space)
        (inputnode, acc_tpm, [('t1_tpms', 'in_files')]),
        (inputnode, csf_roi, [(('t1_tpms', _pick_csf), 'in_tpm'),
                              ('t1_mask', 'in_mask')]),
        (inputnode, wm_roi, [(('t1_tpms', _pick_wm), 'in_tpm'),
                             ('t1_mask', 'in_mask')]),
        (inputnode, acc_roi, [('t1_mask', 'in_mask')]),
        (acc_tpm, acc_roi, [('out_file', 'in_tpm')]),
        # Map ROIs to BOLD
        (inputnode, csf_tfm, [('bold_mask', 'reference_image'),
                              ('t1_bold_xform', 'transforms')]),
        (csf_roi, csf_tfm, [('roi_file', 'input_image')]),
        (inputnode, wm_tfm, [('bold_mask', 'reference_image'),
                             ('t1_bold_xform', 'transforms')]),
        (wm_roi, wm_tfm, [('roi_file', 'input_image')]),
        (inputnode, acc_tfm, [('bold_mask', 'reference_image'),
                              ('t1_bold_xform', 'transforms')]),
        (acc_roi, acc_tfm, [('roi_file', 'input_image')]),
        (inputnode, tcc_tfm, [('bold_mask', 'reference_image'),
                              ('t1_bold_xform', 'transforms')]),
        (csf_roi, tcc_tfm, [('eroded_mask', 'input_image')]),
        # Mask ROIs with bold_mask
        (inputnode, csf_msk, [('bold_mask', 'in_mask')]),
        (inputnode, wm_msk, [('bold_mask', 'in_mask')]),
        (inputnode, acc_msk, [('bold_mask', 'in_mask')]),
        (inputnode, tcc_msk, [('bold_mask', 'in_mask')]),
        # connect inputnode to each non-anatomical confound node
        (inputnode, dvars, [('bold', 'in_file'),
                            ('bold_mask', 'in_mask')]),
        (inputnode, fdisp, [('movpar_file', 'in_file')]),

        # tCompCor
        (inputnode, tcompcor, [('bold', 'realigned_file')]),
        (inputnode, tcompcor, [('skip_vols', 'ignore_initial_volumes')]),
        (tcc_tfm, tcc_msk, [('output_image', 'roi_file')]),
        (tcc_msk, tcompcor, [('out', 'mask_files')]),

        # aCompCor
        (inputnode, acompcor, [('bold', 'realigned_file')]),
        (inputnode, acompcor, [('skip_vols', 'ignore_initial_volumes')]),
        (acc_tfm, acc_msk, [('output_image', 'roi_file')]),
        (acc_msk, acompcor, [('out', 'mask_files')]),

        # Global signals extraction (constrained by anatomy)
        (inputnode, signals, [('bold', 'in_file')]),
        (csf_tfm, csf_msk, [('output_image', 'roi_file')]),
        (csf_msk, mrg_lbl, [('out', 'in1')]),
        (wm_tfm, wm_msk, [('output_image', 'roi_file')]),
        (wm_msk, mrg_lbl, [('out', 'in2')]),
        (inputnode, mrg_lbl, [('bold_mask', 'in3')]),
        (mrg_lbl, signals, [('out', 'label_files')]),

        # Collate computed confounds together
        (inputnode, add_motion_headers, [('movpar_file', 'in_file')]),
        (dvars, add_dvars_header, [('out_nstd', 'in_file')]),
        (dvars, add_std_dvars_header, [('out_std', 'in_file')]),
        (signals, concat, [('out_file', 'signals')]),
        (fdisp, concat, [('out_file', 'fd')]),
        (tcompcor, concat, [('components_file', 'tcompcor'),
                            ('pre_filter_file', 'cos_basis')]),
        (acompcor, concat, [('components_file', 'acompcor')]),
        (add_motion_headers, concat, [('out_file', 'motion')]),
        (add_dvars_header, concat, [('out_file', 'dvars')]),
        (add_std_dvars_header, concat, [('out_file', 'std_dvars')]),

        # Set outputs
        (concat, outputnode, [('confounds_file', 'confounds_file')]),
        (inputnode, rois_plot, [('bold', 'in_file'),
                                ('bold_mask', 'in_mask')]),
        (tcompcor, mrg_compcor, [('high_variance_masks', 'in1')]),
        (acc_msk, mrg_compcor, [('out', 'in2')]),
        (mrg_compcor, rois_plot, [('out', 'in_rois')]),
        (rois_plot, ds_report_bold_rois, [('out_report', 'in_file')]),
    ])

    return workflow
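
Downstream of this workflow, the aggregated ``confounds_file`` is a plain TSV whose column names match the list in the docstring above. A minimal sketch of loading it with pandas and picking a nuisance-regressor subset (the file name and the particular selection are hypothetical, not a recommendation):

import pandas as pd

# Hypothetical path to the TSV assembled by the ``concat`` (GatherConfounds) node.
confounds = pd.read_csv('sub-01_task-rest_bold_confounds.tsv', sep='\t')

# Example selection: six head-motion parameters, framewise displacement, and the
# first six aCompCor components (column names as documented above).
motion = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']
acompcor = [c for c in confounds.columns if c.startswith('a_comp_cor_')][:6]
design = confounds[motion + ['framewise_displacement'] + acompcor].fillna(0)

# Note: the 128 s DCT cut-off quoted in the boilerplate corresponds to
# 1 / 128 s ≈ 0.0078 Hz, i.e. the "0.008 Hz" high-pass mentioned in the docstring.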
Example #23
def init_single_subject_wf(subject_id, task_id, echo_idx, name, reportlets_dir, output_dir,
                           bids_dir, ignore, debug, low_mem, anat_only, longitudinal, t2s_coreg,
                           omp_nthreads, skull_strip_template, skull_strip_fixed_seed,
                           freesurfer, output_spaces, template, medial_surface_nan,
                           cifti_output, hires, use_bbr, bold2t1w_dof, fmap_bspline, fmap_demean,
                           use_syn, force_syn, template_out_grid,
                           use_aroma, aroma_melodic_dim, ignore_aroma_err):
    """
    This workflow organizes the preprocessing pipeline for a single subject.
    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.

    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.base import init_single_subject_wf
        wf = init_single_subject_wf(subject_id='test',
                                    task_id='',
                                    echo_idx=None,
                                    name='single_subject_wf',
                                    reportlets_dir='.',
                                    output_dir='.',
                                    bids_dir='.',
                                    ignore=[],
                                    debug=False,
                                    low_mem=False,
                                    anat_only=False,
                                    longitudinal=False,
                                    t2s_coreg=False,
                                    omp_nthreads=1,
                                    skull_strip_template='OASIS',
                                    skull_strip_fixed_seed=False,
                                    freesurfer=True,
                                    template='MNI152NLin2009cAsym',
                                    output_spaces=['T1w', 'fsnative',
                                                  'template', 'fsaverage5'],
                                    medial_surface_nan=False,
                                    cifti_output=False,
                                    hires=True,
                                    use_bbr=True,
                                    bold2t1w_dof=9,
                                    fmap_bspline=False,
                                    fmap_demean=True,
                                    use_syn=True,
                                    force_syn=True,
                                    template_out_grid='native',
                                    use_aroma=False,
                                    aroma_melodic_dim=-200,
                                    ignore_aroma_err=False)

    Parameters

        subject_id : str
            Single subject label to be preprocessed
        task_id : str or None
            Task ID of BOLD series to preprocess, or ``None`` to preprocess all
        echo_idx : int or None
            Index of echo to preprocess in multiecho BOLD series,
            or ``None`` to preprocess all
        name : str
            Name of workflow
        ignore : list
            Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
        debug : bool
            Enable debugging outputs
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        anat_only : bool
            Disable functional workflows
        longitudinal : bool
            Treat multiple sessions as longitudinal (may increase runtime)
            See sub-workflows for specific differences
        t2s_coreg : bool
            For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
        omp_nthreads : int
            Maximum number of threads an individual process may use
        skull_strip_template : str
            Name of ANTs skull-stripping template ('OASIS' or 'NKI')
        skull_strip_fixed_seed : bool
            Do not use a random seed for skull-stripping - will ensure
            run-to-run replicability when used with --omp-nthreads 1
        reportlets_dir : str
            Directory in which to save reportlets
        output_dir : str
            Directory in which to save derivatives
        bids_dir : str
            Root directory of BIDS dataset
        freesurfer : bool
            Enable FreeSurfer surface reconstruction (may increase runtime)
        output_spaces : list
            List of output spaces functional images are to be resampled to.
            Some parts of pipeline will only be instantiated for some output spaces.

            Valid spaces:

             - T1w
             - template
             - fsnative
             - fsaverage (or other pre-existing FreeSurfer templates)
        template : str
            Name of template targeted by ``template`` output space
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files
        cifti_output : bool
            Generate bold CIFTI file in output spaces
        hires : bool
            Enable sub-millimeter preprocessing in FreeSurfer
        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        use_syn : bool
            **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
            If fieldmaps are present and enabled, this is not run by default.
        force_syn : bool
            **Temporary**: Always run SyN-based SDC
        template_out_grid : str
            Keyword ('native', '1mm' or '2mm') or path of custom reference
            image for normalization
        use_aroma : bool
            Perform ICA-AROMA on MNI-resampled functional series
        ignore_aroma_err : bool
            Do not fail on ICA-AROMA errors

    Inputs

        subjects_dir
            FreeSurfer SUBJECTS_DIR

    """
    if name in ('single_subject_wf', 'single_subject_fmripreptest_wf'):
        # for documentation purposes
        subject_data = {
            't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
            'bold': ['/completely/made/up/path/sub-01_task-nback_bold.nii.gz']
        }
        layout = None
    else:
        subject_data, layout = collect_data(bids_dir, subject_id, task_id, echo_idx)

    # Make sure we always go through these two checks
    if not anat_only and subject_data['bold'] == []:
        raise Exception("No BOLD images found for participant {} and task {}. "
                        "All workflows require BOLD images.".format(
                            subject_id, task_id if task_id else '<all>'))

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(fmriprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### References

""".format(nilearn_ver=nilearn_ver)

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(), name='bids_info', run_without_submitting=True)

    summary = pe.Node(SubjectSummary(output_spaces=output_spaces, template=template),
                      name='summary', run_without_submitting=True)

    about = pe.Node(AboutSummary(version=__version__,
                                 command=' '.join(sys.argv)),
                    name='about', run_without_submitting=True)

    ds_report_summary = pe.Node(
        DerivativesDataSink(base_directory=reportlets_dir,
                            suffix='summary'),
        name='ds_report_summary', run_without_submitting=True)

    ds_report_about = pe.Node(
        DerivativesDataSink(base_directory=reportlets_dir,
                            suffix='about'),
        name='ds_report_about', run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(name="anat_preproc_wf",
                                           skull_strip_template=skull_strip_template,
                                           skull_strip_fixed_seed=skull_strip_fixed_seed,
                                           output_spaces=output_spaces,
                                           template=template,
                                           debug=debug,
                                           longitudinal=longitudinal,
                                           omp_nthreads=omp_nthreads,
                                           freesurfer=freesurfer,
                                           hires=hires,
                                           reportlets_dir=reportlets_dir,
                                           output_dir=output_dir,
                                           num_t1w=len(subject_data['t1w']))

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'),
                            ('t2w', 't2w'),
                            ('bold', 'bold')]),
        (bids_info, summary, [('subject_id', 'subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (summary, anat_preproc_wf, [('subject_id', 'inputnode.subject_id')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    if anat_only:
        return workflow

    for bold_file in subject_data['bold']:
        func_preproc_wf = init_func_preproc_wf(bold_file=bold_file,
                                               layout=layout,
                                               ignore=ignore,
                                               freesurfer=freesurfer,
                                               use_bbr=use_bbr,
                                               t2s_coreg=t2s_coreg,
                                               bold2t1w_dof=bold2t1w_dof,
                                               reportlets_dir=reportlets_dir,
                                               output_spaces=output_spaces,
                                               template=template,
                                               medial_surface_nan=medial_surface_nan,
                                               cifti_output=cifti_output,
                                               output_dir=output_dir,
                                               omp_nthreads=omp_nthreads,
                                               low_mem=low_mem,
                                               fmap_bspline=fmap_bspline,
                                               fmap_demean=fmap_demean,
                                               use_syn=use_syn,
                                               force_syn=force_syn,
                                               debug=debug,
                                               template_out_grid=template_out_grid,
                                               use_aroma=use_aroma,
                                               aroma_melodic_dim=aroma_melodic_dim,
                                               ignore_aroma_err=ignore_aroma_err,
                                               num_bold=len(subject_data['bold']))

        workflow.connect([
            (anat_preproc_wf, func_preproc_wf,
             [('outputnode.t1_preproc', 'inputnode.t1_preproc'),
              ('outputnode.t1_brain', 'inputnode.t1_brain'),
              ('outputnode.t1_mask', 'inputnode.t1_mask'),
              ('outputnode.t1_seg', 'inputnode.t1_seg'),
              ('outputnode.t1_aseg', 'inputnode.t1_aseg'),
              ('outputnode.t1_aparc', 'inputnode.t1_aparc'),
              ('outputnode.t1_tpms', 'inputnode.t1_tpms'),
              ('outputnode.t1_2_mni_forward_transform', 'inputnode.t1_2_mni_forward_transform'),
              ('outputnode.t1_2_mni_reverse_transform', 'inputnode.t1_2_mni_reverse_transform'),
              # Undefined if --no-freesurfer, but this is safe
              ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
              ('outputnode.subject_id', 'inputnode.subject_id'),
              ('outputnode.t1_2_fsnative_forward_transform',
               'inputnode.t1_2_fsnative_forward_transform'),
              ('outputnode.t1_2_fsnative_reverse_transform',
               'inputnode.t1_2_fsnative_reverse_transform')]),
        ])

    return workflow
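
The ``subject_data`` dictionary driving this function (checked at the top and iterated over for the functional runs) is produced by ``collect_data``; a hypothetical shape with made-up paths:

# Hypothetical return value of collect_data(...)[0] for one subject with a single
# T1w and two BOLD runs; the workflow requires a non-empty 't1w' list and, unless
# anat_only is set, a non-empty 'bold' list. One func_preproc_wf is built per
# entry in subject_data['bold'].
subject_data = {
    't1w': ['/data/bids/sub-01/anat/sub-01_T1w.nii.gz'],
    't2w': [],
    'roi': [],
    'flair': [],
    'bold': ['/data/bids/sub-01/func/sub-01_task-nback_run-1_bold.nii.gz',
             '/data/bids/sub-01/func/sub-01_task-nback_run-2_bold.nii.gz'],
}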
Example #24
def init_fmap_wf(omp_nthreads, fmap_bspline, name='fmap_wf'):
    """
    Fieldmap workflow - when we have a sequence that directly measures the fieldmap,
    we just need to mask it (using the corresponding magnitude image) to remove the
    noise in the surrounding air region, and ensure that units are Hz.

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.fieldmap.fmap import init_fmap_wf
        wf = init_fmap_wf(omp_nthreads=6, fmap_bspline=False)

    """

    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['magnitude', 'fieldmap']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['fmap', 'fmap_ref', 'fmap_mask']),
                         name='outputnode')

    # Merge input magnitude images
    magmrg = pe.Node(IntraModalMerge(), name='magmrg')
    # Merge input fieldmap images
    fmapmrg = pe.Node(IntraModalMerge(zero_based_avg=False, hmc=False),
                      name='fmapmrg')

    # de-gradient the fields ("bias/illumination artifact")
    n4_correct = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
                         name='n4_correct', n_procs=omp_nthreads)
    bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True),
                  name='bet')
    ds_fmap_mask = pe.Node(DerivativesDataSink(suffix='fmap_mask'),
                           name='ds_report_fmap_mask', run_without_submitting=True)

    workflow.connect([
        (inputnode, magmrg, [('magnitude', 'in_files')]),
        (inputnode, fmapmrg, [('fieldmap', 'in_files')]),
        (magmrg, n4_correct, [('out_file', 'input_image')]),
        (n4_correct, bet, [('output_image', 'in_file')]),
        (bet, outputnode, [('mask_file', 'fmap_mask'),
                           ('out_file', 'fmap_ref')]),
        (inputnode, ds_fmap_mask, [('fieldmap', 'source_file')]),
        (bet, ds_fmap_mask, [('out_report', 'in_file')]),
    ])

    if fmap_bspline:
        # Alternative FieldEnhance options (currently disabled): despike_threshold=1.0, mask_erode=1
        fmapenh = pe.Node(FieldEnhance(unwrap=False, despike=False),
                          name='fmapenh', mem_gb=4, n_procs=omp_nthreads)

        workflow.connect([
            (bet, fmapenh, [('mask_file', 'in_mask'),
                            ('out_file', 'in_magnitude')]),
            (fmapmrg, fmapenh, [('out_file', 'in_file')]),
            (fmapenh, outputnode, [('out_file', 'fmap')]),
        ])

    else:
        torads = pe.Node(FieldToRadS(), name='torads')
        prelude = pe.Node(fsl.PRELUDE(), name='prelude')
        tohz = pe.Node(FieldToHz(), name='tohz')

        denoise = pe.Node(fsl.SpatialFilter(operation='median', kernel_shape='sphere',
                                            kernel_size=3), name='denoise')
        demean = pe.Node(niu.Function(function=demean_image), name='demean')
        cleanup_wf = cleanup_edge_pipeline(name='cleanup_wf')

        applymsk = pe.Node(fsl.ApplyMask(), name='applymsk')

        workflow.connect([
            (bet, prelude, [('mask_file', 'mask_file'),
                            ('out_file', 'magnitude_file')]),
            (fmapmrg, torads, [('out_file', 'in_file')]),
            (torads, tohz, [('fmap_range', 'range_hz')]),
            (torads, prelude, [('out_file', 'phase_file')]),
            (prelude, tohz, [('unwrapped_phase_file', 'in_file')]),
            (tohz, denoise, [('out_file', 'in_file')]),
            (denoise, demean, [('out_file', 'in_file')]),
            (demean, cleanup_wf, [('out', 'inputnode.in_file')]),
            (bet, cleanup_wf, [('mask_file', 'inputnode.in_mask')]),
            (cleanup_wf, applymsk, [('outputnode.out_file', 'in_file')]),
            (bet, applymsk, [('mask_file', 'mask_file')]),
            (applymsk, outputnode, [('out_file', 'fmap')]),
        ])

    return workflow
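
The ``torads``/``tohz`` pair in the non-B-Spline branch exists because PRELUDE unwraps phase in radians, while the rest of the pipeline expects the fieldmap in Hz; the ``fmap_range``/``range_hz`` connection above carries the scaling needed to undo the conversion. A minimal sketch of the underlying unit relationship only (not the FieldToRadS/FieldToHz implementations, which additionally handle the range scaling implied by that connection):

import numpy as np

def hz_to_rads(fmap_hz):
    """Convert a fieldmap from Hz to rad/s (angular frequency)."""
    return 2 * np.pi * np.asarray(fmap_hz)

def rads_to_hz(fmap_rads):
    """Convert back from rad/s to Hz."""
    return np.asarray(fmap_rads) / (2 * np.pi)

assert np.isclose(rads_to_hz(hz_to_rads(100.0)), 100.0)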
Example #25
def init_fmriprep_wf(subject_list, task_id, echo_idx, run_uuid, work_dir, output_dir, bids_dir,
                     ignore, debug, low_mem, anat_only, longitudinal, t2s_coreg,
                     omp_nthreads, skull_strip_template, skull_strip_fixed_seed,
                     freesurfer, output_spaces, template, medial_surface_nan, cifti_output, hires,
                     use_bbr, bold2t1w_dof, fmap_bspline, fmap_demean, use_syn, force_syn,
                     use_aroma, ignore_aroma_err, aroma_melodic_dim, template_out_grid):
    """
    This workflow organizes the execution of FMRIPREP, with a sub-workflow for
    each subject.

    If FreeSurfer's recon-all is to be run, a FreeSurfer derivatives folder is
    created and populated with any needed template subjects.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        import os
        os.environ['FREESURFER_HOME'] = os.getcwd()
        from fmriprep.workflows.base import init_fmriprep_wf
        wf = init_fmriprep_wf(subject_list=['fmripreptest'],
                              task_id='',
                              echo_idx=None,
                              run_uuid='X',
                              work_dir='.',
                              output_dir='.',
                              bids_dir='.',
                              ignore=[],
                              debug=False,
                              low_mem=False,
                              anat_only=False,
                              longitudinal=False,
                              t2s_coreg=False,
                              omp_nthreads=1,
                              skull_strip_template='OASIS',
                              skull_strip_fixed_seed=False,
                              freesurfer=True,
                              output_spaces=['T1w', 'fsnative',
                                            'template', 'fsaverage5'],
                              template='MNI152NLin2009cAsym',
                              medial_surface_nan=False,
                              cifti_output=False,
                              hires=True,
                              use_bbr=True,
                              bold2t1w_dof=9,
                              fmap_bspline=False,
                              fmap_demean=True,
                              use_syn=True,
                              force_syn=True,
                              use_aroma=False,
                              ignore_aroma_err=False,
                              aroma_melodic_dim=-200,
                              template_out_grid='native')


    Parameters

        subject_list : list
            List of subject labels
        task_id : str or None
            Task ID of BOLD series to preprocess, or ``None`` to preprocess all
        echo_idx : int or None
            Index of echo to preprocess in multiecho BOLD series,
            or ``None`` to preprocess all
        run_uuid : str
            Unique identifier for execution instance
        work_dir : str
            Directory in which to store workflow execution state and temporary files
        output_dir : str
            Directory in which to save derivatives
        bids_dir : str
            Root directory of BIDS dataset
        ignore : list
            Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
        debug : bool
            Enable debugging outputs
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        anat_only : bool
            Disable functional workflows
        longitudinal : bool
            Treat multiple sessions as longitudinal (may increase runtime)
            See sub-workflows for specific differences
        t2s_coreg : bool
            For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
        omp_nthreads : int
            Maximum number of threads an individual process may use
        skull_strip_template : str
            Name of ANTs skull-stripping template ('OASIS' or 'NKI')
        skull_strip_fixed_seed : bool
            Do not use a random seed for skull-stripping - will ensure
            run-to-run replicability when used with --omp-nthreads 1
        freesurfer : bool
            Enable FreeSurfer surface reconstruction (may increase runtime)
        output_spaces : list
            List of output spaces functional images are to be resampled to.
            Some parts of pipeline will only be instantiated for some output spaces.

            Valid spaces:

             - T1w
             - template
             - fsnative
             - fsaverage (or other pre-existing FreeSurfer templates)
        template : str
            Name of template targeted by ``template`` output space
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files
        cifti_output : bool
            Generate bold CIFTI file in output spaces
        hires : bool
            Enable sub-millimeter preprocessing in FreeSurfer
        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        use_syn : bool
            **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
            If fieldmaps are present and enabled, this is not run by default.
        force_syn : bool
            **Temporary**: Always run SyN-based SDC
        use_aroma : bool
            Perform ICA-AROMA on MNI-resampled functional series
        ignore_aroma_err : bool
            Do not fail on ICA-AROMA errors
        template_out_grid : str
            Keyword ('native', '1mm' or '2mm') or path of custom reference
            image for normalization

    """
    fmriprep_wf = Workflow(name='fmriprep_wf')
    fmriprep_wf.base_dir = work_dir

    if freesurfer:
        fsdir = pe.Node(
            BIDSFreeSurferDir(
                derivatives=output_dir,
                freesurfer_home=os.getenv('FREESURFER_HOME'),
                spaces=output_spaces),
            name='fsdir_run_' + run_uuid.replace('-', '_'), run_without_submitting=True)

    reportlets_dir = os.path.join(work_dir, 'reportlets')
    for subject_id in subject_list:
        single_subject_wf = init_single_subject_wf(
            subject_id=subject_id,
            task_id=task_id,
            echo_idx=echo_idx,
            name="single_subject_" + subject_id + "_wf",
            reportlets_dir=reportlets_dir,
            output_dir=output_dir,
            bids_dir=bids_dir,
            ignore=ignore,
            debug=debug,
            low_mem=low_mem,
            anat_only=anat_only,
            longitudinal=longitudinal,
            t2s_coreg=t2s_coreg,
            omp_nthreads=omp_nthreads,
            skull_strip_template=skull_strip_template,
            skull_strip_fixed_seed=skull_strip_fixed_seed,
            freesurfer=freesurfer,
            output_spaces=output_spaces,
            template=template,
            medial_surface_nan=medial_surface_nan,
            cifti_output=cifti_output,
            hires=hires,
            use_bbr=use_bbr,
            bold2t1w_dof=bold2t1w_dof,
            fmap_bspline=fmap_bspline,
            fmap_demean=fmap_demean,
            use_syn=use_syn,
            force_syn=force_syn,
            template_out_grid=template_out_grid,
            use_aroma=use_aroma,
            aroma_melodic_dim=aroma_melodic_dim,
            ignore_aroma_err=ignore_aroma_err,
        )

        single_subject_wf.config['execution']['crashdump_dir'] = (
            os.path.join(output_dir, "fmriprep", "sub-" + subject_id, 'log', run_uuid)
        )
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)
        if freesurfer:
            fmriprep_wf.connect(fsdir, 'subjects_dir',
                                single_subject_wf, 'inputnode.subjects_dir')
        else:
            fmriprep_wf.add_nodes([single_subject_wf])

    return fmriprep_wf
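
The assembled top-level workflow is executed elsewhere with one of Nipype's standard execution plugins. A minimal sketch reusing the placeholder arguments from the docstring example above (plugin settings are illustrative; running end-to-end naturally requires a real BIDS dataset and a FreeSurfer installation):

import os
os.environ['FREESURFER_HOME'] = os.getcwd()  # placeholder, as in the docstring example
from fmriprep.workflows.base import init_fmriprep_wf

wf = init_fmriprep_wf(subject_list=['fmripreptest'], task_id='', echo_idx=None,
                      run_uuid='X', work_dir='.', output_dir='.', bids_dir='.',
                      ignore=[], debug=False, low_mem=False, anat_only=False,
                      longitudinal=False, t2s_coreg=False, omp_nthreads=4,
                      skull_strip_template='OASIS', skull_strip_fixed_seed=False,
                      freesurfer=True, output_spaces=['T1w', 'template'],
                      template='MNI152NLin2009cAsym', medial_surface_nan=False,
                      cifti_output=False, hires=True, use_bbr=True, bold2t1w_dof=9,
                      fmap_bspline=False, fmap_demean=True, use_syn=True, force_syn=True,
                      use_aroma=False, ignore_aroma_err=False, aroma_melodic_dim=-200,
                      template_out_grid='native')

# Standard Nipype execution with the multiprocessing plugin.
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4, 'memory_gb': 8})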
Example #26
def init_bold_reg_wf(
    freesurfer,
    use_bbr,
    bold2t1w_dof,
    bold2t1w_init,
    mem_gb,
    omp_nthreads,
    name='bold_reg_wf',
    sloppy=False,
    write_report=True,
):
    """
    Build a workflow to run same-subject, BOLD-to-T1w image-registration.

    Calculates the registration between a reference BOLD image and T1w-space
    using a boundary-based registration (BBR) cost function.
    If FreeSurfer-based preprocessing is enabled, the ``bbregister`` utility
    is used to align the BOLD images to the reconstructed subject, and the
    resulting transform is adjusted to target the T1 space.
    If FreeSurfer-based preprocessing is disabled, FSL FLIRT is used with the
    BBR cost function to directly target the T1 space.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold.registration import init_bold_reg_wf
            wf = init_bold_reg_wf(freesurfer=True,
                                  mem_gb=3,
                                  omp_nthreads=1,
                                  use_bbr=True,
                                  bold2t1w_dof=9,
                                  bold2t1w_init='register')

    Parameters
    ----------
    freesurfer : :obj:`bool`
        Enable FreeSurfer functional registration (bbregister)
    use_bbr : :obj:`bool` or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    bold2t1w_init : str, 'header' or 'register'
        If ``'header'``, use header information for initialization of BOLD and T1 images.
        If ``'register'``, align volumes by their centers.
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``bold_reg_wf``)
    sloppy : :obj:`bool`
        Run registration with low-quality, fast settings (intended for testing only)
    write_report : :obj:`bool`
        Whether a reportlet should be stored

    Inputs
    ------
    ref_bold_brain
        Reference image to which BOLD series is aligned;
        if susceptibility distortion correction was applied, this reference should already be unwarped
    t1w_brain
        Skull-stripped ``t1w_preproc``
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w

    Outputs
    -------
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    itk_t1_to_bold
        Affine transform from T1 space to BOLD space (ITK format)
    fallback
        Boolean indicating whether BBR was rejected (mri_coreg registration returned)

    See Also
    --------
      * :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`
      * :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`

    """
    from ...interfaces import DerivativesDataSink
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow

    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'ref_bold_brain', 't1w_brain', 't1w_dseg', 'subjects_dir',
        'subject_id', 'fsnative2t1w_xfm'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['itk_bold_to_t1', 'itk_t1_to_bold', 'fallback']),
                         name='outputnode')

    # Default to no bbr for the moment (overrides the ``use_bbr`` argument passed in)
    use_bbr = False
    if freesurfer:
        bbr_wf = init_bbreg_wf(use_bbr=use_bbr,
                               bold2t1w_dof=bold2t1w_dof,
                               bold2t1w_init=bold2t1w_init,
                               omp_nthreads=omp_nthreads)
    else:
        bbr_wf = init_fsl_bbr_wf(use_bbr=use_bbr,
                                 bold2t1w_dof=bold2t1w_dof,
                                 bold2t1w_init=bold2t1w_init,
                                 sloppy=sloppy)

    workflow.connect([
        (inputnode, bbr_wf, [('ref_bold_brain', 'inputnode.in_file'),
                             ('fsnative2t1w_xfm',
                              'inputnode.fsnative2t1w_xfm'),
                             ('subjects_dir', 'inputnode.subjects_dir'),
                             ('subject_id', 'inputnode.subject_id'),
                             ('t1w_dseg', 'inputnode.t1w_dseg'),
                             ('t1w_brain', 'inputnode.t1w_brain')]),
        (bbr_wf, outputnode, [('outputnode.itk_bold_to_t1', 'itk_bold_to_t1'),
                              ('outputnode.itk_t1_to_bold', 'itk_t1_to_bold'),
                              ('outputnode.fallback', 'fallback')]),
    ])

    if write_report:
        ds_report_reg = pe.Node(DerivativesDataSink(
            datatype="figures", dismiss_entities=("echo", )),
                                name='ds_report_reg',
                                run_without_submitting=True,
                                mem_gb=mem_gb)

        def _bold_reg_suffix(fallback, freesurfer):
            if fallback:
                return 'coreg' if freesurfer else 'flirtnobbr'
            return 'bbregister' if freesurfer else 'flirtbbr'

        workflow.connect([
            (bbr_wf, ds_report_reg, [('outputnode.out_report', 'in_file'),
                                     (('outputnode.fallback', _bold_reg_suffix,
                                       freesurfer), 'desc')]),
        ])

    return workflow
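
The ``desc`` entity of the registration reportlet encodes which registration path produced the accepted transform. A quick, self-contained demonstration of the mapping implemented by ``_bold_reg_suffix`` above:

def _bold_reg_suffix(fallback, freesurfer):
    if fallback:
        return 'coreg' if freesurfer else 'flirtnobbr'
    return 'bbregister' if freesurfer else 'flirtbbr'

print(_bold_reg_suffix(False, True))   # 'bbregister': FreeSurfer BBR accepted
print(_bold_reg_suffix(True, True))    # 'coreg':      BBR rejected, mri_coreg result kept
print(_bold_reg_suffix(False, False))  # 'flirtbbr':   FSL FLIRT-BBR accepted
print(_bold_reg_suffix(True, False))   # 'flirtnobbr': BBR rejected, rigid FLIRT result kept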
Example #27
def init_single_subject_wf(subject_id):
    """
    Organize the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fprodents.workflows.tests import mock_config
            from fprodents.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')

    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.

    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSInfo
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.connections import listify
    from niworkflows.utils.spaces import Reference
    from niworkflows.workflows.epi.refmap import init_epi_reference_wf
    from ..patch.interfaces import BIDSDataGrabber
    from ..patch.utils import extract_entities, fix_multi_source_name
    from ..patch.workflows.anatomical import init_anat_preproc_wf

    subject_data = collect_data(
        config.execution.layout,
        subject_id,
        config.execution.task_id,
        config.execution.echo_idx,
        bids_filters=config.execution.bids_filters,
    )[0]

    anat_only = config.workflow.anat_only
    # Make sure we always go through these two checks
    if not anat_only and not subject_data["bold"]:
        task_id = config.execution.task_id
        raise RuntimeError(
            f"No BOLD images found for participant <{subject_id}> and "
            f"task <{task_id or 'all'}>. All workflows require BOLD images.")

    workflow = Workflow(name=f"single_subject_{subject_id}_wf")
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep-rodents* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(
        fmriprep_ver=config.environment.version,
        nipype_ver=config.environment.nipype_version,
    )
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep-rodents.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    spaces = config.workflow.spaces
    output_dir = str(config.execution.output_dir)

    inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]),
                        name="inputnode")

    bidssrc = pe.Node(
        BIDSDataGrabber(subject_data=subject_data,
                        anat_only=anat_only,
                        subject_id=subject_id),
        name="bidssrc",
    )

    bids_info = pe.Node(
        BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False),
        name="bids_info",
    )

    summary = pe.Node(
        SubjectSummary(
            std_spaces=spaces.get_spaces(nonstandard=False),
            nstd_spaces=spaces.get_spaces(standard=False),
        ),
        name="summary",
        run_without_submitting=True,
    )

    about = pe.Node(
        AboutSummary(version=config.environment.version,
                     command=" ".join(sys.argv)),
        name="about",
        run_without_submitting=True,
    )

    ds_report_summary = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            desc="summary",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_summary",
        run_without_submitting=True,
    )

    ds_report_about = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            desc="about",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_about",
        run_without_submitting=True,
    )

    anat_derivatives = config.execution.anat_derivatives
    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives

        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            False,
        )
        if anat_derivatives is None:
            config.loggers.workflow.warning(f"""\
Attempted to access pre-existing anatomical derivatives at \
<{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \
were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>).""")

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=str(config.execution.bids_dir),
        debug=config.execution.debug is True,
        existing_derivatives=anat_derivatives,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=output_dir,
        skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        spaces=spaces,
        t2w=subject_data["t2w"],
    )

    # fmt:off
    workflow.connect([
        (bidssrc, bids_info, [(('t2w', fix_multi_source_name), 'in_file')]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('bold', 'bold')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bidssrc, anat_preproc_wf, [('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi')]),
        (bidssrc, ds_report_summary, [(('t2w', fix_multi_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t2w', fix_multi_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])
    # fmt:on

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            workflow.get_node(node).interface.out_path_base = "fmriprep"

    if anat_only:
        return workflow

    # Append the functional section to the existing anatomical excerpt,
    # so that we do not need to stream down the number of BOLD datasets
    anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ or "") + """

Functional data preprocessing

: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=len(subject_data["bold"])))

    for bold_file in subject_data["bold"]:
        echoes = extract_entities(bold_file).get("echo", [])
        echo_idxs = listify(echoes)
        multiecho = len(echo_idxs) > 2

        #  The default N4 shrink factor (4) appears to artificially blur values across
        #  anisotropic voxels. Shrink factors are intended to speed up calculation
        #  but in most cases, the extra calculation time appears to be minimal.
        #  Similarly, the use of an asymmetric bspline grid improves performance
        #  in anisotropic voxels. The number of N4 iterations are also reduced.
        bold_ref_wf = init_epi_reference_wf(
            auto_bold_nss=True,
            omp_nthreads=config.nipype.omp_nthreads,
            n4_iter=4,
            adaptive_bspline_grid=True,
            shrink_factor=1,
        )
        bold_ref_wf.inputs.inputnode.in_files = (bold_file if not multiecho
                                                 else bold_file[0])

        func_preproc_wf = init_func_preproc_wf(bold_file)

        # fmt:off
        workflow.connect([
            (anat_preproc_wf, func_preproc_wf,
             [('outputnode.t2w_preproc', 'inputnode.anat_preproc'),
              ('outputnode.t2w_mask', 'inputnode.anat_mask'),
              ('outputnode.t2w_dseg', 'inputnode.anat_dseg'),
              ('outputnode.t2w_tpms', 'inputnode.anat_tpms'),
              ('outputnode.template', 'inputnode.template'),
              ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm')]),
            (bold_ref_wf, func_preproc_wf,
             [('outputnode.epi_ref_file', 'inputnode.ref_file'),
              ('outputnode.xfm_files', 'inputnode.bold_ref_xfm'),
              ('outputnode.validation_report', 'inputnode.validation_report'),
              (('outputnode.n_dummy', _pop), 'inputnode.n_dummy_scans')]),
        ])
        # fmt:on
    return workflow
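
The multi-echo check above hinges on how many ``echo`` entities are attached to the BOLD input. A small sketch of that logic with stand-in values (the real ``extract_entities`` and ``listify`` come from the patch module and niworkflows, respectively; the simplified ``listify`` below is only an illustration):

def listify(value):
    # Simplified stand-in for niworkflows.utils.connections.listify
    return list(value) if isinstance(value, (list, tuple)) else [value]

single_echo = listify(1)         # e.g. one file carrying entity echo=1
multi_echo = listify([1, 2, 3])  # e.g. three echo files matched for this run

print(len(single_echo) > 2)  # False -> treated as single-echo
print(len(multi_echo) > 2)   # True  -> treated as multi-echo (at least three echoes)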
Example #28
def init_fsl_bbr_wf(use_bbr,
                    bold2t1w_dof,
                    bold2t1w_init,
                    sloppy=False,
                    name='fsl_bbr_wf'):
    """
    Build a workflow to run FSL's ``flirt``.

    This workflow uses FSL FLIRT to register a BOLD image to a T1-weighted
    structural image, using a boundary-based registration (BBR) cost function.
    It is a counterpart to :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`,
    which performs the same task using FreeSurfer's ``bbregister``.

    The ``use_bbr`` option permits a high degree of control over registration.
    If ``False``, standard, rigid coregistration will be performed by FLIRT.
    If ``True``, FLIRT-BBR will be seeded with the initial transform found by
    the rigid coregistration.
    If ``None``, after FLIRT-BBR is run, the resulting affine transform
    will be compared to the initial transform found by FLIRT.
    Excessive deviation will result in rejecting the BBR refinement and
    accepting the original, affine registration.

    Workflow Graph
        .. workflow ::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold.registration import init_fsl_bbr_wf
            wf = init_fsl_bbr_wf(use_bbr=True, bold2t1w_dof=9, bold2t1w_init='register')


    Parameters
    ----------
    use_bbr : :obj:`bool` or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    bold2t1w_init : str, 'header' or 'register'
        If ``'header'``, use header information for initialization of BOLD and T1 images.
        If ``'register'``, align volumes by their centers.
    name : :obj:`str`, optional
        Workflow name (default: fsl_bbr_wf)

    Inputs
    ------
    in_file
        Reference BOLD image to be registered
    t1w_brain
        Skull-stripped T1-weighted structural image
    t1w_dseg
        FAST segmentation of ``t1w_brain``
    fsnative2t1w_xfm
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)
    subjects_dir
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)
    subject_id
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)

    Outputs
    -------
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1w space (ITK format)
    itk_t1_to_bold
        Affine transform from T1 space to BOLD space (ITK format)
    out_report
        Reportlet for assessing registration quality
    fallback
        Boolean indicating whether BBR was rejected (rigid FLIRT registration returned)

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.utils.images import dseg_label as _dseg_label
    from niworkflows.interfaces.freesurfer import PatchedLTAConvert as LTAConvert
    from niworkflows.interfaces.reportlets.registration import FLIRTRPT
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD reference was then co-registered to the T1w reference using
`flirt` [FSL {fsl_ver}, @flirt] with the boundary-based registration [@bbr]
cost-function.
Co-registration was configured with {dof} degrees of freedom to account
for distortions remaining in the BOLD reference.
""".format(dof={6: 'six', 9: 'nine', 12: 'twelve'}[bold2t1w_dof],
           fsl_ver=FLIRTRPT().version or '<ver>')

    inputnode = pe.Node(
        niu.IdentityInterface([
            'in_file',
            'fsnative2t1w_xfm',
            'subjects_dir',
            'subject_id',  # BBRegister
            't1w_dseg',
            't1w_brain'
        ]),  # FLIRT BBR
        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        ['itk_bold_to_t1', 'itk_t1_to_bold', 'out_report', 'fallback']),
                         name='outputnode')

    wm_mask = pe.Node(niu.Function(function=_dseg_label), name='wm_mask')
    wm_mask.inputs.label = 2  # BIDS default is WM=2
    flt_bbr_init = pe.Node(FLIRTRPT(dof=6,
                                    generate_report=not use_bbr,
                                    uses_qform=True),
                           name='flt_bbr_init')

    if bold2t1w_init not in ("register", "header"):
        raise ValueError(
            f"Unknown BOLD-T1w initialization option: {bold2t1w_init}")

    if bold2t1w_init == "header":
        raise NotImplementedError(
            "Header-based registration initialization not supported for FSL")

    invt_bbr = pe.Node(fsl.ConvertXFM(invert_xfm=True),
                       name='invt_bbr',
                       mem_gb=DEFAULT_MEMORY_MIN_GB)

    # The BOLD-to-T1w transform matrix comes from FSL; use the C3D affine tool
    # to convert it into an ITK transform that ANTs can consume.
    fsl2itk_fwd = pe.Node(c3.C3dAffineTool(fsl2ras=True, itk_transform=True),
                          name='fsl2itk_fwd',
                          mem_gb=DEFAULT_MEMORY_MIN_GB)
    fsl2itk_inv = pe.Node(c3.C3dAffineTool(fsl2ras=True, itk_transform=True),
                          name='fsl2itk_inv',
                          mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, flt_bbr_init, [('in_file', 'in_file'),
                                   ('t1w_brain', 'reference')]),
        (inputnode, fsl2itk_fwd, [('t1w_brain', 'reference_file'),
                                  ('in_file', 'source_file')]),
        (inputnode, fsl2itk_inv, [('in_file', 'reference_file'),
                                  ('t1w_brain', 'source_file')]),
        (invt_bbr, fsl2itk_inv, [('out_file', 'transform_file')]),
        (fsl2itk_fwd, outputnode, [('itk_transform', 'itk_bold_to_t1')]),
        (fsl2itk_inv, outputnode, [('itk_transform', 'itk_t1_to_bold')]),
    ])

    # Short-circuit workflow building, use rigid registration
    if use_bbr is False:
        workflow.connect([
            (flt_bbr_init, invt_bbr, [('out_matrix_file', 'in_file')]),
            (flt_bbr_init, fsl2itk_fwd, [('out_matrix_file', 'transform_file')]),
            (flt_bbr_init, outputnode, [('out_report', 'out_report')]),
        ])
        outputnode.inputs.fallback = True

        return workflow

    flt_bbr = pe.Node(FLIRTRPT(cost_func='bbr',
                               dof=bold2t1w_dof,
                               generate_report=True),
                      name='flt_bbr')

    FSLDIR = os.getenv('FSLDIR')
    if FSLDIR:
        flt_bbr.inputs.schedule = os.path.join(FSLDIR, 'etc/flirtsch/bbr.sch')
    else:
        # Should mostly be hit while building docs
        LOGGER.warning("FSLDIR unset - using packaged BBR schedule")
        flt_bbr.inputs.schedule = pkgr.resource_filename(
            'fmriprep', 'data/flirtsch/bbr.sch')

    workflow.connect([
        (inputnode, wm_mask, [('t1w_dseg', 'in_seg')]),
        (inputnode, flt_bbr, [('in_file', 'in_file')]),
        (flt_bbr_init, flt_bbr, [('out_matrix_file', 'in_matrix_file')]),
    ])

    if sloppy is True:
        downsample = pe.Node(niu.Function(
            function=_conditional_downsampling,
            output_names=["out_file", "out_mask"]),
                             name='downsample')
        workflow.connect([
            (inputnode, downsample, [("t1w_brain", "in_file")]),
            (wm_mask, downsample, [("out", "in_mask")]),
            (downsample, flt_bbr, [('out_file', 'reference'),
                                   ('out_mask', 'wm_seg')]),
        ])
    else:
        workflow.connect([
            (inputnode, flt_bbr, [('t1w_brain', 'reference')]),
            (wm_mask, flt_bbr, [('out', 'wm_seg')]),
        ])

    # Short-circuit workflow building, use boundary-based registration
    if use_bbr is True:
        workflow.connect([
            (flt_bbr, invt_bbr, [('out_matrix_file', 'in_file')]),
            (flt_bbr, fsl2itk_fwd, [('out_matrix_file', 'transform_file')]),
            (flt_bbr, outputnode, [('out_report', 'out_report')]),
        ])
        outputnode.inputs.fallback = False

        return workflow

    transforms = pe.Node(niu.Merge(2),
                         run_without_submitting=True,
                         name='transforms')
    reports = pe.Node(niu.Merge(2),
                      run_without_submitting=True,
                      name='reports')

    compare_transforms = pe.Node(niu.Function(function=compare_xforms),
                                 name='compare_transforms')

    select_transform = pe.Node(niu.Select(),
                               run_without_submitting=True,
                               name='select_transform')
    select_report = pe.Node(niu.Select(),
                            run_without_submitting=True,
                            name='select_report')

    fsl_to_lta = pe.MapNode(LTAConvert(out_lta=True),
                            iterfield=['in_fsl'],
                            name='fsl_to_lta')

    workflow.connect([
        (flt_bbr, transforms, [('out_matrix_file', 'in1')]),
        (flt_bbr_init, transforms, [('out_matrix_file', 'in2')]),
        # Convert FSL transforms to LTA (RAS2RAS) transforms and compare
        (inputnode, fsl_to_lta, [('in_file', 'source_file'),
                                 ('t1w_brain', 'target_file')]),
        (transforms, fsl_to_lta, [('out', 'in_fsl')]),
        (fsl_to_lta, compare_transforms, [('out_lta', 'lta_list')]),
        (compare_transforms, outputnode, [('out', 'fallback')]),
        # Select output transform
        (transforms, select_transform, [('out', 'inlist')]),
        (compare_transforms, select_transform, [('out', 'index')]),
        (select_transform, invt_bbr, [('out', 'in_file')]),
        (select_transform, fsl2itk_fwd, [('out', 'transform_file')]),
        (flt_bbr, reports, [('out_report', 'in1')]),
        (flt_bbr_init, reports, [('out_report', 'in2')]),
        (reports, select_report, [('out', 'inlist')]),
        (compare_transforms, select_report, [('out', 'index')]),
        (select_report, outputnode, [('out', 'out_report')]),
    ])

    return workflow
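# Not part of the original example: a usage sketch (hypothetical file paths) that
# builds the full fallback-aware graph. With use_bbr=None both FLIRT runs are kept
# and compare_xforms decides at runtime whether the BBR refinement is accepted.
bbr_wf = init_fsl_bbr_wf(use_bbr=None, bold2t1w_dof=6, bold2t1w_init='register')
bbr_wf.inputs.inputnode.in_file = 'sub-01_task-rest_boldref.nii.gz'
bbr_wf.inputs.inputnode.t1w_brain = 'sub-01_desc-brain_T1w.nii.gz'
bbr_wf.inputs.inputnode.t1w_dseg = 'sub-01_dseg.nii.gz'
# bbr_wf.run()  # exposes itk_bold_to_t1, itk_t1_to_bold, out_report and fallback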
Example #29
0
def init_correlation_wf(name="correlation_wf"):
    """
    This workflow calculates betaseries correlations using a parcellation
    from an atlas.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from nibetaseries.workflows.analysis import init_correlation_wf
        wf = init_correlation_wf()

    Parameters
    ----------

        name : str
            Name of workflow (default: ``correlation_wf``)

    Inputs
    ------

        betaseries_files
            list of betaseries files
        atlas_file
            atlas file with indexed regions of interest
        atlas_lut
            atlas lookup table (TSV) with a column for region names
            and a column for the corresponding integer label.

    Outputs
    -------

        correlation_matrix
            a matrix (TSV) file containing all ROI-to-ROI correlations
        correlation_fig
            an SVG file with a circular connectivity plot showing all ROI-to-ROI correlations
    """
    workflow = Workflow(name=name)

    workflow.__desc__ = """\

### Atlas Connectivity Analysis

The beta series 4D image for each condition in the task was subjected to an
ROI-to-ROI connectivity analysis to produce a condition-specific correlation
matrix.
The correlation coefficient estimator used for this step was empirical
covariance, as implemented in Nilearn {nilearn_ver} [@Abraham2014].
Correlation coefficients were converted to normally-distributed z-values using
Fisher's r-to-z conversion [@Fisher1915].
Figures for the correlation matrices were generated with
Matplotlib {matplotlib_ver} [@Hunter2007] and MNE-Python {mne_ver}
[@Gramfort2013; @Gramfort2014].
""".format(nilearn_ver=nilearn_ver,
           matplotlib_ver=matplotlib_ver,
           mne_ver=mne_ver)

    input_node = pe.MapNode(niu.IdentityInterface(
        fields=['betaseries_files', 'atlas_file', 'atlas_lut']),
                            iterfield=['betaseries_files'],
                            name='input_node')

    output_node = pe.Node(niu.IdentityInterface(
        fields=['correlation_matrix', 'correlation_fig']),
                          name='output_node')

    atlas_corr_node = pe.MapNode(AtlasConnectivity(),
                                 name='atlas_corr_node',
                                 iterfield=['timeseries_file'])

    workflow.connect([
        (input_node, atlas_corr_node, [('betaseries_files', 'timeseries_file'),
                                       ('atlas_file', 'atlas_file'),
                                       ('atlas_lut', 'atlas_lut')]),
        (atlas_corr_node, output_node, [('correlation_fig', 'correlation_fig'),
                                        ('correlation_matrix',
                                         'correlation_matrix')])
    ])

    return workflow
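# Not part of the original example: a usage sketch with hypothetical inputs -- one
# betaseries file per condition, an indexed atlas, and its lookup table.
corr_wf = init_correlation_wf(name='correlation_wf')
corr_wf.inputs.input_node.betaseries_files = [
    'sub-01_task-stroop_condition-congruent_betaseries.nii.gz',
    'sub-01_task-stroop_condition-incongruent_betaseries.nii.gz',
]
corr_wf.inputs.input_node.atlas_file = 'atlas.nii.gz'
corr_wf.inputs.input_node.atlas_lut = 'atlas_lut.tsv'
# corr_wf.run()  # writes a correlation matrix (TSV) and a connectivity plot (SVG) per betaseries file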
Example #30
0
def init_fmriprep_wf(
    anat_only,
    aroma_melodic_dim,
    bold2t1w_dof,
    cifti_output,
    debug,
    dummy_scans,
    echo_idx,
    err_on_aroma_warn,
    fmap_bspline,
    fmap_demean,
    force_syn,
    freesurfer,
    fs_subjects_dir,
    hires,
    ignore,
    layout,
    longitudinal,
    low_mem,
    medial_surface_nan,
    omp_nthreads,
    output_dir,
    output_spaces,
    regressors_all_comps,
    regressors_dvars_th,
    regressors_fd_th,
    run_uuid,
    skull_strip_fixed_seed,
    skull_strip_template,
    subject_list,
    t2s_coreg,
    task_id,
    use_aroma,
    use_bbr,
    use_syn,
    work_dir,
):
    """
    Build *fMRIPrep*'s pipeline.

    This workflow organizes the execution of FMRIPREP, with a sub-workflow for
    each subject.
    If FreeSurfer's ``recon-all`` is to be run, a corresponding folder is created
    and populated with any needed template subjects under the derivatives folder.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            import os
            from collections import namedtuple, OrderedDict
            BIDSLayout = namedtuple('BIDSLayout', ['root'])
            from fmriprep.workflows.base import init_fmriprep_wf
            os.environ['FREESURFER_HOME'] = os.getcwd()
            wf = init_fmriprep_wf(
                anat_only=False,
                aroma_melodic_dim=-200,
                bold2t1w_dof=9,
                cifti_output=False,
                debug=False,
                dummy_scans=None,
                echo_idx=None,
                err_on_aroma_warn=False,
                fmap_bspline=False,
                fmap_demean=True,
                force_syn=True,
                freesurfer=True,
                fs_subjects_dir=None,
                hires=True,
                ignore=[],
                layout=BIDSLayout('.'),
                longitudinal=False,
                low_mem=False,
                medial_surface_nan=False,
                omp_nthreads=1,
                output_dir='.',
                output_spaces=OrderedDict([
                    ('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
                    ('T1w', {}), ('fsnative', {})]),
                regressors_all_comps=False,
                regressors_dvars_th=1.5,
                regressors_fd_th=0.5,
                run_uuid='X',
                skull_strip_fixed_seed=False,
                skull_strip_template=('OASIS30ANTs', {}),
                subject_list=['fmripreptest'],
                t2s_coreg=False,
                task_id='',
                use_aroma=False,
                use_bbr=True,
                use_syn=True,
                work_dir='.',
            )


    Parameters
    ----------
    anat_only : bool
        Disable functional workflows
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    cifti_output : bool
        Generate bold CIFTI file in output spaces
    debug : bool
        Enable debugging outputs
    dummy_scans : int or None
        Number of volumes to consider as non-steady state
    echo_idx : int or None
        Index of echo to preprocess in multiecho BOLD series,
        or ``None`` to preprocess all
    err_on_aroma_warn : bool
        Raise an error, rather than just a warning, when ICA-AROMA does not produce sensible output
    fmap_bspline : bool
        **Experimental**: Fit B-Spline field using least-squares
    fmap_demean : bool
        Demean voxel-shift map during unwarp
    force_syn : bool
        **Temporary**: Always run SyN-based SDC
    freesurfer : bool
        Enable FreeSurfer surface reconstruction (may increase runtime)
    hires : bool
        Enable sub-millimeter preprocessing in FreeSurfer
    ignore : list
        Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
    layout : BIDSLayout object
        BIDS dataset layout
    longitudinal : bool
        Treat multiple sessions as longitudinal (may increase runtime)
        See sub-workflows for specific differences
    low_mem : bool
        Write uncompressed .nii files in some cases to reduce memory usage
    medial_surface_nan : bool
        Replace medial wall values with NaNs on functional GIFTI files
    omp_nthreads : int
        Maximum number of threads an individual process may use
    output_dir : str
        Directory in which to save derivatives
    output_spaces : OrderedDict
        Ordered dictionary where keys are TemplateFlow ID strings (e.g., ``MNI152Lin``,
        ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``), strings designating
        nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
        or paths pointing to custom templates organized in a TemplateFlow-like structure.
        Values of the dictionary aggregate modifiers (e.g., the value for the key ``MNI152Lin``
        could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
        resolution version of the selected template).
    regressors_all_comps
        Return all CompCor component time series instead of the top fraction
    regressors_dvars_th
        Criterion for flagging DVARS outliers
    regressors_fd_th
        Criterion for flagging framewise displacement outliers
    run_uuid : str
        Unique identifier for execution instance
    skull_strip_template : tuple
        Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
        and corresponding dictionary of output-space modifiers.
    skull_strip_fixed_seed : bool
        Do not use a random seed for skull-stripping; ensures
        run-to-run replicability when used with ``--omp-nthreads 1``
    subject_list : list
        List of subject labels
    t2s_coreg : bool
        For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
    task_id : str or None
        Task ID of BOLD series to preprocess, or ``None`` to preprocess all
    use_aroma : bool
        Perform ICA-AROMA on MNI-resampled functional series
    use_bbr : bool or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    use_syn : bool
        **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
        If fieldmaps are present and enabled, this is not run, by default.
    work_dir : str
        Directory in which to store workflow execution state and temporary files

    """
    fmriprep_wf = Workflow(name='fmriprep_wf')
    fmriprep_wf.base_dir = work_dir

    if freesurfer:
        fsdir = pe.Node(BIDSFreeSurferDir(
            derivatives=output_dir,
            freesurfer_home=os.getenv('FREESURFER_HOME'),
            spaces=[
                s for s in output_spaces.keys() if s.startswith('fsaverage')
            ] + ['fsnative'] * ('fsnative' in output_spaces)),
                        name='fsdir_run_' + run_uuid.replace('-', '_'),
                        run_without_submitting=True)
        if fs_subjects_dir is not None:
            fsdir.inputs.subjects_dir = str(fs_subjects_dir.absolute())

    reportlets_dir = os.path.join(work_dir, 'reportlets')
    for subject_id in subject_list:
        single_subject_wf = init_single_subject_wf(
            anat_only=anat_only,
            aroma_melodic_dim=aroma_melodic_dim,
            bold2t1w_dof=bold2t1w_dof,
            cifti_output=cifti_output,
            debug=debug,
            dummy_scans=dummy_scans,
            echo_idx=echo_idx,
            err_on_aroma_warn=err_on_aroma_warn,
            fmap_bspline=fmap_bspline,
            fmap_demean=fmap_demean,
            force_syn=force_syn,
            freesurfer=freesurfer,
            hires=hires,
            ignore=ignore,
            layout=layout,
            longitudinal=longitudinal,
            low_mem=low_mem,
            medial_surface_nan=medial_surface_nan,
            name="single_subject_" + subject_id + "_wf",
            omp_nthreads=omp_nthreads,
            output_dir=output_dir,
            output_spaces=output_spaces,
            regressors_all_comps=regressors_all_comps,
            regressors_dvars_th=regressors_dvars_th,
            regressors_fd_th=regressors_fd_th,
            reportlets_dir=reportlets_dir,
            skull_strip_fixed_seed=skull_strip_fixed_seed,
            skull_strip_template=skull_strip_template,
            subject_id=subject_id,
            t2s_coreg=t2s_coreg,
            task_id=task_id,
            use_aroma=use_aroma,
            use_bbr=use_bbr,
            use_syn=use_syn,
        )

        single_subject_wf.config['execution']['crashdump_dir'] = (os.path.join(
            output_dir, "fmriprep", "sub-" + subject_id, 'log', run_uuid))
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)
        if freesurfer:
            fmriprep_wf.connect(fsdir, 'subjects_dir', single_subject_wf,
                                'inputnode.subjects_dir')
        else:
            fmriprep_wf.add_nodes([single_subject_wf])

    return fmriprep_wf
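# Not part of the original example: a small illustration of the list-by-boolean
# idiom used above for the ``spaces`` input of BIDSFreeSurferDir (keys made up).
output_spaces = {'MNI152Lin': {}, 'fsaverage': {'density': '10k'}, 'fsnative': {}}
spaces = ([s for s in output_spaces if s.startswith('fsaverage')]
          + ['fsnative'] * ('fsnative' in output_spaces))
# -> ['fsaverage', 'fsnative']; multiplying by False (0) drops 'fsnative' when absent.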
Example #31
0
def init_sdc_estimate_wf(fmaps, epi_meta, omp_nthreads=1, debug=False):
    """
    Build a :abbr:`SDC (susceptibility distortion correction)` workflow.

    This workflow implements the heuristics to choose an estimation
    methodology for :abbr:`SDC (susceptibility distortion correction)`.
    When no fieldmap information is present within the BIDS inputs,
    the EXPERIMENTAL "fieldmap-less SyN" method can be performed, using
    the ``--use-syn`` argument. When ``--force-syn`` is specified,
    the "fieldmap-less SyN" method is always executed and reported,
    even when fieldmaps with a higher priority are available.
    In that case (some fieldmap is available and ``--force-syn`` is
    requested), the :abbr:`SDC (susceptibility distortion correction)`
    method actually applied is the one with the highest priority.

    Parameters
    ----------
    fmaps : list of pybids dicts
        A list of dictionaries with the available fieldmaps
        (and their metadata using the key ``'metadata'`` for the
        case of :abbr:`PEPOLAR (Phase-Encoding POLARity)` fieldmaps).
    epi_meta : dict
        BIDS metadata dictionary corresponding to the
        :abbr:`EPI (echo-planar imaging)` run (i.e., suffix ``bold``,
        ``sbref``, or ``dwi``) for which the fieldmap is being estimated.
    omp_nthreads : int
        Maximum number of threads an individual process may use
    debug : bool
        Enable debugging outputs

    Inputs
    ------
    epi_file
        A reference image calculated at a previous stage
    epi_brain
        Same as above, but brain-masked
    epi_mask
        Brain mask for the run
    t1w_brain
        T1w image, brain-masked, for the fieldmap-less SyN method
    std2anat_xfm
        Standard-to-T1w transform generated during spatial
        normalization (only for the fieldmap-less SyN method).

    Outputs
    -------
    epi_corrected
        The EPI scan reference after unwarping.
    epi_mask
        The corresponding new mask after unwarping
    epi_brain
        Brain-extracted, unwarped EPI scan reference
    out_warp
        The deformation field to unwarp the susceptibility distortions
    syn_ref
        If ``--force-syn``, the EPI scan reference unwarped with the
        fieldmap-less SyN method (for reporting purposes)
    method : str
        Short description of the estimation method that was run.

    """
    workflow = Workflow(name='sdc_estimate_wf' if fmaps else 'sdc_bypass_wf')
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['epi_file', 'epi_brain', 'epi_mask', 't1w_brain', 'std2anat_xfm']),
        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['epi_corrected', 'epi_mask', 'epi_brain',
                'out_warp', 'syn_ref', 'method']),
        name='outputnode')

    # No fieldmaps - forward inputs to outputs
    if not fmaps:
        workflow.__postdesc__ = """\
Susceptibility distortion correction (SDC) was omitted.
"""
        outputnode.inputs.method = 'None'
        workflow.connect([
            (inputnode, outputnode, [('epi_file', 'epi_corrected'),
                                     ('epi_mask', 'epi_mask'),
                                     ('epi_brain', 'epi_brain')]),
        ])
        return workflow

    workflow.__postdesc__ = """\
Based on the estimated susceptibility distortion, a corrected
EPI (echo-planar imaging) reference was calculated for a more
accurate co-registration with the anatomical reference.
"""

    only_syn = 'syn' in fmaps and len(fmaps) == 1

    # PEPOLAR path
    if 'epi' in fmaps:
        from .pepolar import init_pepolar_unwarp_wf, check_pes

        # SyN works without this metadata
        if epi_meta.get('PhaseEncodingDirection') is None:
            raise ValueError(
                'PhaseEncodingDirection is not defined within the metadata retrieved '
                'for the intended EPI (DWI, BOLD, or SBRef) run.')
        outputnode.inputs.method = 'PEB/PEPOLAR (phase-encoding based / PE-POLARity)'

        fmaps_epi = [(v[0], v[1].get('PhaseEncodingDirection'))
                     for v in fmaps['epi']]

        if not all(list(zip(*fmaps_epi))[1]):
            raise ValueError(
                'At least one of the EPI runs with alternative phase-encoding '
                'blips is missing the required "PhaseEncodingDirection" metadata entry.')

        # Find matched PE directions
        matched_pe = check_pes(fmaps_epi, epi_meta['PhaseEncodingDirection'])

        # Get EPI polarities and their metadata
        sdc_unwarp_wf = init_pepolar_unwarp_wf(
            matched_pe=matched_pe,
            omp_nthreads=omp_nthreads)
        sdc_unwarp_wf.inputs.inputnode.epi_pe_dir = epi_meta['PhaseEncodingDirection']
        sdc_unwarp_wf.inputs.inputnode.fmaps_epi = fmaps_epi

        workflow.connect([
            (inputnode, sdc_unwarp_wf, [
                ('epi_file', 'inputnode.in_reference'),
                ('epi_brain', 'inputnode.in_reference_brain'),
                ('epi_mask', 'inputnode.in_mask')]),
        ])

    # FIELDMAP path
    elif 'fieldmap' in fmaps or 'phasediff' in fmaps:
        from .fmap import init_fmap2field_wf
        from .unwarp import init_sdc_unwarp_wf

        # SyN works without this metadata
        if epi_meta.get('PhaseEncodingDirection') is None:
            raise ValueError(
                'PhaseEncodingDirection is not defined within the metadata retrieved '
                'for the intended EPI (DWI, BOLD, or SBRef) run.')

        if 'fieldmap' in fmaps:
            from .fmap import init_fmap_wf
            try:
                fmap, = fmaps['fieldmap']
            except ValueError:
                LOGGER.warning('Several B0 fieldmaps found for the given target, using '
                               'the first one.')
                fmap = fmaps['fieldmap'][0]

            outputnode.inputs.method = 'FMB (fieldmap-based) - directly measured B0 map'
            fmap_wf = init_fmap_wf(
                omp_nthreads=omp_nthreads,
                fmap_bspline=False)
            # set inputs
            fmap_wf.inputs.inputnode.magnitude = [
                m for m, _ in fmap['magnitude']]
            fmap_wf.inputs.inputnode.fieldmap = [
                m for m, _ in fmap['fieldmap']]
        elif 'phasediff' in fmaps:
            from .phdiff import init_phdiff_wf
            try:
                fmap, = fmaps['phasediff']
            except ValueError:
                LOGGER.warning('Several phase-difference maps found for the given target, using '
                               'the first one.')
                fmap = fmaps['phasediff'][0]

            outputnode.inputs.method = 'FMB (fieldmap-based) - phase-difference map'
            fmap_wf = init_phdiff_wf(omp_nthreads=omp_nthreads)
            # set inputs
            fmap_wf.inputs.inputnode.magnitude = [
                m for m, _ in fmap['magnitude']]
            fmap_wf.inputs.inputnode.phasediff = fmap['phases']

        fmap2field_wf = init_fmap2field_wf(omp_nthreads=omp_nthreads, debug=debug)
        fmap2field_wf.inputs.inputnode.metadata = epi_meta

        sdc_unwarp_wf = init_sdc_unwarp_wf(
            omp_nthreads=omp_nthreads,
            debug=debug,
            name='sdc_unwarp_wf')

        workflow.connect([
            (inputnode, fmap2field_wf, [
                ('epi_file', 'inputnode.in_reference'),
                ('epi_brain', 'inputnode.in_reference_brain')]),
            (inputnode, sdc_unwarp_wf, [
                ('epi_file', 'inputnode.in_reference'),
                ('epi_mask', 'inputnode.in_reference_mask')]),
            (fmap_wf, fmap2field_wf, [
                ('outputnode.fmap', 'inputnode.fmap'),
                ('outputnode.fmap_ref', 'inputnode.fmap_ref'),
                ('outputnode.fmap_mask', 'inputnode.fmap_mask')]),
            (fmap2field_wf, sdc_unwarp_wf, [
                ('outputnode.out_warp', 'inputnode.in_warp')]),

        ])
    elif not only_syn:
        raise ValueError('Fieldmaps of types %s are not supported' %
                         ', '.join(['"%s"' % f for f in fmaps]))

    # FIELDMAP-less path
    if 'syn' in fmaps:
        from .syn import init_syn_sdc_wf
        syn_sdc_wf = init_syn_sdc_wf(
            epi_pe=epi_meta.get('PhaseEncodingDirection', None),
            omp_nthreads=omp_nthreads)

        workflow.connect([
            (inputnode, syn_sdc_wf, [
                ('epi_file', 'inputnode.in_reference'),
                ('epi_brain', 'inputnode.in_reference_brain'),
                ('t1w_brain', 'inputnode.t1w_brain'),
                ('std2anat_xfm', 'inputnode.std2anat_xfm')]),
        ])

        # XXX Eliminate branch when forcing isn't an option
        if only_syn:  # No fieldmaps, but --use-syn
            outputnode.inputs.method = 'FLB ("fieldmap-less", SyN-based)'
            sdc_unwarp_wf = syn_sdc_wf
        else:  # --force-syn was called when other fieldmap was present
            # Suppress the SyN description; only the primary SDC method is reported
            syn_sdc_wf.__desc__ = None
            workflow.connect([
                (syn_sdc_wf, outputnode, [
                    ('outputnode.out_reference', 'syn_ref')]),
            ])

    workflow.connect([
        (sdc_unwarp_wf, outputnode, [
            ('outputnode.out_warp', 'out_warp'),
            ('outputnode.out_reference', 'epi_corrected'),
            ('outputnode.out_reference_brain', 'epi_brain'),
            ('outputnode.out_mask', 'epi_mask')]),
    ])

    return workflow
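# Not part of the original example: a sketch (hypothetical metadata) of the
# selection heuristics. With no fieldmaps the bypass workflow is returned; a
# SyN-only entry takes the "fieldmap-less" path described in the docstring.
epi_meta = {'PhaseEncodingDirection': 'j-', 'RepetitionTime': 2.0}
bypass_wf = init_sdc_estimate_wf(fmaps=None, epi_meta=epi_meta)             # -> 'sdc_bypass_wf'
syn_only_wf = init_sdc_estimate_wf(fmaps={'syn': True}, epi_meta=epi_meta)  # fieldmap-less SyN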
Example #32
0
def init_func_preproc_wf(bold_file):
    """
    This workflow controls the functional preprocessing stages of *fMRIPrep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.tests import mock_config
            from fmriprep import config
            from fmriprep.workflows.bold.base import init_func_preproc_wf
            with mock_config():
                bold_file = config.execution.bids_dir / 'sub-01' / 'func' \
                    / 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz'
                wf = init_func_preproc_wf(str(bold_file))

    Parameters
    ----------
    bold_file
        BOLD series NIfTI file

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_aseg
        Segmentation of structural image, done with FreeSurfer.
    t1w_aparc
        Parcellation of structural image, done with FreeSurfer.
    t1w_tpms
        List of tissue probability maps in T1w space
    template
        List of templates to target
    anat2std_xfm
        List of transform files, collated with templates
    std2anat_xfm
        List of inverse transform files, collated with templates
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w

    Outputs
    -------
    bold_t1
        BOLD series, resampled to T1w space
    bold_mask_t1
        BOLD series mask in T1w space
    bold_std
        BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    confounds
        TSV of confounds
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    aroma_noise_ics
        Noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix
    bold_cifti
        BOLD CIFTI image
    cifti_variant
        combination of target spaces for `bold_cifti`

    See Also
    --------

    * :py:func:`~niworkflows.func.util.init_bold_reference_wf`
    * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
    * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
    * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confs_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
    * :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
    * :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
    * :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
    * :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
    * :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.func.util import init_bold_reference_wf
    from niworkflows.interfaces.nibabel import ApplyMask
    from niworkflows.interfaces.utility import KeySelect
    from niworkflows.interfaces.utils import DictMerge
    from sdcflows.workflows.base import init_sdc_estimate_wf, fieldmap_wrangler

    ref_file = bold_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10
    multiecho = isinstance(bold_file, list)

    # Have some options handy
    layout = config.execution.layout
    omp_nthreads = config.nipype.omp_nthreads
    freesurfer = config.workflow.run_reconall
    spaces = config.workflow.spaces
    output_dir = str(config.execution.output_dir)

    if multiecho:
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
        ref_file = dict(zip(tes, bold_file))[min(tes)]

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    config.loggers.workflow.debug(
        'Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
        'Memory resampled/largemem=%.2f/%.2f GB.',
        ref_file, mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # Find associated sbref, if possible
    entities = layout.parse_file_entities(ref_file)
    entities['suffix'] = 'sbref'
    entities['extension'] = ['nii', 'nii.gz']  # Overwrite extensions
    files = layout.get(return_type='file', **entities)
    refbase = os.path.basename(ref_file)
    if 'sbref' in config.workflow.ignore:
        config.loggers.workflow.info("Single-band reference files ignored.")
    elif files and multiecho:
        config.loggers.workflow.warning(
            "Single-band reference found, but not supported in "
            "multi-echo workflows at this time. Ignoring.")
    elif files:
        sbref_file = files[0]
        sbbase = os.path.basename(sbref_file)
        if len(files) > 1:
            config.loggers.workflow.warning(
                "Multiple single-band reference files found for {}; using "
                "{}".format(refbase, sbbase))
        else:
            config.loggers.workflow.info("Using single-band reference file %s.",
                                         sbbase)
    else:
        config.loggers.workflow.info("No single-band-reference found for %s.",
                                     refbase)

    metadata = layout.get_metadata(ref_file)

    # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
    fmaps = None
    if 'fieldmaps' not in config.workflow.ignore:
        fmaps = fieldmap_wrangler(layout, ref_file,
                                  use_syn=config.workflow.use_syn_sdc,
                                  force_syn=config.workflow.force_syn)
    elif config.workflow.use_syn_sdc or config.workflow.force_syn:
        # If fieldmaps are not enabled, activate SyN-SDC in unforced (False) mode
        fmaps = {'syn': False}

    # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
    run_stc = (bool(metadata.get("SliceTiming")) and
               'slicetiming' not in config.workflow.ignore and
               (_get_series_len(ref_file) > 4 or "TooShort"))
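    # For instance (hypothetical metadata): SliceTiming present, 'slicetiming' not
    # ignored, but only 3 volumes -> run_stc == 'TooShort', which is truthy yet
    # fails the explicit ``run_stc is True`` check below, so STC is bypassed.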

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_file', 'subjects_dir', 'subject_id',
                't1w_preproc', 't1w_mask', 't1w_dseg', 't1w_tpms',
                't1w_aseg', 't1w_aparc',
                'anat2std_xfm', 'std2anat_xfm', 'template',
                't1w2fsnative_xfm', 'fsnative2t1w_xfm']),
        name='inputnode')
    inputnode.inputs.bold_file = bold_file
    if sbref_file is not None:
        from niworkflows.interfaces.images import ValidateImage
        val_sbref = pe.Node(ValidateImage(in_file=sbref_file), name='val_sbref')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1', 'bold_aparc_t1',
                'bold_std', 'bold_std_ref', 'bold_mask_std', 'bold_aseg_std', 'bold_aparc_std',
                'bold_native', 'bold_cifti', 'cifti_variant', 'cifti_metadata', 'cifti_density',
                'surfaces', 'confounds', 'aroma_noise_ics', 'melodic_mix', 'nonaggr_denoised_file',
                'confounds_metadata']),
        name='outputnode')

    # Generate a brain-masked conversion of the t1w
    t1w_brain = pe.Node(ApplyMask(), name='t1w_brain')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='boldbuffer')

    summary = pe.Node(
        FunctionalSummary(
            slice_timing=run_stc,
            registration=('FSL', 'FreeSurfer')[freesurfer],
            registration_dof=config.workflow.bold2t1w_dof,
            registration_init=config.workflow.bold2t1w_init,
            pe_direction=metadata.get("PhaseEncodingDirection"),
            tr=metadata.get("RepetitionTime")),
        name='summary', mem_gb=config.DEFAULT_MEMORY_MIN_GB, run_without_submitting=True)
    summary.inputs.dummy_scans = config.workflow.dummy_scans

    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=config.workflow.cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=output_dir,
        spaces=spaces,
        use_aroma=config.workflow.use_aroma,
    )

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_t1', 'inputnode.bold_t1'),
            ('bold_t1_ref', 'inputnode.bold_t1_ref'),
            ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
            ('bold_mask_t1', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surf_files'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_metadata', 'inputnode.cifti_metadata'),
            ('cifti_density', 'inputnode.cifti_density'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
        ]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
    bold_reference_wf.inputs.inputnode.dummy_scans = config.workflow.dummy_scans
    if sbref_file is not None:
        workflow.connect([
            (val_sbref, bold_reference_wf, [('out_file', 'inputnode.sbref_file')]),
        ])

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'), name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(
        bold2t1w_dof=config.workflow.bold2t1w_dof,
        bold2t1w_init=config.workflow.bold2t1w_init,
        freesurfer=freesurfer,
        mem_gb=mem_gb['resampled'],
        name='bold_reg_wf',
        omp_nthreads=omp_nthreads,
        sloppy=config.execution.debug,
        use_bbr=config.workflow.use_bbr,
        use_compression=False,
    )

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=bool(fmaps),
                                             multiecho=multiecho,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        regressors_all_comps=config.workflow.regressors_all_comps,
        regressors_fd_th=config.workflow.regressors_fd_th,
        regressors_dvars_th=config.workflow.regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=bool(fmaps),
        name='bold_bold_trans_wf'
    )
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf, [
                ('outputnode.skip_vols', 'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([
                (bold_reference_wf, bold_stc_wf, [
                    ('outputnode.bold_file', 'inputnode.bold_file')])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([
                (meepi_echos, bold_stc_wf, [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([
            (bold_reference_wf, boldbuffer, [('outputnode.bold_file', 'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_estimate_wf(fmaps, metadata,
                                       omp_nthreads=omp_nthreads,
                                       debug=config.execution.debug)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from niworkflows.func.util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        inputnode.inputs.bold_file = ref_file  # Replace reference w first echo

        join_echos = pe.JoinNode(niu.IdentityInterface(fields=['bold_files']),
                                 joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
                                 joinfield=['bold_files'],
                                 name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       name='bold_t2smap_wf')

        workflow.connect([
            (skullstrip_bold_wf, join_echos, [
                ('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [
                ('bold_files', 'inputnode.bold_file')]),
        ])

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        (inputnode, t1w_brain, [('t1w_preproc', 'in_file'),
                                ('t1w_mask', 'in_mask')]),
        # Generate early reference
        (inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file')]),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf, [
            ('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
            ('outputnode.bold_file', 'inputnode.bold_file')]),
        (bold_reference_wf, summary, [
            ('outputnode.algo_dummy_scans', 'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (inputnode, bold_reg_wf, [
            ('t1w_dseg', 'inputnode.t1w_dseg'),
            # Undefined if --fs-no-reconall, but this is safe
            ('subjects_dir', 'inputnode.subjects_dir'),
            ('subject_id', 'inputnode.subject_id'),
            ('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm')]),
        (t1w_brain, bold_reg_wf, [
            ('out_file', 'inputnode.t1w_brain')]),
        (inputnode, bold_t1_trans_wf, [
            ('bold_file', 'inputnode.name_source'),
            ('t1w_mask', 'inputnode.t1w_mask'),
            ('t1w_aseg', 'inputnode.t1w_aseg'),
            ('t1w_aparc', 'inputnode.t1w_aparc')]),
        (t1w_brain, bold_t1_trans_wf, [
            ('out_file', 'inputnode.t1w_brain')]),
        # unused if multiecho, but this is safe
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms', 'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [
            ('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode, [('outputnode.bold_t1', 'bold_t1'),
                                        ('outputnode.bold_t1_ref', 'bold_t1_ref'),
                                        ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
                                        ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (t1w_brain, bold_sdc_wf, [
            ('out_file', 'inputnode.t1w_brain')]),
        (bold_reference_wf, bold_sdc_wf, [
            ('outputnode.ref_image', 'inputnode.epi_file'),
            ('outputnode.ref_image_brain', 'inputnode.epi_brain'),
            ('outputnode.bold_mask', 'inputnode.epi_mask')]),
        (bold_sdc_wf, bold_t1_trans_wf, [
            ('outputnode.out_warp', 'inputnode.fieldwarp'),
            ('outputnode.epi_mask', 'inputnode.ref_bold_mask'),
            ('outputnode.epi_brain', 'inputnode.ref_bold_brain')]),
        (bold_sdc_wf, bold_bold_trans_wf, [
            ('outputnode.out_warp', 'inputnode.fieldwarp'),
            ('outputnode.epi_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, bold_reg_wf, [
            ('outputnode.epi_brain', 'inputnode.ref_bold_brain')]),
        (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('t1w_tpms', 'inputnode.t1w_tpms'),
                                        ('t1w_mask', 'inputnode.t1w_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [
            ('outputnode.movpar_file', 'inputnode.movpar_file'),
            ('outputnode.rmsd_file', 'inputnode.rmsd_file')]),
        (bold_reg_wf, bold_confounds_wf, [
            ('outputnode.itk_t1_to_bold', 'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [
            ('outputnode.skip_vols', 'inputnode.skip_vols')]),
        (bold_bold_trans_wf, bold_confounds_wf, [
            ('outputnode.bold_mask', 'inputnode.bold_mask'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_metadata', 'confounds_metadata'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [
            ('out_files', 'inputnode.bold_file')]),
        (bold_hmc_wf, bold_bold_trans_wf, [
            ('outputnode.xforms', 'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, func_derivatives_wf, [
                ('bold_file', 'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf, [
                ('outputnode.bold', 'inputnode.bold')]),
            (bold_split, bold_t1_trans_wf, [
                ('out_files', 'inputnode.bold_split')]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf, [
                (('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [
                ('outputnode.bold', 'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf, [
                ('outputnode.bold', 'inputnode.bold')]),
            (bold_t2s_wf, bold_t1_trans_wf, [
                ('outputnode.bold', 'inputnode.bold_split')]),
        ])

    if fmaps:
        from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
        # Report on BOLD correction
        fmap_unwarp_report_wf = init_sdc_unwarp_report_wf()
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [
                ('t1w_dseg', 'inputnode.in_seg')]),
            (bold_reference_wf, fmap_unwarp_report_wf, [
                ('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [
                ('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [
                ('outputnode.epi_corrected', 'inputnode.in_post')]),
        ])

        # Overwrite ``out_path_base`` of unwarping DataSinks
        # And ensure echo is dropped from report
        for node in fmap_unwarp_report_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                fmap_unwarp_report_wf.get_node(node).interface.out_path_base = 'fmriprep'
                fmap_unwarp_report_wf.get_node(node).inputs.dismiss_entities = ("echo",)

        for node in bold_sdc_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                bold_sdc_wf.get_node(node).interface.out_path_base = 'fmriprep'
                bold_sdc_wf.get_node(node).inputs.dismiss_entities = ("echo",)

        if 'syn' in fmaps:
            sdc_select_std = pe.Node(
                KeySelect(fields=['std2anat_xfm']),
                name='sdc_select_std', run_without_submitting=True)
            sdc_select_std.inputs.key = 'MNI152NLin2009cAsym'
            workflow.connect([
                (inputnode, sdc_select_std, [('std2anat_xfm', 'std2anat_xfm'),
                                             ('template', 'keys')]),
                (sdc_select_std, bold_sdc_wf, [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
            ])

        if fmaps.get('syn') is True:  # SyN forced
            syn_unwarp_report_wf = init_sdc_unwarp_report_wf(
                name='syn_unwarp_report_wf', forcedsyn=True)
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [
                    ('t1w_dseg', 'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf, [
                    ('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf, [
                    ('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf, [
                    ('outputnode.syn_ref', 'inputnode.in_post')]),
            ])

            # Overwrite ``out_path_base`` of unwarping DataSinks
            # And ensure echo is dropped from report
            for node in syn_unwarp_report_wf.list_node_names():
                if node.split('.')[-1].startswith('ds_'):
                    syn_unwarp_report_wf.get_node(node).interface.out_path_base = 'fmriprep'
                    syn_unwarp_report_wf.get_node(node).inputs.dismiss_entities = ("echo",)

    # Map final BOLD mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(('T1w', 'anat')):
        from niworkflows.interfaces.fixes import (
            FixHeaderApplyTransforms as ApplyTransforms
        )

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel'),
                                  name='boldmask_to_t1w', mem_gb=0.1)
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [
                ('outputnode.itk_bold_to_t1', 'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [
                ('outputnode.bold_mask_t1', 'reference_image')]),
            (bold_bold_trans_wf, boldmask_to_t1w, [
                ('outputnode.bold_mask', 'input_image')]),
            (boldmask_to_t1w, outputnode, [
                ('output_image', 'bold_mask_t1')]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        workflow.connect([
            (bold_bold_trans_wf, outputnode, [
                ('outputnode.bold', 'bold_native')]),
            (bold_bold_trans_wf, func_derivatives_wf, [
                ('outputnode.bold_ref', 'inputnode.bold_native_ref'),
                ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3,)):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            name='bold_std_trans_wf',
            use_compression=not config.execution.low_mem,
            use_fieldwarp=bool(fmaps),
        )
        workflow.connect([
            (inputnode, bold_std_trans_wf, [
                ('template', 'inputnode.templates'),
                ('anat2std_xfm', 'inputnode.anat2std_xfm'),
                ('bold_file', 'inputnode.name_source'),
                ('t1w_aseg', 'inputnode.bold_aseg'),
                ('t1w_aparc', 'inputnode.bold_aparc')]),
            (bold_hmc_wf, bold_std_trans_wf, [
                ('outputnode.xforms', 'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_std_trans_wf, [
                ('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf, bold_std_trans_wf, [
                ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_std_trans_wf, [
                ('outputnode.out_warp', 'inputnode.fieldwarp')]),
            (bold_std_trans_wf, outputnode, [('outputnode.bold_std', 'bold_std'),
                                             ('outputnode.bold_std_ref', 'bold_std_ref'),
                                             ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('outputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode, [
                    ('outputnode.bold_aseg_std', 'bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        if not multiecho:
            workflow.connect([
                (bold_split, bold_std_trans_wf, [
                    ('out_files', 'inputnode.bold_split')])
            ])
        else:
            split_opt_comb = bold_split.clone(name='split_opt_comb')
            workflow.connect([
                (bold_t2s_wf, split_opt_comb, [
                    ('outputnode.bold', 'in_file')]),
                (split_opt_comb, bold_std_trans_wf, [
                    ('out_files', 'inputnode.bold_split')
                ])
            ])

        # func_derivatives_wf internally parametrizes over snapshotted spaces.
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.spatial_reference', 'inputnode.spatial_reference'),
                ('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('outputnode.bold_std', 'inputnode.bold_std'),
                ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])

        if config.workflow.use_aroma:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf
            ica_aroma_wf = init_ica_aroma_wf(
                mem_gb=mem_gb['resampled'],
                metadata=metadata,
                omp_nthreads=omp_nthreads,
                use_fieldwarp=bool(fmaps),
                err_on_aroma_warn=config.workflow.aroma_err_on_warn,
                aroma_melodic_dim=config.workflow.aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(niu.Function(output_names=["out_file"],
                                        function=_to_join),
                           name='aroma_confounds')

            mrg_conf_metadata = pe.Node(niu.Merge(2), name='merge_confound_metadata',
                                        run_without_submitting=True)
            mrg_conf_metadata2 = pe.Node(DictMerge(), name='merge_confound_metadata2',
                                         run_without_submitting=True)
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf, [
                    ('bold_file', 'inputnode.name_source')]),
                (bold_hmc_wf, ica_aroma_wf, [
                    ('outputnode.movpar_file', 'inputnode.movpar_file')]),
                (bold_reference_wf, ica_aroma_wf, [
                    ('outputnode.skip_vols', 'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [
                    ('outputnode.confounds_file', 'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                    [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join,
                    [('outputnode.aroma_confounds', 'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                    [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                    [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                     ('outputnode.melodic_mix', 'melodic_mix'),
                     ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict', 'confounds_metadata')]),
                (bold_std_trans_wf, ica_aroma_wf, [
                    ('outputnode.bold_std', 'inputnode.bold_std'),
                    ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                    ('outputnode.spatial_reference', 'inputnode.spatial_reference')]),
            ])

    # SURFACES ##################################################################################
    # Freesurfer
    freesurfer_spaces = spaces.get_fs_spaces()
    if freesurfer and freesurfer_spaces:
        config.loggers.workflow.debug('Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(
            mem_gb=mem_gb['resampled'],
            surface_spaces=freesurfer_spaces,
            medial_surface_nan=config.workflow.medial_surface_nan,
            name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf, [
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1', 'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
            (bold_surf_wf, func_derivatives_wf, [
                ('outputnode.target', 'inputnode.surf_refs')]),
        ])

        # CIFTI output
        if config.workflow.cifti_output:
            from .resampling import init_bold_grayords_wf
            bold_grayords_wf = init_bold_grayords_wf(
                grayord_density=config.workflow.cifti_output,
                mem_gb=mem_gb['resampled'],
                repetition_time=metadata['RepetitionTime'])

            workflow.connect([
                (inputnode, bold_grayords_wf, [
                    ('subjects_dir', 'inputnode.subjects_dir')]),
                (bold_std_trans_wf, bold_grayords_wf, [
                    ('outputnode.bold_std', 'inputnode.bold_std'),
                    ('outputnode.spatial_reference', 'inputnode.spatial_reference')]),
                (bold_surf_wf, bold_grayords_wf, [
                    ('outputnode.surfaces', 'inputnode.surf_files'),
                    ('outputnode.target', 'inputnode.surf_refs'),
                ]),
                (bold_grayords_wf, outputnode, [
                    ('outputnode.cifti_bold', 'bold_cifti'),
                    ('outputnode.cifti_variant', 'cifti_variant'),
                    ('outputnode.cifti_metadata', 'cifti_metadata'),
                    ('outputnode.cifti_density', 'cifti_density')]),
            ])

    if spaces.get_spaces(nonstandard=False, dim=(3,)):
        carpetplot_wf = init_carpetplot_wf(
            mem_gb=mem_gb['resampled'],
            metadata=metadata,
            cifti_output=config.workflow.cifti_output,
            name='carpetplot_wf')

        if config.workflow.cifti_output:
            workflow.connect(
                bold_grayords_wf, 'outputnode.cifti_bold', carpetplot_wf, 'inputnode.cifti_bold'
            )
        else:
            # Xform to 'MNI152NLin2009cAsym' is always computed.
            carpetplot_select_std = pe.Node(
                KeySelect(fields=['std2anat_xfm'], key='MNI152NLin2009cAsym'),
                name='carpetplot_select_std', run_without_submitting=True)

            workflow.connect([
                (inputnode, carpetplot_select_std, [
                    ('std2anat_xfm', 'std2anat_xfm'),
                    ('template', 'keys')]),
                (carpetplot_select_std, carpetplot_wf, [
                    ('std2anat_xfm', 'inputnode.std2anat_xfm')]),
                (bold_bold_trans_wf if not multiecho else bold_t2s_wf, carpetplot_wf, [
                    ('outputnode.bold', 'inputnode.bold')]),
                (bold_bold_trans_wf, carpetplot_wf, [
                    ('outputnode.bold_mask', 'inputnode.bold_mask')]),
                (bold_reg_wf, carpetplot_wf, [
                    ('outputnode.itk_t1_to_bold', 'inputnode.t1_bold_xform')]),
            ])

        workflow.connect([
            (bold_confounds_wf, carpetplot_wf, [
                ('outputnode.confounds_file', 'inputnode.confounds_file')]),
        ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(
        DerivativesDataSink(desc='summary', datatype="figures", dismiss_entities=("echo",)),
        name='ds_report_summary', run_without_submitting=True,
        mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(
        DerivativesDataSink(base_directory=output_dir, desc='validation', datatype="figures",
                            dismiss_entities=("echo",)),
        name='ds_report_validation', run_without_submitting=True,
        mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation, [
            ('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = output_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
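
The `_to_join` helper wired into the `aroma_confounds` node above is referenced but not defined in this excerpt. The following is only an equivalent sketch (not the actual fMRIPrep implementation); it assumes pandas is available and uses a hypothetical output filename:

def _to_join(in_file, join_file):
    """Column-wise join of the AROMA confounds onto the base confounds TSV."""
    import os
    import pandas as pd
    if join_file is None:
        # Nothing to join; pass the original confounds file through.
        return in_file
    base = pd.read_csv(in_file, sep='\t')
    extra = pd.read_csv(join_file, sep='\t')
    out_file = os.path.abspath('confounds_joined.tsv')  # hypothetical name
    pd.concat([base, extra], axis=1).to_csv(out_file, sep='\t', index=False, na_rep='n/a')
    return out_file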
Example No. 33
def init_bold_surf_wf(mem_gb,
                      surface_spaces,
                      medial_surface_nan,
                      name='bold_surf_wf'):
    """
    Sample functional images to FreeSurfer surfaces.

    For each vertex, the cortical ribbon is sampled at six points (spaced 20% of thickness apart)
    and averaged.
    Outputs are in GIFTI format.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes

            from fmriprep.workflows.bold import init_bold_surf_wf
            wf = init_bold_surf_wf(mem_gb=0.1,
                                   surface_spaces=['fsnative', 'fsaverage5'],
                                   medial_surface_nan=False)

    Parameters
    ----------
    surface_spaces : :obj:`list`
        List of FreeSurfer surface-spaces (either ``fsaverage{3,4,5,6,}`` or ``fsnative``)
        the functional images are to be resampled to.
        For ``fsnative``, images will be resampled to the individual subject's
        native surface.
    medial_surface_nan : :obj:`bool`
        Replace medial wall values with NaNs on functional GIFTI files

    Inputs
    ------
    source_file
        Motion-corrected BOLD series in T1 space
    t1w_preproc
        Bias-corrected structural template image
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space

    Outputs
    -------
    surfaces
        BOLD series, resampled to FreeSurfer surfaces

    """
    from nipype.interfaces.io import FreeSurferSource
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.surf import GiftiSetAnatomicalStructure

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series were resampled onto the following surfaces
(FreeSurfer reconstruction nomenclature):
{out_spaces}.
""".format(out_spaces=', '.join(['*%s*' % s for s in surface_spaces]))

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'source_file', 'subject_id', 'subjects_dir', 't1w2fsnative_xfm'
    ]),
                        name='inputnode')
    itersource = pe.Node(niu.IdentityInterface(fields=['target']),
                         name='itersource')
    itersource.iterables = [('target', surface_spaces)]

    get_fsnative = pe.Node(FreeSurferSource(),
                           name='get_fsnative',
                           run_without_submitting=True)

    def select_target(subject_id, space):
        """Get the target subject ID, given a source subject ID and a target space."""
        return subject_id if space == 'fsnative' else space
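    # For example, select_target('sub-01', 'fsnative') returns 'sub-01', while
    # select_target('sub-01', 'fsaverage5') returns 'fsaverage5'.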

    targets = pe.Node(niu.Function(function=select_target),
                      name='targets',
                      run_without_submitting=True,
                      mem_gb=DEFAULT_MEMORY_MIN_GB)

    # Rename the source file to the output space to simplify naming later
    rename_src = pe.Node(niu.Rename(format_string='%(subject)s',
                                    keep_ext=True),
                         name='rename_src',
                         run_without_submitting=True,
                         mem_gb=DEFAULT_MEMORY_MIN_GB)
    itk2lta = pe.Node(niu.Function(function=_itk2lta),
                      name="itk2lta",
                      run_without_submitting=True)
    sampler = pe.MapNode(fs.SampleToSurface(
        cortex_mask=True,
        interp_method='trilinear',
        out_type='gii',
        override_reg_subj=True,
        sampling_method='average',
        sampling_range=(0, 1, 0.2),
        sampling_units='frac',
    ),
                         iterfield=['hemi'],
                         name='sampler',
                         mem_gb=mem_gb * 3)
    sampler.inputs.hemi = ['lh', 'rh']
    update_metadata = pe.MapNode(GiftiSetAnatomicalStructure(),
                                 iterfield=['in_file'],
                                 name='update_metadata',
                                 mem_gb=DEFAULT_MEMORY_MIN_GB)

    outputnode = pe.JoinNode(
        niu.IdentityInterface(fields=['surfaces', 'target']),
        joinsource='itersource',
        name='outputnode')

    workflow.connect([
        (inputnode, get_fsnative, [('subject_id', 'subject_id'),
                                   ('subjects_dir', 'subjects_dir')]),
        (inputnode, targets, [('subject_id', 'subject_id')]),
        (inputnode, rename_src, [('source_file', 'in_file')]),
        (inputnode, itk2lta, [('source_file', 'src_file'),
                              ('t1w2fsnative_xfm', 'in_file')]),
        (get_fsnative, itk2lta, [('T1', 'dst_file')]),
        (inputnode, sampler, [('subjects_dir', 'subjects_dir'),
                              ('subject_id', 'subject_id')]),
        (itersource, targets, [('target', 'space')]),
        (itersource, rename_src, [('target', 'subject')]),
        (itk2lta, sampler, [('out', 'reg_file')]),
        (targets, sampler, [('out', 'target_subject')]),
        (rename_src, sampler, [('out_file', 'source_file')]),
        (update_metadata, outputnode, [('out_file', 'surfaces')]),
        (itersource, outputnode, [('target', 'target')]),
    ])

    if not medial_surface_nan:
        workflow.connect(sampler, 'out_file', update_metadata, 'in_file')
        return workflow

    from niworkflows.interfaces.freesurfer import MedialNaNs
    # Refine if medial vertices should be NaNs
    medial_nans = pe.MapNode(MedialNaNs(),
                             iterfield=['in_file'],
                             name='medial_nans',
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, medial_nans, [('subjects_dir', 'subjects_dir')]),
        (sampler, medial_nans, [('out_file', 'in_file')]),
        (medial_nans, update_metadata, [('out_file', 'in_file')]),
    ])
    return workflow
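
The `_itk2lta` function fed to the `itk2lta` node above is not shown in this excerpt. A plausible sketch, assuming the `nitransforms` package is available (the actual fMRIPrep helper may differ in details):

def _itk2lta(in_file, src_file, dst_file):
    """Convert an ITK affine into FreeSurfer LTA format for SampleToSurface."""
    from pathlib import Path
    import nitransforms as nt

    out_file = Path('out.lta').absolute()
    in_fmt = 'fs' if str(in_file).endswith('.lta') else 'itk'
    nt.linear.load(in_file, fmt=in_fmt, reference=src_file).to_filename(
        out_file, moving=dst_file, fmt='fs')
    return str(out_file)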
Example No. 34
def init_magnitude_wf(omp_nthreads, name='magnitude_wf'):
    """
    Prepare the magnitude part of :abbr:`GRE (gradient-recalled echo)` fieldmaps.

    Average (if not done already) the magnitude part of the
    :abbr:`GRE (gradient recalled echo)` images, run N4 to
    correct for B1 field nonuniformity, and skull-strip the
    preprocessed magnitude.

    Workflow Graph
        .. workflow ::
            :graph2use: orig
            :simple_form: yes

            from sdcflows.workflows.fmap import init_magnitude_wf
            wf = init_magnitude_wf(omp_nthreads=6)

    Parameters
    ----------
    omp_nthreads : int
        Maximum number of threads an individual process may use
    name : str
        Name of workflow (default: ``magnitude_wf``)

    Inputs
    ------
    magnitude : pathlike
        Path(s) to the corresponding magnitude image(s).

    Outputs
    -------
    fmap_ref : pathlike
        Path to the fieldmap reference calculated in this workflow.
    fmap_mask : pathlike
        Path to a binary brain mask corresponding to the reference above.

    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['magnitude']),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['fmap_ref', 'fmap_mask', 'mask_report']),
        name='outputnode')

    # Merge input magnitude images
    # Do not reorient to RAS to preserve the validity of PhaseEncodingDirection
    magmrg = pe.Node(IntraModalMerge(hmc=False, to_ras=False), name='magmrg')

    # de-gradient the fields ("bias/illumination artifact")
    n4_correct = pe.Node(ants.N4BiasFieldCorrection(dimension=3,
                                                    copy_header=True),
                         name='n4_correct',
                         n_procs=omp_nthreads)
    bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True),
                  name='bet')

    workflow.connect([
        (inputnode, magmrg, [('magnitude', 'in_files')]),
        (magmrg, n4_correct, [('out_avg', 'input_image')]),
        (n4_correct, bet, [('output_image', 'in_file')]),
        (bet, outputnode, [('mask_file', 'fmap_mask'),
                           ('out_file', 'fmap_ref'),
                           ('out_report', 'mask_report')]),
    ])
    return workflow
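
A minimal usage sketch of the workflow above; the fieldmap path is hypothetical and only the ``inputnode.magnitude`` field needs to be set before running:

mag_wf = init_magnitude_wf(omp_nthreads=4)
# One or more magnitude images may be provided (hypothetical BIDS path).
mag_wf.inputs.inputnode.magnitude = ['sub-01/fmap/sub-01_magnitude1.nii.gz']
# mag_wf.run()  # executes averaging, N4 bias correction, and BET skull-stripping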
Example No. 35
def init_bold_mni_trans_wf(template,
                           freesurfer,
                           mem_gb,
                           omp_nthreads,
                           name='bold_mni_trans_wf',
                           template_out_grid='2mm',
                           use_compression=True,
                           use_fieldwarp=False):
    """
    This workflow samples functional images to the MNI template in a "single shot"
    from the original BOLD series.

    .. workflow::
        :graph2use: colored
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_mni_trans_wf
        wf = init_bold_mni_trans_wf(template='MNI152NLin2009cAsym',
                                    freesurfer=True,
                                    mem_gb=3,
                                    omp_nthreads=1,
                                    template_out_grid='native')

    **Parameters**

        template : str
            Name of template targeted by ``template`` output space
        freesurfer : bool
            Enable sampling of FreeSurfer files
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``bold_mni_trans_wf``)
        template_out_grid : str
            Keyword ('native', '1mm' or '2mm') or path of custom reference
            image for normalization.
        use_compression : bool
            Save registered BOLD series as ``.nii.gz``
        use_fieldwarp : bool
            Include SDC warp in single-shot transform from BOLD to MNI

    **Inputs**

        itk_bold_to_t1
            Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
        t1_2_mni_forward_transform
            ANTs-compatible affine-and-warp transform file
        bold_split
            Individual 3D volumes, not motion corrected
        bold_mask
            Skull-stripping mask of reference image
        bold_aseg
            FreeSurfer's ``aseg.mgz`` atlas projected into the T1w reference
            (only if ``recon-all`` was run).
        bold_aparc
            FreeSurfer's ``aparc+aseg.mgz`` atlas projected into the T1w reference
            (only if ``recon-all`` was run).
        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing
        hmc_xforms
            List of affine transforms aligning each volume to ``ref_image`` in ITK format
        fieldwarp
            a :abbr:`DFM (displacements field map)` in ITK format

    **Outputs**

        bold_mni
            BOLD series, resampled to template space
        bold_mni_ref
            Reference, contrast-enhanced summary of the BOLD series, resampled to template space
        bold_mask_mni
            BOLD series mask in template space
        bold_aseg_mni
            FreeSurfer's ``aseg.mgz`` atlas, in template space at the BOLD resolution
            (only if ``recon-all`` was run)
        bold_aparc_mni
            FreeSurfer's ``aparc+aseg.mgz`` atlas, in template space at the BOLD resolution
            (only if ``recon-all`` was run)

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series were resampled to {tpl} standard space,
generating a *preprocessed BOLD run in {tpl} space*.
""".format(tpl=template)

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'itk_bold_to_t1', 't1_2_mni_forward_transform', 'name_source',
        'bold_split', 'bold_mask', 'bold_aseg', 'bold_aparc', 'hmc_xforms',
        'fieldwarp'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_mni', 'bold_mni_ref', 'bold_mask_mni', 'bold_aseg_mni',
        'bold_aparc_mni'
    ]),
                         name='outputnode')

    def _aslist(in_value):
        if isinstance(in_value, list):
            return in_value
        return [in_value]

    gen_ref = pe.Node(GenerateSamplingReference(), name='gen_ref',
                      mem_gb=0.3)  # 256x256x256 * 64 / 8 ~ 150MB
    gen_ref.inputs.fixed_image = str(
        get_template(template,
                     resolution=1,
                     desc=None,
                     suffix='T1w',
                     extensions=['.nii', '.nii.gz']))

    mask_mni_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                           float=True),
                           name='mask_mni_tfm',
                           mem_gb=1)

    # Merge the T1w->template and BOLD->T1w transforms to resample the BOLD mask
    mask_merge_tfms = pe.Node(niu.Merge(2),
                              name='mask_merge_tfms',
                              run_without_submitting=True,
                              mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, gen_ref, [(('bold_split', _first), 'moving_image')]),
        (inputnode, mask_mni_tfm, [('bold_mask', 'input_image')]),
        (inputnode, mask_merge_tfms, [('t1_2_mni_forward_transform', 'in1'),
                                      (('itk_bold_to_t1', _aslist), 'in2')]),
        (mask_merge_tfms, mask_mni_tfm, [('out', 'transforms')]),
        (mask_mni_tfm, outputnode, [('output_image', 'bold_mask_mni')]),
    ])

    nxforms = 4 if use_fieldwarp else 3
    merge_xforms = pe.Node(niu.Merge(nxforms),
                           name='merge_xforms',
                           run_without_submitting=True,
                           mem_gb=DEFAULT_MEMORY_MIN_GB)
    workflow.connect([(inputnode, merge_xforms, [('hmc_xforms',
                                                  'in%d' % nxforms)])])

    if use_fieldwarp:
        workflow.connect([(inputnode, merge_xforms, [('fieldwarp', 'in3')])])

    bold_to_mni_transform = pe.Node(MultiApplyTransforms(
        interpolation="LanczosWindowedSinc", float=True, copy_dtype=True),
                                    name='bold_to_mni_transform',
                                    mem_gb=mem_gb * 3 * omp_nthreads,
                                    n_procs=omp_nthreads)

    merge = pe.Node(Merge(compress=use_compression),
                    name='merge',
                    mem_gb=mem_gb * 3)

    # Generate a reference in the target template space
    gen_final_ref = init_bold_reference_wf(omp_nthreads=omp_nthreads,
                                           pre_mask=True)

    workflow.connect([
        (inputnode, merge_xforms, [('t1_2_mni_forward_transform', 'in1'),
                                   (('itk_bold_to_t1', _aslist), 'in2')]),
        (merge_xforms, bold_to_mni_transform, [('out', 'transforms')]),
        (inputnode, merge, [('name_source', 'header_source')]),
        (inputnode, bold_to_mni_transform, [('bold_split', 'input_image')]),
        (bold_to_mni_transform, merge, [('out_files', 'in_files')]),
        (merge, gen_final_ref, [('out_file', 'inputnode.bold_file')]),
        (mask_mni_tfm, gen_final_ref, [('output_image', 'inputnode.bold_mask')
                                       ]),
        (merge, outputnode, [('out_file', 'bold_mni')]),
        (gen_final_ref, outputnode, [('outputnode.ref_image', 'bold_mni_ref')
                                     ]),
    ])

    if template_out_grid == 'native':
        workflow.connect([
            (gen_ref, mask_mni_tfm, [('out_file', 'reference_image')]),
            (gen_ref, bold_to_mni_transform, [('out_file', 'reference_image')
                                              ]),
        ])
    elif template_out_grid in ['1mm', '2mm']:
        res = int(template_out_grid[0])
        mask_mni_tfm.inputs.reference_image = str(
            get_template(template, resolution=res, desc='brain',
                         suffix='mask'))
        bold_to_mni_transform.inputs.reference_image = str(
            get_template(template,
                         resolution=res,
                         desc=None,
                         suffix='T1w',
                         extensions=['.nii', '.nii.gz']))
    else:
        mask_mni_tfm.inputs.reference_image = template_out_grid
        bold_to_mni_transform.inputs.reference_image = template_out_grid

    if freesurfer:
        # Sample the parcellation files to functional space
        aseg_mni_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                               float=True),
                               name='aseg_mni_tfm',
                               mem_gb=1)
        aparc_mni_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                float=True),
                                name='aparc_mni_tfm',
                                mem_gb=1)

        workflow.connect([
            (inputnode, aseg_mni_tfm, [('bold_aseg', 'input_image'),
                                       ('t1_2_mni_forward_transform',
                                        'transforms')]),
            (inputnode, aparc_mni_tfm, [('bold_aparc', 'input_image'),
                                        ('t1_2_mni_forward_transform',
                                         'transforms')]),
            (aseg_mni_tfm, outputnode, [('output_image', 'bold_aseg_mni')]),
            (aparc_mni_tfm, outputnode, [('output_image', 'bold_aparc_mni')]),
        ])
        if template_out_grid == 'native':
            workflow.connect([
                (gen_ref, aseg_mni_tfm, [('out_file', 'reference_image')]),
                (gen_ref, aparc_mni_tfm, [('out_file', 'reference_image')]),
            ])
        elif template_out_grid in ['1mm', '2mm']:
            entities = {
                'resolution': int(template_out_grid[0]),
                'desc': 'brain',
                'suffix': 'mask',
                'extensions': ['.nii', '.nii.gz']
            }
            aseg_mni_tfm.inputs.reference_image = str(
                get_template(template, **entities))
            aparc_mni_tfm.inputs.reference_image = str(
                get_template(template, **entities))
        else:
            aseg_mni_tfm.inputs.reference_image = template_out_grid
            aparc_mni_tfm.inputs.reference_image = template_out_grid

    return workflow
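
The `_first` helper used to pick a reference volume (``('bold_split', _first)``) is not part of this excerpt; a minimal sketch of what such a helper plausibly does:

def _first(inlist):
    """Return the first element of a (possibly single-item) list of volumes."""
    if isinstance(inlist, (list, tuple)):
        return inlist[0]
    return inlist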
Example No. 36
def init_anat_norm_wf(
    *,
    sloppy,
    omp_nthreads,
    templates,
    name="anat_norm_wf",
):
    """
    Build an individual spatial normalization workflow using ``antsRegistration``.
    Workflow Graph
        .. workflow ::
            :graph2use: orig
            :simple_form: yes
            from smriprep.workflows.norm import init_anat_norm_wf
            wf = init_anat_norm_wf(
                sloppy=False,
                omp_nthreads=1,
                templates=['MNI152NLin2009cAsym', 'MNI152NLin6Asym'],
            )
    .. important::
        This workflow defines an iterable input over the input parameter ``templates``,
        so Nipype will produce one copy of the downstream workflows which connect
        ``poutputnode.template`` or ``poutputnode.template_spec`` to their inputs
        (``poutputnode`` stands for *parametric output node*).
        Nipype refers to this expansion of the graph as *parameterized execution*.
        If a joint list of values is required (and thus cutting off parameterization),
        please use the equivalent outputs of ``outputnode`` (which *joins* all the
        parameterized execution paths).
    Parameters
    ----------
    sloppy : :obj:`bool`
        Apply sloppy arguments to speed up processing. Use with caution,
        registration processes will be very inaccurate.
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use.
    templates : :obj:`list` of :obj:`str`
        List of standard space fullnames (e.g., ``MNI152NLin6Asym``
        or ``MNIPediatricAsym:cohort-4``) which are targets for spatial
        normalization.
    Inputs
    ------
    moving_image
        The input image that will be normalized to standard space.
    moving_mask
        A precise brain mask separating skull/skin/fat from brain
        structures.
    moving_segmentation
        A brain tissue segmentation of the ``moving_image``.
    moving_tpms
        tissue probability maps (TPMs) corresponding to the
        ``moving_segmentation``.
    lesion_mask
        (optional) A mask to exclude regions from the cost-function
        input domain to enable standardization of lesioned brains.
    orig_t1w
        The original T1w image from the BIDS structure.
    template
        Template name and specification
    Outputs
    -------
    standardized
        The T1w after spatial normalization, in template space.
    anat2std_xfm
        The T1w-to-template transform.
    std2anat_xfm
        The template-to-T1w transform.
    std_mask
        The ``moving_mask`` in template space (matches ``standardized`` output).
    std_dseg
        The ``moving_segmentation`` in template space (matches ``standardized``
        output).
    std_tpms
        The ``moving_tpms`` in template space (matches ``standardized`` output).
    template
        Template name extracted from the input parameter ``template``, for further
        use in downstream nodes.
    template_spec
        Template specifications extracted from the input parameter ``template``, for
        further use in downstream nodes.
    """
    from ...utils.patches import set_reg_resolution, set_tf_resolution

    ntpls = len(templates)
    workflow = Workflow(name=name)

    if templates:
        workflow.__desc__ = """\
Volume-based spatial normalization to {targets} ({targets_id}) was performed through
nonlinear registration with `antsRegistration` (ANTs {ants_ver}),
using brain-extracted versions of both T1w reference and the T1w template.
The following template{tpls} selected for spatial normalization:
""".format(
            ants_ver=ANTsInfo.version() or "(version unknown)",
            targets="%s standard space%s" % (
                defaultdict("several".format, {
                    1: "one",
                    2: "two",
                    3: "three",
                    4: "four"
                })[ntpls],
                "s" * (ntpls != 1),
            ),
            targets_id=", ".join(templates),
            tpls=(" was", "s were")[ntpls != 1],
        )

        # Append template citations to description
        for template in templates:
            template_meta = get_metadata(template.split(":")[0])
            template_refs = ["@%s" % template.split(":")[0].lower()]

            if template_meta.get("RRID", None):
                template_refs += ["RRID:%s" % template_meta["RRID"]]

            workflow.__desc__ += """\
*{template_name}* [{template_refs}; TemplateFlow ID: {template}]""".format(
                template=template,
                template_name=template_meta["Name"],
                template_refs=", ".join(template_refs),
            )
            workflow.__desc__ += (", ", ".")[template == templates[-1]]

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "lesion_mask",
            "moving_image",
            "moving_mask",
            "moving_segmentation",
            "moving_tpms",
            "orig_t1w",
            "template",
        ]),
        name="inputnode",
    )
    inputnode.iterables = [("template", templates)]

    out_fields = [
        "anat2std_xfm",
        "standardized",
        "std2anat_xfm",
        "std_dseg",
        "std_mask",
        "std_tpms",
        "template",
        "template_spec",
    ]
    poutputnode = pe.Node(niu.IdentityInterface(fields=out_fields),
                          name="poutputnode")

    split_desc = pe.Node(TemplateDesc(),
                         run_without_submitting=True,
                         name="split_desc")

    # Nibabies hacks
    set_tf_res = pe.Node(niu.Function(function=set_tf_resolution),
                         name='set_tf_res')
    set_tf_res.inputs.sloppy = sloppy
    set_reg_res = pe.Node(niu.Function(function=set_reg_resolution),
                          name='set_reg_res')

    tf_select = pe.Node(
        TemplateFlowSelect(),
        name="tf_select",
        run_without_submitting=True,
    )

    # With the improvements from nipreps/niworkflows#342 this truncation is now necessary
    trunc_mov = pe.Node(
        ants.ImageMath(operation="TruncateImageIntensity",
                       op2="0.01 0.999 256"),
        name="trunc_mov",
    )

    registration = pe.Node(
        SpatialNormalization(
            float=True,
            flavor=["precise", "testing"][sloppy],
        ),
        name="registration",
        n_procs=omp_nthreads,
        mem_gb=2,
    )

    # Resample T1w-space inputs
    tpl_moving = pe.Node(
        ApplyTransforms(
            dimension=3,
            default_value=0,
            float=True,
            interpolation="LanczosWindowedSinc",
        ),
        name="tpl_moving",
    )

    std_mask = pe.Node(ApplyTransforms(interpolation="MultiLabel"),
                       name="std_mask")
    std_dseg = pe.Node(ApplyTransforms(interpolation="MultiLabel"),
                       name="std_dseg")

    std_tpms = pe.MapNode(
        ApplyTransforms(dimension=3,
                        default_value=0,
                        float=True,
                        interpolation="Gaussian"),
        iterfield=["input_image"],
        name="std_tpms",
    )

    # fmt:off
    workflow.connect([
        (inputnode, split_desc, [('template', 'template')]),
        (inputnode, poutputnode, [('template', 'template')]),
        (inputnode, trunc_mov, [('moving_image', 'op1')]),
        (inputnode, registration, [('moving_mask', 'moving_mask'),
                                   ('lesion_mask', 'lesion_mask')]),
        (inputnode, tpl_moving, [('moving_image', 'input_image')]),
        (inputnode, std_mask, [('moving_mask', 'input_image')]),
        (split_desc, set_tf_res, [('name', 'template')]),
        (split_desc, tf_select, [('name', 'template'),
                                 ('spec', 'template_spec')]),
        (set_tf_res, tf_select, [('out', 'resolution')]),
        (split_desc, set_reg_res, [('name', 'template')]),
        (set_reg_res, registration, [('out', 'template_resolution')]),
        (split_desc, registration, [('name', 'template'),
                                    ('spec', 'template_spec')]),
        (tf_select, tpl_moving, [('t1w_file', 'reference_image')]),
        (tf_select, std_mask, [('t1w_file', 'reference_image')]),
        (tf_select, std_dseg, [('t1w_file', 'reference_image')]),
        (tf_select, std_tpms, [('t1w_file', 'reference_image')]),
        (trunc_mov, registration, [('output_image', 'moving_image')]),
        (registration, tpl_moving, [('composite_transform', 'transforms')]),
        (registration, std_mask, [('composite_transform', 'transforms')]),
        (inputnode, std_dseg, [('moving_segmentation', 'input_image')]),
        (registration, std_dseg, [('composite_transform', 'transforms')]),
        (inputnode, std_tpms, [('moving_tpms', 'input_image')]),
        (registration, std_tpms, [('composite_transform', 'transforms')]),
        (registration, poutputnode, [('composite_transform', 'anat2std_xfm'),
                                     ('inverse_composite_transform',
                                      'std2anat_xfm')]),
        (tpl_moving, poutputnode, [('output_image', 'standardized')]),
        (std_mask, poutputnode, [('output_image', 'std_mask')]),
        (std_dseg, poutputnode, [('output_image', 'std_dseg')]),
        (std_tpms, poutputnode, [('output_image', 'std_tpms')]),
        (split_desc, poutputnode, [('spec', 'template_spec')]),
    ])
    # fmt:on

    # Provide synchronized output
    outputnode = pe.JoinNode(
        niu.IdentityInterface(fields=out_fields),
        name="outputnode",
        joinsource="inputnode",
    )
    # fmt:off
    workflow.connect([
        (poutputnode, outputnode, [(f, f) for f in out_fields]),
    ])
    # fmt:on

    return workflow
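
The ``defaultdict("several".format, ...)`` construct used to build the boilerplate above can look cryptic; the snippet below is only an illustration of how it resolves for different template counts:

from collections import defaultdict

# The default factory is the bound method "several".format, which returns
# "several" when called with no arguments, i.e., for any unlisted count.
numbers = defaultdict("several".format, {1: "one", 2: "two", 3: "three", 4: "four"})
assert numbers[1] == "one"
assert numbers[6] == "several"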
Example No. 37
def init_bold_surf_wf(mem_gb,
                      output_spaces,
                      medial_surface_nan,
                      name='bold_surf_wf'):
    """
    This workflow samples functional images to FreeSurfer surfaces.

    For each vertex, the cortical ribbon is sampled at six points (spaced 20% of thickness apart)
    and averaged.

    Outputs are in GIFTI format.

    .. workflow::
        :graph2use: colored
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_surf_wf
        wf = init_bold_surf_wf(mem_gb=0.1,
                               output_spaces=['T1w', 'fsnative',
                                             'template', 'fsaverage5'],
                               medial_surface_nan=False)

    **Parameters**

        output_spaces : list
            List of output spaces the functional images are to be resampled to.
            Target spaces beginning with ``fs`` will be selected for resampling,
            such as ``fsaverage`` or related template spaces.
            If the list contains ``fsnative``, images will be resampled to the
            individual subject's native surface.
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files

    **Inputs**

        source_file
            Motion-corrected BOLD series in T1 space
        t1_preproc
            Bias-corrected structural template image
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID
        t1_2_fsnative_forward_transform
            LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space

    **Outputs**

        surfaces
            BOLD series, resampled to FreeSurfer surfaces

    """
    # Ensure volumetric spaces do not sneak into this workflow
    spaces = [space for space in output_spaces if space.startswith('fs')]
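    # e.g., ['T1w', 'template', 'fsnative', 'fsaverage5'] -> ['fsnative', 'fsaverage5']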

    workflow = Workflow(name=name)

    if spaces:
        workflow.__desc__ = """\
The BOLD time-series were resampled to surfaces on the following
spaces: {out_spaces}.
""".format(out_spaces=', '.join(['*%s*' % s for s in spaces]))
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'source_file', 't1_preproc', 'subject_id', 'subjects_dir',
        't1_2_fsnative_forward_transform'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=['surfaces']),
                         name='outputnode')

    def select_target(subject_id, space):
        """ Given a source subject ID and a target space, get the target subject ID """
        return subject_id if space == 'fsnative' else space

    targets = pe.MapNode(niu.Function(function=select_target),
                         iterfield=['space'],
                         name='targets',
                         mem_gb=DEFAULT_MEMORY_MIN_GB)
    targets.inputs.space = spaces

    # Rename the source file to the output space to simplify naming later
    rename_src = pe.MapNode(niu.Rename(format_string='%(subject)s',
                                       keep_ext=True),
                            iterfield='subject',
                            name='rename_src',
                            run_without_submitting=True,
                            mem_gb=DEFAULT_MEMORY_MIN_GB)
    rename_src.inputs.subject = spaces

    resampling_xfm = pe.Node(LTAConvert(in_lta='identity.nofile',
                                        out_lta=True),
                             name='resampling_xfm')
    set_xfm_source = pe.Node(ConcatenateLTA(out_type='RAS2RAS'),
                             name='set_xfm_source')

    sampler = pe.MapNode(fs.SampleToSurface(sampling_method='average',
                                            sampling_range=(0, 1, 0.2),
                                            sampling_units='frac',
                                            interp_method='trilinear',
                                            cortex_mask=True,
                                            override_reg_subj=True,
                                            out_type='gii'),
                         iterfield=['source_file', 'target_subject'],
                         iterables=('hemi', ['lh', 'rh']),
                         name='sampler',
                         mem_gb=mem_gb * 3)

    medial_nans = pe.MapNode(MedialNaNs(),
                             iterfield=['in_file', 'target_subject'],
                             name='medial_nans',
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

    merger = pe.JoinNode(niu.Merge(1, ravel_inputs=True),
                         name='merger',
                         joinsource='sampler',
                         joinfield=['in1'],
                         run_without_submitting=True,
                         mem_gb=DEFAULT_MEMORY_MIN_GB)

    update_metadata = pe.MapNode(GiftiSetAnatomicalStructure(),
                                 iterfield='in_file',
                                 name='update_metadata',
                                 mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, targets, [('subject_id', 'subject_id')]),
        (inputnode, rename_src, [('source_file', 'in_file')]),
        (inputnode, resampling_xfm, [('source_file', 'source_file'),
                                     ('t1_preproc', 'target_file')]),
        (inputnode, set_xfm_source, [('t1_2_fsnative_forward_transform',
                                      'in_lta2')]),
        (resampling_xfm, set_xfm_source, [('out_lta', 'in_lta1')]),
        (inputnode, sampler, [('subjects_dir', 'subjects_dir'),
                              ('subject_id', 'subject_id')]),
        (set_xfm_source, sampler, [('out_file', 'reg_file')]),
        (targets, sampler, [('out', 'target_subject')]),
        (rename_src, sampler, [('out_file', 'source_file')]),
        (merger, update_metadata, [('out', 'in_file')]),
        (update_metadata, outputnode, [('out_file', 'surfaces')]),
    ])

    if medial_surface_nan:
        workflow.connect([
            (inputnode, medial_nans, [('subjects_dir', 'subjects_dir')]),
            (sampler, medial_nans, [('out_file', 'in_file')]),
            (targets, medial_nans, [('out', 'target_subject')]),
            (medial_nans, merger, [('out_file', 'in1')]),
        ])
    else:
        workflow.connect(sampler, 'out_file', merger, 'in1')

    return workflow
Example No. 38
def init_anat_reports_wf(*, freesurfer, output_dir, name="anat_reports_wf"):
    """
    Set up a battery of datasinks to store reports in the right location.

    Parameters
    ----------
    freesurfer : :obj:`bool`
        FreeSurfer was enabled
    output_dir : :obj:`str`
        Directory in which to save derivatives
    name : :obj:`str`
        Workflow name (default: anat_reports_wf)

    Inputs
    ------
    source_file
        Input T1w image
    std_t1w
        T1w image resampled to standard space
    std_mask
        Mask of skull-stripped template
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w_conform_report
        Conformation report
    t1w_preproc
        The T1w reference map, which is calculated as the average of bias-corrected
        and preprocessed T1w images, defining the anatomical space.
    t1w_dseg
        Segmentation in T1w space
    t1w_mask
        Brain (binary) mask estimated by brain extraction.
    template
        Template space and specifications

    """
    from niworkflows.interfaces.reportlets.registration import (
        SimpleBeforeAfterRPT as SimpleBeforeAfter,
    )
    from niworkflows.interfaces.reportlets.masks import ROIsPlot
    from ..interfaces.templateflow import TemplateFlowSelect

    workflow = Workflow(name=name)

    inputfields = [
        "source_file",
        "t1w_conform_report",
        "t1w_preproc",
        "t1w_dseg",
        "t1w_mask",
        "template",
        "std_t1w",
        "std_mask",
        "subject_id",
        "subjects_dir",
    ]
    inputnode = pe.Node(niu.IdentityInterface(fields=inputfields),
                        name="inputnode")

    seg_rpt = pe.Node(ROIsPlot(colors=["b", "magenta"], levels=[1.5, 2.5]),
                      name="seg_rpt")

    t1w_conform_check = pe.Node(
        niu.Function(function=_empty_report),
        name="t1w_conform_check",
        run_without_submitting=True,
    )

    ds_t1w_conform_report = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            desc="conform",
                            datatype="figures"),
        name="ds_t1w_conform_report",
        run_without_submitting=True,
    )

    ds_t1w_dseg_mask_report = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            suffix="dseg",
                            datatype="figures"),
        name="ds_t1w_dseg_mask_report",
        run_without_submitting=True,
    )

    # fmt:off
    workflow.connect([
        (inputnode, t1w_conform_check, [('t1w_conform_report', 'in_file')]),
        (t1w_conform_check, ds_t1w_conform_report, [('out', 'in_file')]),
        (inputnode, ds_t1w_conform_report, [('source_file', 'source_file')]),
        (inputnode, ds_t1w_dseg_mask_report, [('source_file', 'source_file')]),
        (inputnode, seg_rpt, [('t1w_preproc', 'in_file'),
                              ('t1w_mask', 'in_mask'),
                              ('t1w_dseg', 'in_rois')]),
        (seg_rpt, ds_t1w_dseg_mask_report, [('out_report', 'in_file')]),
    ])
    # fmt:on

    # Generate reportlets showing spatial normalization
    tf_select = pe.Node(TemplateFlowSelect(resolution=1),
                        name="tf_select",
                        run_without_submitting=True)
    norm_msk = pe.Node(
        niu.Function(
            function=_rpt_masks,
            output_names=["before", "after"],
            input_names=["mask_file", "before", "after", "after_mask"],
        ),
        name="norm_msk",
    )
    norm_rpt = pe.Node(SimpleBeforeAfter(), name="norm_rpt", mem_gb=0.1)
    norm_rpt.inputs.after_label = "Participant"  # after

    ds_std_t1w_report = pe.Node(
        DerivativesDataSink(base_directory=output_dir,
                            suffix="T1w",
                            datatype="figures"),
        name="ds_std_t1w_report",
        run_without_submitting=True,
    )

    # fmt:off
    workflow.connect([
        (inputnode, tf_select, [(('template', _drop_cohort), 'template'),
                                (('template', _pick_cohort), 'cohort')]),
        (inputnode, norm_rpt, [('template', 'before_label')]),
        (inputnode, norm_msk, [('std_t1w', 'after'),
                               ('std_mask', 'after_mask')]),
        (tf_select, norm_msk, [('t1w_file', 'before'),
                               ('brain_mask', 'mask_file')]),
        (norm_msk, norm_rpt, [('before', 'before'), ('after', 'after')]),
        (inputnode, ds_std_t1w_report, [(('template', _fmt), 'space'),
                                        ('source_file', 'source_file')]),
        (norm_rpt, ds_std_t1w_report, [('out_report', 'in_file')]),
    ])
    # fmt:on

    if freesurfer:
        from ..interfaces.reports import FSSurfaceReport

        recon_report = pe.Node(FSSurfaceReport(), name="recon_report")
        recon_report.interface._always_run = True

        ds_recon_report = pe.Node(
            DerivativesDataSink(base_directory=output_dir,
                                desc="reconall",
                                datatype="figures"),
            name="ds_recon_report",
            run_without_submitting=True,
        )
        # fmt:off
        workflow.connect([
            (inputnode, recon_report, [('subjects_dir', 'subjects_dir'),
                                       ('subject_id', 'subject_id')]),
            (recon_report, ds_recon_report, [('out_report', 'in_file')]),
            (inputnode, ds_recon_report, [('source_file', 'source_file')])
        ])
        # fmt:on

    return workflow
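
The helpers `_drop_cohort`, `_pick_cohort`, and `_fmt` that massage the ``template`` string (e.g., ``'MNIPediatricAsym:cohort-4'``) are not included in this excerpt. A plausible sketch, inferred only from how they are connected above (the real sMRIPrep helpers may differ):

def _drop_cohort(in_template):
    """'MNIPediatricAsym:cohort-4' -> 'MNIPediatricAsym'."""
    return in_template.split(':')[0]


def _pick_cohort(in_template):
    """Extract the cohort identifier (e.g., '4'); undefined if there is none."""
    from nipype.interfaces.base import Undefined
    if 'cohort-' not in in_template:
        return Undefined
    return in_template.split('cohort-')[-1].split(':')[0]


def _fmt(in_template):
    """Build a space label for the reportlet filename, e.g., 'MNIPediatricAsym_cohort-4'."""
    return in_template.replace(':', '_')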
Example No. 39
def init_bold_preproc_trans_wf(mem_gb,
                               omp_nthreads,
                               name='bold_preproc_trans_wf',
                               use_compression=True,
                               use_fieldwarp=False,
                               split_file=False,
                               interpolation='LanczosWindowedSinc'):
    """
    This workflow resamples the input fMRI in its native (original)
    space in a "single shot" from the original BOLD series.

    .. workflow::
        :graph2use: colored
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_preproc_trans_wf
        wf = init_bold_preproc_trans_wf(mem_gb=3, omp_nthreads=1)

    **Parameters**

        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``bold_preproc_trans_wf``)
        use_compression : bool
            Save registered BOLD series as ``.nii.gz``
        use_fieldwarp : bool
            Include SDC warp in the single-shot transform from BOLD to its native space
        split_file : bool
            Whether the input file should be split (i.e., it is a 4D file)
            or is already a list of 3D files (default ``False``, do not split)
        interpolation : str
            Interpolation type to be used by ANTs' ``applyTransforms``
            (default ``'LanczosWindowedSinc'``)

    **Inputs**

        bold_file
            Individual 3D volumes, not motion corrected
        bold_mask
            Skull-stripping mask of reference image
        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing
        hmc_xforms
            List of affine transforms aligning each volume to ``ref_image`` in ITK format
        fieldwarp
            a :abbr:`DFM (displacements field map)` in ITK format

    **Outputs**

        bold
            BOLD series, resampled in native space, including all preprocessing
        bold_mask
            BOLD series mask calculated with the new time-series
        bold_ref
            BOLD reference image: an average-like 3D image of the time-series
        bold_ref_brain
            Same as ``bold_ref``, but once the brain mask has been applied

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series (including slice-timing correction when applied)
were resampled onto their original, native space by applying
{transforms}.
These resampled BOLD time-series will be referred to as *preprocessed
BOLD in original space*, or just *preprocessed BOLD*.
""".format(transforms="""\
a single, composite transform to correct for head-motion and
susceptibility distortions""" if use_fieldwarp else """\
the transforms to correct for head-motion""")

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'name_source', 'bold_file', 'bold_mask', 'hmc_xforms', 'fieldwarp'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bold', 'bold_mask', 'bold_ref', 'bold_ref_brain']),
                         name='outputnode')

    bold_transform = pe.Node(MultiApplyTransforms(interpolation=interpolation,
                                                  float=True,
                                                  copy_dtype=True),
                             name='bold_transform',
                             mem_gb=mem_gb * 3 * omp_nthreads,
                             n_procs=omp_nthreads)

    merge = pe.Node(Merge(compress=use_compression),
                    name='merge',
                    mem_gb=mem_gb * 3)

    # Generate a new BOLD reference
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
    bold_reference_wf.__desc__ = None  # Unset description to avoid second appearance

    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (bold_transform, merge, [('out_files', 'in_files')]),
        (merge, bold_reference_wf, [('out_file', 'inputnode.bold_file')]),
        (merge, outputnode, [('out_file', 'bold')]),
        (bold_reference_wf, outputnode,
         [('outputnode.ref_image', 'bold_ref'),
          ('outputnode.ref_image_brain', 'bold_ref_brain'),
          ('outputnode.bold_mask', 'bold_mask')]),
    ])

    # Split the 4D input file if requested; otherwise it is already a list of 3D volumes
    if split_file:
        bold_split = pe.Node(FSLSplit(dimension='t'),
                             name='bold_split',
                             mem_gb=mem_gb * 3)
        workflow.connect([(inputnode, bold_split, [('bold_file', 'in_file')]),
                          (bold_split, bold_transform, [
                              ('out_files', 'input_image'),
                              (('out_files', _first), 'reference_image'),
                          ])])
    else:
        workflow.connect([
            (inputnode, bold_transform, [('bold_file', 'input_image'),
                                         (('bold_file', _first),
                                          'reference_image')]),
        ])

    if use_fieldwarp:
        merge_xforms = pe.Node(niu.Merge(2),
                               name='merge_xforms',
                               run_without_submitting=True,
                               mem_gb=DEFAULT_MEMORY_MIN_GB)
        workflow.connect([
            (inputnode, merge_xforms, [('fieldwarp', 'in1'),
                                       ('hmc_xforms', 'in2')]),
            (merge_xforms, bold_transform, [('out', 'transforms')]),
        ])
    else:

        def _aslist(val):
            return [val]

        workflow.connect([
            (inputnode, bold_transform, [(('hmc_xforms', _aslist),
                                          'transforms')]),
        ])

    # Code ready to generate a pre/post processing report
    # bold_bold_report_wf = init_bold_preproc_report_wf(
    #     mem_gb=mem_gb['resampled'],
    #     reportlets_dir=reportlets_dir
    # )
    # workflow.connect([
    #     (inputnode, bold_bold_report_wf, [
    #         ('bold_file', 'inputnode.name_source'),
    #         ('bold_file', 'inputnode.in_pre')]),  # This should be after STC
    #     (bold_bold_trans_wf, bold_bold_report_wf, [
    #         ('outputnode.bold', 'inputnode.in_post')]),
    # ])

    return workflow
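
A minimal usage sketch (not part of the library): how a parent Nipype workflow
might wire the resampling workflow defined above into a larger pipeline. It
assumes the factory above is fMRIPrep's ``init_bold_preproc_trans_wf`` (its
signature falls outside this excerpt); the keyword arguments, parameter values
and the ``bold_source`` node are illustrative placeholders.

from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu

parent = pe.Workflow(name='func_preproc_demo')

# Hypothetical upstream node standing in for the HMC/SDC outputs
bold_source = pe.Node(
    niu.IdentityInterface(
        fields=['bold_file', 'name_source', 'hmc_xforms', 'fieldwarp']),
    name='bold_source')

# Keyword arguments assumed from the parameters used in the function body
bold_bold_trans_wf = init_bold_preproc_trans_wf(
    mem_gb=3, omp_nthreads=4, use_fieldwarp=True)

parent.connect([
    (bold_source, bold_bold_trans_wf, [
        ('bold_file', 'inputnode.bold_file'),
        ('name_source', 'inputnode.name_source'),
        ('hmc_xforms', 'inputnode.hmc_xforms'),
        ('fieldwarp', 'inputnode.fieldwarp')]),
])
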
Ejemplo n.º 40
0
def init_bold_confs_wf(
    mem_gb,
    metadata,
    regressors_all_comps,
    regressors_dvars_th,
    regressors_fd_th,
    name="bold_confs_wf",
):
    """
    This workflow calculates confounds for a BOLD series, and aggregates them
    into a :abbr:`TSV (tab-separated value)` file, for use as nuisance
    regressors in a :abbr:`GLM (general linear model)`.

    The following confounds are calculated, with column headings in parentheses:

    #. Region-wise average signal (``csf``, ``white_matter``, ``global_signal``)
    #. DVARS - original and standardized variants (``dvars``, ``std_dvars``)
    #. Framewise displacement, based on head-motion parameters
       (``framewise_displacement``)
    #. Temporal CompCor (``t_comp_cor_XX``)
    #. Anatomical CompCor (``a_comp_cor_XX``)
    #. Cosine basis set for high-pass filtering w/ 0.008 Hz cut-off
       (``cosine_XX``)
    #. Non-steady-state volumes (``non_steady_state_XX``)
    #. Estimated head-motion parameters, in mm and rad
       (``trans_x``, ``trans_y``, ``trans_z``, ``rot_x``, ``rot_y``, ``rot_z``)


    Prior to estimating aCompCor and tCompCor, non-steady-state volumes are
    censored and high-pass filtered using a :abbr:`DCT (discrete cosine
    transform)` basis.
    The cosine basis, as well as one regressor per censored volume, are included
    for convenience.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.confounds import init_bold_confs_wf
        wf = init_bold_confs_wf(
            mem_gb=1,
            metadata={},
            regressors_all_comps=False,
            regressors_dvars_th=1.5,
            regressors_fd_th=0.5,
        )

    **Parameters**

        mem_gb : float
            Size of BOLD file in GB - please note that this size
            should be calculated after resamplings that may extend
            the FoV
        metadata : dict
            BIDS metadata for BOLD file
        name : str
            Name of workflow (default: ``bold_confs_wf``)
        regressors_all_comps: bool
            Indicates whether CompCor decompositions should return all
            components instead of the minimal number of components necessary
            to explain 50 percent of the variance in the decomposition mask.
        regressors_dvars_th
            Criterion for flagging DVARS outliers
        regressors_fd_th
            Criterion for flagging framewise displacement outliers


    **Inputs**

        bold
            BOLD image, after the prescribed corrections (STC, HMC and SDC)
            when available.
        bold_mask
            BOLD series mask
        movpar_file
            SPM-formatted motion parameters file
        skip_vols
            number of non steady state volumes
        t1_mask
            Mask of the skull-stripped template image
        t1_tpms
            List of tissue probability maps in T1w space
        t1_bold_xform
            Affine matrix that maps the T1w space into alignment with
            the native BOLD space

    **Outputs**

        confounds_file
            TSV of all aggregated confounds
        rois_report
            Reportlet visualizing white-matter/CSF mask used for aCompCor,
            the ROI for tCompCor and the BOLD brain mask.
        confounds_metadata
            Confounds metadata dictionary.

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
Several confounding time-series were calculated based on the
*preprocessed BOLD*: framewise displacement (FD), DVARS and
three region-wise global signals.
FD and DVARS are calculated for each functional run, both using their
implementations in *Nipype* [following the definitions by @power_fd_dvars].
The three global signals are extracted within the CSF, the WM, and
the whole-brain masks.
Additionally, a set of physiological regressors were extracted to
allow for component-based noise correction [*CompCor*, @compcor].
Principal components are estimated after high-pass filtering the
*preprocessed BOLD* time-series (using a discrete cosine filter with
128s cut-off) for the two *CompCor* variants: temporal (tCompCor)
and anatomical (aCompCor).
tCompCor components are then calculated from the top 5% variable
voxels within a mask covering the subcortical regions.
This subcortical mask is obtained by heavily eroding the brain mask,
which ensures it does not include cortical GM regions.
For aCompCor, components are calculated within the intersection of
the aforementioned mask and the union of CSF and WM masks calculated
in T1w space, after their projection to the native space of each
functional run (using the inverse BOLD-to-T1w transformation). Components
are also calculated separately within the WM and CSF masks.
For each CompCor decomposition, the *k* components with the largest singular
values are retained, such that the retained components' time series are
sufficient to explain 50 percent of variance across the nuisance mask (CSF,
WM, combined, or temporal). The remaining components are dropped from
consideration.
The head-motion estimates calculated in the correction step were also
placed within the corresponding confounds file.
The confound time series derived from head motion estimates and global
signals were expanded with the inclusion of temporal derivatives and
quadratic terms for each [@confounds_satterthwaite_2013].
Frames that exceeded a threshold of {fd} mm FD or {dv} standardised DVARS
were annotated as motion outliers.
""".format(fd=regressors_fd_th, dv=regressors_dvars_th)
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold', 'bold_mask', 'movpar_file', 'skip_vols', 't1_mask', 't1_tpms',
        't1_bold_xform'
    ]),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['confounds_file', 'confounds_metadata']),
        name='outputnode')

    # Get masks ready in T1w space
    acc_tpm = pe.Node(AddTPMs(indices=[0, 2]),
                      name='tpms_add_csf_wm')  # acc stands for aCompCor
    csf_roi = pe.Node(TPM2ROI(erode_mm=0, mask_erode_mm=30), name='csf_roi')
    wm_roi = pe.Node(
        TPM2ROI(erode_prop=0.6,
                mask_erode_prop=0.6**3),  # 0.6 = radius; 0.6^3 = volume
        name='wm_roi')
    acc_roi = pe.Node(
        TPM2ROI(erode_prop=0.6,
                mask_erode_prop=0.6**3),  # 0.6 = radius; 0.6^3 = volume
        name='acc_roi')

    # Map ROIs in T1w space into BOLD space
    csf_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor',
                                      float=True),
                      name='csf_tfm',
                      mem_gb=0.1)
    wm_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor',
                                     float=True),
                     name='wm_tfm',
                     mem_gb=0.1)
    acc_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor',
                                      float=True),
                      name='acc_tfm',
                      mem_gb=0.1)
    tcc_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor',
                                      float=True),
                      name='tcc_tfm',
                      mem_gb=0.1)

    # Ensure ROIs don't go off-limits (reduced FoV)
    csf_msk = pe.Node(niu.Function(function=_maskroi), name='csf_msk')
    wm_msk = pe.Node(niu.Function(function=_maskroi), name='wm_msk')
    acc_msk = pe.Node(niu.Function(function=_maskroi), name='acc_msk')
    tcc_msk = pe.Node(niu.Function(function=_maskroi), name='tcc_msk')

    # DVARS
    dvars = pe.Node(nac.ComputeDVARS(save_nstd=True,
                                     save_std=True,
                                     remove_zerovariance=True),
                    name="dvars",
                    mem_gb=mem_gb)

    # Framewise displacement
    fdisp = pe.Node(nac.FramewiseDisplacement(parameter_source="SPM"),
                    name="fdisp",
                    mem_gb=mem_gb)

    # a/t-CompCor
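    # Both CompCor variants operate on the cosine high-pass-filtered series
    # (128s cut-off, see ``pre_filter='cosine'``) after dropping the initial
    # non-steady-state volumes (``ignore_initial_volumes``); unless
    # ``regressors_all_comps`` is set, components are retained until 50% of
    # the variance within each mask is explained (``variance_threshold=0.5``).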
    mrg_lbl_cc = pe.Node(niu.Merge(3),
                         name='merge_rois_cc',
                         run_without_submitting=True)

    tcompcor = pe.Node(TCompCor(components_file='tcompcor.tsv',
                                header_prefix='t_comp_cor_',
                                pre_filter='cosine',
                                save_pre_filter=True,
                                save_metadata=True,
                                percentile_threshold=.05,
                                failure_mode='NaN'),
                       name="tcompcor",
                       mem_gb=mem_gb)

    acompcor = pe.Node(ACompCor(components_file='acompcor.tsv',
                                header_prefix='a_comp_cor_',
                                pre_filter='cosine',
                                save_pre_filter=True,
                                save_metadata=True,
                                mask_names=['combined', 'CSF', 'WM'],
                                merge_method='none',
                                failure_mode='NaN'),
                       name="acompcor",
                       mem_gb=mem_gb)

    # Set number of components
    if regressors_all_comps:
        acompcor.inputs.num_components = 'all'
        tcompcor.inputs.num_components = 'all'
    else:
        acompcor.inputs.variance_threshold = 0.5
        tcompcor.inputs.variance_threshold = 0.5

    # Set TR if present
    if 'RepetitionTime' in metadata:
        tcompcor.inputs.repetition_time = metadata['RepetitionTime']
        acompcor.inputs.repetition_time = metadata['RepetitionTime']

    # Global and segment regressors
    mrg_lbl = pe.Node(niu.Merge(3),
                      name='merge_rois',
                      run_without_submitting=True)
    signals = pe.Node(SignalExtraction(
        class_labels=["csf", "white_matter", "global_signal"]),
                      name="signals",
                      mem_gb=mem_gb)

    # Arrange confounds
    add_dvars_header = pe.Node(AddTSVHeader(columns=["dvars"]),
                               name="add_dvars_header",
                               mem_gb=0.01,
                               run_without_submitting=True)
    add_std_dvars_header = pe.Node(AddTSVHeader(columns=["std_dvars"]),
                                   name="add_std_dvars_header",
                                   mem_gb=0.01,
                                   run_without_submitting=True)
    add_motion_headers = pe.Node(AddTSVHeader(
        columns=["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]),
                                 name="add_motion_headers",
                                 mem_gb=0.01,
                                 run_without_submitting=True)
    concat = pe.Node(GatherConfounds(),
                     name="concat",
                     mem_gb=0.01,
                     run_without_submitting=True)

    # CompCor metadata
    tcc_metadata_fmt = pe.Node(TSV2JSON(
        index_column='component',
        drop_columns=['mask'],
        output=None,
        additional_metadata={'Method': 'tCompCor'},
        enforce_case=True),
                               name='tcc_metadata_fmt')
    acc_metadata_fmt = pe.Node(TSV2JSON(
        index_column='component',
        output=None,
        additional_metadata={'Method': 'aCompCor'},
        enforce_case=True),
                               name='acc_metadata_fmt')
    mrg_conf_metadata = pe.Node(niu.Merge(2),
                                name='merge_confound_metadata',
                                run_without_submitting=True)
    mrg_conf_metadata2 = pe.Node(DictMerge(),
                                 name='merge_confound_metadata2',
                                 run_without_submitting=True)

    # Expand model to include derivatives and quadratics
    model_expand = pe.Node(
        ExpandModel(model_formula='(dd1(rps + wm + csf + gsr))^^2 + others'),
        name='model_expansion')

    # Add spike regressors
    spike_regress = pe.Node(SpikeRegressors(fd_thresh=regressors_fd_th,
                                            dvars_thresh=regressors_dvars_th),
                            name='spike_regressors')

    # Generate reportlet (ROIs)
    mrg_compcor = pe.Node(niu.Merge(2),
                          name='merge_compcor',
                          run_without_submitting=True)
    rois_plot = pe.Node(ROIsPlot(colors=['b', 'magenta'],
                                 generate_report=True),
                        name='rois_plot',
                        mem_gb=mem_gb)

    ds_report_bold_rois = pe.Node(DerivativesDataSink(desc='rois',
                                                      keep_dtype=True),
                                  name='ds_report_bold_rois',
                                  run_without_submitting=True,
                                  mem_gb=DEFAULT_MEMORY_MIN_GB)

    # Generate reportlet (CompCor)
    mrg_cc_metadata = pe.Node(niu.Merge(2),
                              name='merge_compcor_metadata',
                              run_without_submitting=True)
    compcor_plot = pe.Node(CompCorVariancePlot(
        variance_thresholds=(0.5, 0.7, 0.9),
        metadata_sources=['tCompCor', 'aCompCor']),
                           name='compcor_plot')
    ds_report_compcor = pe.Node(DerivativesDataSink(desc='compcorvar',
                                                    keep_dtype=True),
                                name='ds_report_compcor',
                                run_without_submitting=True,
                                mem_gb=DEFAULT_MEMORY_MIN_GB)

    # Generate reportlet (Confound correlation)
    conf_corr_plot = pe.Node(ConfoundsCorrelationPlot(
        reference_column='global_signal', max_dim=70),
                             name='conf_corr_plot')
    ds_report_conf_corr = pe.Node(DerivativesDataSink(desc='confoundcorr',
                                                      keep_dtype=True),
                                  name='ds_report_conf_corr',
                                  run_without_submitting=True,
                                  mem_gb=DEFAULT_MEMORY_MIN_GB)

    def _pick_csf(files):
        return files[0]

    def _pick_wm(files):
        return files[-1]

    workflow.connect([
        # Massage ROIs (in T1w space)
        (inputnode, acc_tpm, [('t1_tpms', 'in_files')]),
        (inputnode, csf_roi, [(('t1_tpms', _pick_csf), 'in_tpm'),
                              ('t1_mask', 'in_mask')]),
        (inputnode, wm_roi, [(('t1_tpms', _pick_wm), 'in_tpm'),
                             ('t1_mask', 'in_mask')]),
        (inputnode, acc_roi, [('t1_mask', 'in_mask')]),
        (acc_tpm, acc_roi, [('out_file', 'in_tpm')]),
        # Map ROIs to BOLD
        (inputnode, csf_tfm, [('bold_mask', 'reference_image'),
                              ('t1_bold_xform', 'transforms')]),
        (csf_roi, csf_tfm, [('roi_file', 'input_image')]),
        (inputnode, wm_tfm, [('bold_mask', 'reference_image'),
                             ('t1_bold_xform', 'transforms')]),
        (wm_roi, wm_tfm, [('roi_file', 'input_image')]),
        (inputnode, acc_tfm, [('bold_mask', 'reference_image'),
                              ('t1_bold_xform', 'transforms')]),
        (acc_roi, acc_tfm, [('roi_file', 'input_image')]),
        (inputnode, tcc_tfm, [('bold_mask', 'reference_image'),
                              ('t1_bold_xform', 'transforms')]),
        (csf_roi, tcc_tfm, [('eroded_mask', 'input_image')]),
        # Mask ROIs with bold_mask
        (inputnode, csf_msk, [('bold_mask', 'in_mask')]),
        (inputnode, wm_msk, [('bold_mask', 'in_mask')]),
        (inputnode, acc_msk, [('bold_mask', 'in_mask')]),
        (inputnode, tcc_msk, [('bold_mask', 'in_mask')]),
        # connect inputnode to each non-anatomical confound node
        (inputnode, dvars, [('bold', 'in_file'), ('bold_mask', 'in_mask')]),
        (inputnode, fdisp, [('movpar_file', 'in_file')]),

        # tCompCor
        (inputnode, tcompcor, [('bold', 'realigned_file')]),
        (inputnode, tcompcor, [('skip_vols', 'ignore_initial_volumes')]),
        (tcc_tfm, tcc_msk, [('output_image', 'roi_file')]),
        (tcc_msk, tcompcor, [('out', 'mask_files')]),

        # aCompCor
        (inputnode, acompcor, [('bold', 'realigned_file')]),
        (inputnode, acompcor, [('skip_vols', 'ignore_initial_volumes')]),
        (acc_tfm, acc_msk, [('output_image', 'roi_file')]),
        (acc_msk, mrg_lbl_cc, [('out', 'in1')]),
        (csf_msk, mrg_lbl_cc, [('out', 'in2')]),
        (wm_msk, mrg_lbl_cc, [('out', 'in3')]),
        (mrg_lbl_cc, acompcor, [('out', 'mask_files')]),

        # Global signals extraction (constrained by anatomy)
        (inputnode, signals, [('bold', 'in_file')]),
        (csf_tfm, csf_msk, [('output_image', 'roi_file')]),
        (csf_msk, mrg_lbl, [('out', 'in1')]),
        (wm_tfm, wm_msk, [('output_image', 'roi_file')]),
        (wm_msk, mrg_lbl, [('out', 'in2')]),
        (inputnode, mrg_lbl, [('bold_mask', 'in3')]),
        (mrg_lbl, signals, [('out', 'label_files')]),

        # Collate computed confounds together
        (inputnode, add_motion_headers, [('movpar_file', 'in_file')]),
        (dvars, add_dvars_header, [('out_nstd', 'in_file')]),
        (dvars, add_std_dvars_header, [('out_std', 'in_file')]),
        (signals, concat, [('out_file', 'signals')]),
        (fdisp, concat, [('out_file', 'fd')]),
        (tcompcor, concat, [('components_file', 'tcompcor'),
                            ('pre_filter_file', 'cos_basis')]),
        (acompcor, concat, [('components_file', 'acompcor')]),
        (add_motion_headers, concat, [('out_file', 'motion')]),
        (add_dvars_header, concat, [('out_file', 'dvars')]),
        (add_std_dvars_header, concat, [('out_file', 'std_dvars')]),

        # Confounds metadata
        (tcompcor, tcc_metadata_fmt, [('metadata_file', 'in_file')]),
        (acompcor, acc_metadata_fmt, [('metadata_file', 'in_file')]),
        (tcc_metadata_fmt, mrg_conf_metadata, [('output', 'in1')]),
        (acc_metadata_fmt, mrg_conf_metadata, [('output', 'in2')]),
        (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),

        # Expand the model with derivatives, quadratics, and spikes
        (concat, model_expand, [('confounds_file', 'confounds_file')]),
        (model_expand, spike_regress, [('confounds_file', 'confounds_file')]),

        # Set outputs
        (spike_regress, outputnode, [('confounds_file', 'confounds_file')]),
        (mrg_conf_metadata2, outputnode, [('out_dict', 'confounds_metadata')]),
        (inputnode, rois_plot, [('bold', 'in_file'),
                                ('bold_mask', 'in_mask')]),
        (tcompcor, mrg_compcor, [('high_variance_masks', 'in1')]),
        (acc_msk, mrg_compcor, [('out', 'in2')]),
        (mrg_compcor, rois_plot, [('out', 'in_rois')]),
        (rois_plot, ds_report_bold_rois, [('out_report', 'in_file')]),
        (tcompcor, mrg_cc_metadata, [('metadata_file', 'in1')]),
        (acompcor, mrg_cc_metadata, [('metadata_file', 'in2')]),
        (mrg_cc_metadata, compcor_plot, [('out', 'metadata_files')]),
        (compcor_plot, ds_report_compcor, [('out_file', 'in_file')]),
        (concat, conf_corr_plot, [('confounds_file', 'confounds_file')]),
        (conf_corr_plot, ds_report_conf_corr, [('out_file', 'in_file')]),
    ])

    return workflow
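
The ``ExpandModel`` step above augments the base confound set with temporal
derivatives and quadratic terms (the ``(dd1(rps + wm + csf + gsr))^^2 + others``
formula). The pandas sketch below only illustrates that idea for a single
column; it is not the library's implementation, the values are made up, and the
column-name pattern (``*_derivative1``, ``*_power2``) follows fMRIPrep's
convention.

import pandas as pd

# Toy confounds table with a single motion trace (made-up values)
conf = pd.DataFrame({'trans_x': [0.00, 0.10, 0.15, 0.12, 0.20]})

# First temporal derivative (backward difference; undefined for the first frame)
conf['trans_x_derivative1'] = conf['trans_x'].diff()
# Quadratic terms of the original regressor and of its derivative
conf['trans_x_power2'] = conf['trans_x'] ** 2
conf['trans_x_derivative1_power2'] = conf['trans_x_derivative1'] ** 2

print(conf.round(4))
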
Ejemplo n.º 41
0
def init_bold_preproc_report_wf(mem_gb,
                                reportlets_dir,
                                name='bold_preproc_report_wf'):
    """
    This workflow generates and saves a reportlet showing the effect of resampling
    the BOLD signal using the standard deviation maps.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.resampling import init_bold_preproc_report_wf
        wf = init_bold_preproc_report_wf(mem_gb=1, reportlets_dir='.')

    **Parameters**

        mem_gb : float
            Size of BOLD file in GB
        reportlets_dir : str
            Directory in which to save reportlets
        name : str, optional
            Workflow name (default: bold_preproc_report_wf)

    **Inputs**

        in_pre
            BOLD time-series, before resampling
        in_post
            BOLD time-series, after resampling
        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing

    """

    from nipype.algorithms.confounds import TSNR
    from niworkflows.interfaces import SimpleBeforeAfter

    workflow = Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=['in_pre', 'in_post', 'name_source']),
        name='inputnode')

    pre_tsnr = pe.Node(TSNR(), name='pre_tsnr', mem_gb=mem_gb * 4.5)
    pos_tsnr = pe.Node(TSNR(), name='pos_tsnr', mem_gb=mem_gb * 4.5)

    bold_rpt = pe.Node(SimpleBeforeAfter(), name='bold_rpt', mem_gb=0.1)
    bold_rpt_ds = pe.Node(DerivativesDataSink(base_directory=reportlets_dir,
                                              suffix='variant-preproc'),
                          name='bold_rpt_ds',
                          mem_gb=DEFAULT_MEMORY_MIN_GB,
                          run_without_submitting=True)

    workflow.connect([
        (inputnode, bold_rpt_ds, [('name_source', 'source_file')]),
        (inputnode, pre_tsnr, [('in_pre', 'in_file')]),
        (inputnode, pos_tsnr, [('in_post', 'in_file')]),
        (pre_tsnr, bold_rpt, [('stddev_file', 'before')]),
        (pos_tsnr, bold_rpt, [('stddev_file', 'after')]),
        (bold_rpt, bold_rpt_ds, [('out_report', 'in_file')]),
    ])

    return workflow
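
The reportlet above contrasts the temporal standard-deviation maps computed by
Nipype's ``TSNR`` interface before and after resampling. As a rough,
self-contained stand-in for what ``stddev_file`` contains, the sketch below
computes a voxelwise standard-deviation map with nibabel/numpy (without the
polynomial detrending ``TSNR`` can additionally apply); the file paths are
placeholders.

import nibabel as nb
import numpy as np

def temporal_stddev(in_4d, out_file):
    """Write the voxelwise standard deviation across time of a 4D NIfTI."""
    img = nb.load(in_4d)
    std_map = img.get_fdata().std(axis=-1)  # collapse the time axis
    nb.Nifti1Image(std_map.astype(np.float32), img.affine).to_filename(out_file)
    return out_file

# e.g. temporal_stddev('sub-01_task-rest_bold.nii.gz', 'stddev.nii.gz')
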
Ejemplo n.º 42
0
def init_ica_aroma_wf(metadata,
                      mem_gb,
                      omp_nthreads,
                      name='ica_aroma_wf',
                      susan_fwhm=6.0,
                      err_on_aroma_warn=False,
                      aroma_melodic_dim=-200,
                      use_fieldwarp=True):
    """
    This workflow wraps `ICA-AROMA`_ to identify and remove motion-related
    independent components from a BOLD time series.

    The following steps are performed:

    #. Remove non-steady state volumes from the bold series.
    #. Smooth data using FSL `susan`, with a kernel width FWHM=6.0mm.
    #. Run FSL `melodic` outside of ICA-AROMA to generate the report
    #. Run ICA-AROMA
    #. Aggregate identified motion components (aggressive) to TSV
    #. Return ``classified_motion_ICs`` and ``melodic_mix`` for user to complete
       non-aggressive denoising in T1w space
    #. Calculate ICA-AROMA-identified noise components
       (columns named ``AROMAAggrCompXX``)

    Additionally, non-aggressive denoising is performed on the BOLD series
    resampled into MNI space.

    There is a current discussion on whether other confounds should be extracted
    before or after denoising `here <http://nbviewer.jupyter.org/github/poldracklab/\
    fmriprep-notebooks/blob/922e436429b879271fa13e76767a6e73443e74d9/issue-817_\
    aroma_confounds.ipynb>`__.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.confounds import init_ica_aroma_wf
        wf = init_ica_aroma_wf(metadata={'RepetitionTime': 1.0},
                               mem_gb=3,
                               omp_nthreads=1)

    **Parameters**

        metadata : dict
            BIDS metadata for BOLD file
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``ica_aroma_wf``)
        susan_fwhm : float
            Kernel width (FWHM in mm) for the smoothing step with
            FSL ``susan`` (default: 6.0mm)
        use_fieldwarp : bool
            Include SDC warp in single-shot transform from BOLD to MNI
        err_on_aroma_warn : bool
            Do not fail on ICA-AROMA errors
        aroma_melodic_dim: int
            Set the dimensionality of the MELODIC ICA decomposition.
            Negative numbers set a maximum on automatic dimensionality estimation.
            Positive numbers set an exact number of components to extract.
            (default: -200, i.e., estimate <=200 components)

    **Inputs**

        bold_std
            BOLD series, resampled to template space
        bold_mask_std
            BOLD series mask in template space
        movpar_file
            SPM-formatted motion parameters file
        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing
        skip_vols
            number of non steady state volumes
        templates
            List of templates that were applied as targets during
            spatial normalization (``MNI152NLin6Asym`` must be among them)

    **Outputs**

        aroma_confounds
            TSV of confounds identified as noise by ICA-AROMA
        aroma_noise_ics
            CSV of noise components identified by ICA-AROMA
        melodic_mix
            FSL MELODIC mixing matrix
        nonaggr_denoised_file
            BOLD series with non-aggressive ICA-AROMA denoising applied

    .. _ICA-AROMA: https://github.com/maartenmennes/ICA-AROMA

    """
    workflow = Workflow(name=name)
    workflow.__postdesc__ = """\
Automatic removal of motion artifacts using independent component analysis
[ICA-AROMA, @aroma] was performed on the *preprocessed BOLD on MNI space*
time-series after removal of non-steady state volumes and spatial smoothing
with an isotropic, Gaussian kernel of 6mm FWHM (full-width half-maximum).
Corresponding "non-aggressively" denoised runs were produced after such
smoothing.
Additionally, the "aggressive" noise-regressors were collected and placed
in the corresponding confounds file.
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_std',
        'bold_mask_std',
        'movpar_file',
        'name_source',
        'skip_vols',
        'templates',
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'aroma_confounds', 'aroma_noise_ics', 'melodic_mix',
        'nonaggr_denoised_file'
    ]),
                         name='outputnode')

    select_std = pe.Node(KeySelect(fields=['bold_mask_std', 'bold_std']),
                         name='select_std',
                         run_without_submitting=True)
    select_std.inputs.key = 'MNI152NLin6Asym'

    rm_non_steady_state = pe.Node(niu.Function(function=_remove_volumes,
                                               output_names=['bold_cut']),
                                  name='rm_nonsteady')

    calc_median_val = pe.Node(fsl.ImageStats(op_string='-k %s -p 50'),
                              name='calc_median_val')
    calc_bold_mean = pe.Node(fsl.MeanImage(), name='calc_bold_mean')

    def _getusans_func(image, thresh):
        return [tuple([image, thresh])]

    getusans = pe.Node(niu.Function(function=_getusans_func,
                                    output_names=['usans']),
                       name='getusans',
                       mem_gb=0.01)

    smooth = pe.Node(fsl.SUSAN(fwhm=susan_fwhm), name='smooth')

    # melodic node
    melodic = pe.Node(fsl.MELODIC(no_bet=True,
                                  tr_sec=float(metadata['RepetitionTime']),
                                  mm_thresh=0.5,
                                  out_stats=True,
                                  dim=aroma_melodic_dim),
                      name="melodic")

    # ica_aroma node
    ica_aroma = pe.Node(ICA_AROMARPT(denoise_type='nonaggr',
                                     generate_report=True,
                                     TR=metadata['RepetitionTime']),
                        name='ica_aroma')

    add_non_steady_state = pe.Node(niu.Function(function=_add_volumes,
                                                output_names=['bold_add']),
                                   name='add_nonsteady')

    # extract the confound ICs from the results
    ica_aroma_confound_extraction = pe.Node(
        ICAConfounds(err_on_aroma_warn=err_on_aroma_warn),
        name='ica_aroma_confound_extraction')

    ds_report_ica_aroma = pe.Node(DerivativesDataSink(desc='aroma',
                                                      keep_dtype=True),
                                  name='ds_report_ica_aroma',
                                  run_without_submitting=True,
                                  mem_gb=DEFAULT_MEMORY_MIN_GB)

    def _getbtthresh(medianval):
        return 0.75 * medianval

    # connect the nodes
    workflow.connect([
        (inputnode, select_std, [('templates', 'keys'),
                                 ('bold_std', 'bold_std'),
                                 ('bold_mask_std', 'bold_mask_std')]),
        (inputnode, ica_aroma, [('movpar_file', 'motion_parameters')]),
        (inputnode, rm_non_steady_state, [('skip_vols', 'skip_vols')]),
        (select_std, rm_non_steady_state, [('bold_std', 'bold_file')]),
        (select_std, calc_median_val, [('bold_mask_std', 'mask_file')]),
        (rm_non_steady_state, calc_median_val, [('bold_cut', 'in_file')]),
        (rm_non_steady_state, calc_bold_mean, [('bold_cut', 'in_file')]),
        (calc_bold_mean, getusans, [('out_file', 'image')]),
        (calc_median_val, getusans, [('out_stat', 'thresh')]),
        # Connect input nodes to complete smoothing
        (rm_non_steady_state, smooth, [('bold_cut', 'in_file')]),
        (getusans, smooth, [('usans', 'usans')]),
        (calc_median_val, smooth, [(('out_stat', _getbtthresh),
                                    'brightness_threshold')]),
        # connect smooth to melodic
        (smooth, melodic, [('smoothed_file', 'in_files')]),
        (select_std, melodic, [('bold_mask_std', 'mask')]),
        # connect nodes to ICA-AROMA
        (smooth, ica_aroma, [('smoothed_file', 'in_file')]),
        (select_std, ica_aroma, [('bold_mask_std', 'report_mask'),
                                 ('bold_mask_std', 'mask')]),
        (melodic, ica_aroma, [('out_dir', 'melodic_dir')]),
        # generate tsvs from ICA-AROMA
        (ica_aroma, ica_aroma_confound_extraction, [('out_dir', 'in_directory')
                                                    ]),
        (inputnode, ica_aroma_confound_extraction, [('skip_vols', 'skip_vols')
                                                    ]),
        # output for processing and reporting
        (ica_aroma_confound_extraction,
         outputnode, [('aroma_confounds', 'aroma_confounds'),
                      ('aroma_noise_ics', 'aroma_noise_ics'),
                      ('melodic_mix', 'melodic_mix')]),
        (ica_aroma, add_non_steady_state, [('nonaggr_denoised_file',
                                            'bold_cut_file')]),
        (select_std, add_non_steady_state, [('bold_std', 'bold_file')]),
        (inputnode, add_non_steady_state, [('skip_vols', 'skip_vols')]),
        (add_non_steady_state, outputnode, [('bold_add',
                                             'nonaggr_denoised_file')]),
        (ica_aroma, ds_report_ica_aroma, [('out_report', 'in_file')]),
    ])

    return workflow
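
"Non-aggressive" denoising, which this workflow applies to the MNI-space series
(and for which it exposes ``melodic_mix`` and the noise-component indices so it
can be repeated in other spaces), amounts to fitting *all* MELODIC components to
each voxel time series and subtracting only the fitted contribution of the
components classified as noise. ICA-AROMA/fMRIPrep delegate this to FSL's
``fsl_regfilt``; the numpy sketch below only illustrates the linear algebra, and
the example component indices are made up.

import numpy as np

def nonaggr_denoise(data, mix, noise_idx):
    """data: (n_timepoints, n_voxels) array; mix: (n_timepoints, n_components)
    MELODIC mixing matrix; noise_idx: indices of noise components."""
    betas, *_ = np.linalg.lstsq(mix, data, rcond=None)  # fit ALL components
    return data - mix[:, noise_idx] @ betas[noise_idx]  # remove only the noise fit

# e.g. cleaned = nonaggr_denoise(bold_2d, np.loadtxt('melodic_mix'), [0, 3, 7])
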
Ejemplo n.º 43
0
def init_bold_std_trans_wf(
    mem_gb,
    omp_nthreads,
    spaces,
    name="bold_std_trans_wf",
    use_compression=True,
    use_fieldwarp=False,
):
    """
    Sample fMRI into standard space with a single-step resampling of the original BOLD series.

    .. important::
        This workflow provides two outputnodes.
        One output node (with name ``poutputnode``) will be parameterized in a Nipype sense
        (see `Nipype iterables
        <https://miykael.github.io/nipype_tutorial/notebooks/basic_iteration.html>`__), and a
        second node (``outputnode``) will collapse the parameterized outputs into synchronous
        lists of the output fields listed below.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes

            from niworkflows.utils.spaces import SpatialReferences
            from fprodents.workflows.bold.resampling import init_bold_std_trans_wf
            wf = init_bold_std_trans_wf(
                mem_gb=3,
                omp_nthreads=1,
                spaces=SpatialReferences(
                    spaces=['MNI152Lin',
                            ('MNIPediatricAsym', {'cohort': '6'})],
                    checkpoint=True),
            )

    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
        A container for storing, organizing, and parsing spatial normalizations. Composed of
        :py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
        Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
        (e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
        (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
        the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
        dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
        would lead to resampling on a 2mm resolution of the space).
    name : :obj:`str`
        Name of workflow (default: ``bold_std_trans_wf``)
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    use_fieldwarp : :obj:`bool`
        Include SDC warp in single-shot transform from BOLD to MNI

    Inputs
    ------
    anat2std_xfm
        List of anatomical-to-standard space transforms generated during
        spatial normalization.
    bold_mask
        Skull-stripping mask of reference image
    bold_split
        Individual 3D volumes, not motion corrected
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    bold2anat
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    templates
        List of templates that were applied as targets during
        spatial normalization.

    Outputs
    -------
    bold_std
        BOLD series, resampled to template space
    bold_std_ref
        Reference, contrast-enhanced summary of the BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    template
        Template identifiers synchronized correspondingly to previously
        described outputs.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.utility import KeySelect
    from niworkflows.interfaces.nibabel import GenerateSamplingReference
    from niworkflows.interfaces.nilearn import Merge
    from niworkflows.utils.spaces import format_reference

    workflow = Workflow(name=name)
    output_references = spaces.cached.get_spaces(nonstandard=False, dim=(3, ))
    std_vol_references = [(s.fullname, s.spec) for s in spaces.references
                          if s.standard and s.dim == 3]

    if len(output_references) == 1:
        workflow.__desc__ = """\
The BOLD time-series were resampled into standard space,
generating a *preprocessed BOLD run in {tpl} space*.
""".format(tpl=output_references[0])
    elif len(output_references) > 1:
        workflow.__desc__ = """\
The BOLD time-series were resampled into several standard spaces,
correspondingly generating the following *spatially-normalized,
preprocessed BOLD runs*: {tpl}.
""".format(tpl=", ".join(output_references))

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "anat2std_xfm",
            "bold_mask",
            "bold_split",
            "fieldwarp",
            "hmc_xforms",
            "bold2anat",
            "name_source",
            "templates",
        ]),
        name="inputnode",
    )

    iterablesource = pe.Node(niu.IdentityInterface(fields=["std_target"]),
                             name="iterablesource")
    # Generate conversions for every template+spec at the input
    iterablesource.iterables = [("std_target", std_vol_references)]

    split_target = pe.Node(
        niu.Function(
            function=_split_spec,
            input_names=["in_target"],
            output_names=["space", "template", "spec"],
        ),
        run_without_submitting=True,
        name="split_target",
    )

    select_std = pe.Node(
        KeySelect(fields=["anat2std_xfm"]),
        name="select_std",
        run_without_submitting=True,
    )

    select_tpl = pe.Node(
        niu.Function(function=_select_template),
        name="select_tpl",
        run_without_submitting=True,
    )

    gen_ref = pe.Node(GenerateSamplingReference(), name="gen_ref",
                      mem_gb=0.3)  # 256x256x256 * 64 / 8 ~ 150MB

    mask_std_tfm = pe.Node(ApplyTransforms(interpolation="MultiLabel"),
                           name="mask_std_tfm",
                           mem_gb=1)

    ref_std_tfm = pe.Node(ApplyTransforms(interpolation="LanczosWindowedSinc"),
                          name="ref_std_tfm",
                          mem_gb=1)

    # Write corrected file in the designated output dir
    mask_merge_tfms = pe.Node(
        niu.Merge(2),
        name="mask_merge_tfms",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )

    nxforms = 3 + use_fieldwarp
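    # Transforms are stacked into one list so the BOLD series is resampled with
    # a single interpolation step: anat2std_xfm -> in1, bold2anat -> in2, the
    # optional fieldwarp -> in3, and the per-volume HMC transforms -> the last
    # slot (in3 or in4, depending on ``use_fieldwarp``).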
    merge_xforms = pe.Node(
        niu.Merge(nxforms),
        name="merge_xforms",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    workflow.connect([(inputnode, merge_xforms, [("hmc_xforms",
                                                  "in%d" % nxforms)])])

    if use_fieldwarp:
        workflow.connect([(inputnode, merge_xforms, [("fieldwarp", "in3")])])

    bold_to_std_transform = pe.Node(
        MultiApplyTransforms(interpolation="LanczosWindowedSinc",
                             float=True,
                             copy_dtype=True),
        name="bold_to_std_transform",
        mem_gb=mem_gb * 3 * omp_nthreads,
        n_procs=omp_nthreads,
    )

    merge = pe.Node(Merge(compress=use_compression),
                    name="merge",
                    mem_gb=mem_gb * 3)

    # fmt:off
    workflow.connect([
        (iterablesource, split_target, [('std_target', 'in_target')]),
        (iterablesource, select_tpl, [('std_target', 'template')]),
        (inputnode, select_std, [('anat2std_xfm', 'anat2std_xfm'),
                                 ('templates', 'keys')]),
        (inputnode, mask_std_tfm, [('bold_mask', 'input_image')]),
        (inputnode, ref_std_tfm, [('bold_mask', 'input_image')]),
        (inputnode, gen_ref, [(('bold_split', _first), 'moving_image')]),
        (inputnode, merge_xforms, [(('bold2anat', _aslist), 'in2')]),
        (inputnode, merge, [('name_source', 'header_source')]),
        (inputnode, mask_merge_tfms, [(('bold2anat', _aslist), 'in2')]),
        (inputnode, bold_to_std_transform, [('bold_split', 'input_image')]),
        (split_target, select_std, [('space', 'key')]),
        (select_std, merge_xforms, [('anat2std_xfm', 'in1')]),
        (select_std, mask_merge_tfms, [('anat2std_xfm', 'in1')]),
        (split_target, gen_ref, [(('spec', _is_native), 'keep_native')]),
        (select_tpl, gen_ref, [('out', 'fixed_image')]),
        (merge_xforms, bold_to_std_transform, [('out', 'transforms')]),
        (gen_ref, bold_to_std_transform, [('out_file', 'reference_image')]),
        (gen_ref, mask_std_tfm, [('out_file', 'reference_image')]),
        (mask_merge_tfms, mask_std_tfm, [('out', 'transforms')]),
        (gen_ref, ref_std_tfm, [('out_file', 'reference_image')]),
        (mask_merge_tfms, ref_std_tfm, [('out', 'transforms')]),
        (bold_to_std_transform, merge, [('out_files', 'in_files')]),
    ])
    # fmt:on

    output_names = [
        "bold_mask_std",
        "bold_std",
        "bold_std_ref",
        "spatial_reference",
        "template",
    ]

    poutputnode = pe.Node(niu.IdentityInterface(fields=output_names),
                          name="poutputnode")
    # fmt:off
    workflow.connect([
        # Connecting outputnode
        (iterablesource, poutputnode, [(('std_target', format_reference),
                                        'spatial_reference')]),
        (merge, poutputnode, [('out_file', 'bold_std')]),
        (ref_std_tfm, poutputnode, [('output_image', 'bold_std_ref')]),
        (mask_std_tfm, poutputnode, [('output_image', 'bold_mask_std')]),
        (select_std, poutputnode, [('key', 'template')]),
    ])
    # fmt:on

    # Connect parametric outputs to a Join outputnode
    outputnode = pe.JoinNode(
        niu.IdentityInterface(fields=output_names),
        name="outputnode",
        joinsource="iterablesource",
    )
    # fmt:off
    workflow.connect([
        (poutputnode, outputnode, [(f, f) for f in output_names]),
    ])
    # fmt:on
    return workflow
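
The parameterized-output pattern used above (``iterables`` on ``iterablesource``
fanning out one branch per standard space, collapsed back by a ``JoinNode``) is
generic Nipype machinery. Below is a minimal, self-contained sketch of the same
pattern; it assumes only that Nipype is installed, and all names and values are
arbitrary.

from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu

def _echo(template):
    return 'resampled to %s' % template

# Fan out: one parameterized branch per value of 'template'
src = pe.Node(niu.IdentityInterface(fields=['template']), name='src')
src.iterables = [('template', ['MNI152Lin', 'MNI152NLin6Asym'])]

echo = pe.Node(niu.Function(function=_echo, input_names=['template'],
                            output_names=['out']),
               name='echo')

# Fan in: the JoinNode collects the parameterized outputs into a single list
join = pe.JoinNode(niu.IdentityInterface(fields=['out']),
                   joinsource='src', joinfield=['out'], name='join')

wf = pe.Workflow(name='iterables_demo', base_dir='.')
wf.connect([(src, echo, [('template', 'template')]),
            (echo, join, [('out', 'out')])])
# wf.run()  # after running, 'join.out' collects the results of both branches
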
Ejemplo n.º 44
0
def init_carpetplot_wf(mem_gb, metadata, name="bold_carpet_wf"):
    """

    Resamples the MNI parcellation (ad-hoc parcellation derived from the
    Harvard-Oxford template and others).

    **Parameters**

        mem_gb : float
            Size of BOLD file in GB - please note that this size
            should be calculated after resamplings that may extend
            the FoV
        metadata : dict
            BIDS metadata for BOLD file
        name : str
            Name of workflow (default: ``bold_carpet_wf``)

    **Inputs**

        bold
            BOLD image, after the prescribed corrections (STC, HMC and SDC)
            when available.
        bold_mask
            BOLD series mask
        confounds_file
            TSV of all aggregated confounds
        t1_bold_xform
            Affine matrix that maps the T1w space into alignment with
            the native BOLD space
        t1_2_mni_reverse_transform
            ANTs-compatible affine-and-warp transform file

    **Outputs**

        out_carpetplot
            Path of the generated SVG file

    """
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold', 'bold_mask', 'confounds_file',
                't1_bold_xform', 't1_2_mni_reverse_transform']),
        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_carpetplot']), name='outputnode')

    # List transforms
    mrg_xfms = pe.Node(niu.Merge(2), name='mrg_xfms')

    # Warp segmentation into EPI space
    resample_parc = pe.Node(ApplyTransforms(
        float=True,
        input_image=str(
            get_template('MNI152NLin2009cAsym') /
            'tpl-MNI152NLin2009cAsym_space-MNI_res-01_label-carpet_atlas.nii.gz'),
        dimension=3, default_value=0, interpolation='MultiLabel'),
        name='resample_parc')

    # Carpetplot and confounds plot
    conf_plot = pe.Node(FMRISummary(
        tr=metadata['RepetitionTime'],
        confounds_list=[
            ('global_signal', None, 'GS'),
            ('csf', None, 'GSCSF'),
            ('white_matter', None, 'GSWM'),
            ('std_dvars', None, 'DVARS'),
            ('framewise_displacement', 'mm', 'FD')]),
        name='conf_plot', mem_gb=mem_gb)
    ds_report_bold_conf = pe.Node(
        DerivativesDataSink(suffix='carpetplot'),
        name='ds_report_bold_conf', run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow = Workflow(name=name)
    workflow.connect([
        (inputnode, mrg_xfms, [('t1_bold_xform', 'in1'),
                               ('t1_2_mni_reverse_transform', 'in2')]),
        (inputnode, resample_parc, [('bold_mask', 'reference_image')]),
        (mrg_xfms, resample_parc, [('out', 'transforms')]),
        # Carpetplot
        (inputnode, conf_plot, [
            ('bold', 'in_func'),
            ('bold_mask', 'in_mask'),
            ('confounds_file', 'confounds_file')]),
        (resample_parc, conf_plot, [('output_image', 'in_segm')]),
        (conf_plot, ds_report_bold_conf, [('out_file', 'in_file')]),
        (conf_plot, outputnode, [('out_file', 'out_carpetplot')]),
    ])
    return workflow
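
Conceptually, the carpet plot produced by ``FMRISummary`` is the masked BOLD
series displayed as a voxels-by-time image (z-scored per voxel and, in
niworkflows, ordered by the resampled segmentation), with confound traces
plotted above it. The sketch below is a bare-bones illustration of that idea
with nibabel/matplotlib, not the niworkflows implementation; the file paths are
placeholders.

import nibabel as nb
import matplotlib.pyplot as plt

def simple_carpetplot(bold_file, mask_file, out_png):
    bold = nb.load(bold_file).get_fdata()
    mask = nb.load(mask_file).get_fdata() > 0
    data = bold[mask]                                  # (voxels, timepoints)
    data = (data - data.mean(1, keepdims=True)) / (data.std(1, keepdims=True) + 1e-6)
    plt.imshow(data, aspect='auto', cmap='gray', vmin=-2, vmax=2)
    plt.xlabel('time (TR)')
    plt.ylabel('voxels')
    plt.savefig(out_png, bbox_inches='tight')
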
Ejemplo n.º 45
0
def init_bold_preproc_trans_wf(
    mem_gb,
    omp_nthreads,
    name="bold_preproc_trans_wf",
    use_compression=True,
    use_fieldwarp=False,
    split_file=False,
    interpolation="LanczosWindowedSinc",
):
    """
    Resample in native (original) space.

    This workflow resamples the input fMRI in its native (original)
    space in a "single shot" from the original BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes

            from fprodents.workflows.bold.resampling import init_bold_preproc_trans_wf
            wf = init_bold_preproc_trans_wf(mem_gb=3, omp_nthreads=1)

    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``bold_preproc_trans_wf``)
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    use_fieldwarp : :obj:`bool`
        Include SDC warp in single-shot transform from BOLD to MNI
    split_file : :obj:`bool`
        Whether the input file should be split (i.e., it is a single 4D file)
        or it is already a list of 3D files (default ``False``, do not split)
    interpolation : :obj:`str`
        Interpolation type to be used by ANTs' ``applyTransforms``
        (default ``'LanczosWindowedSinc'``)

    Inputs
    ------
    bold_file
        Individual 3D volumes, not motion corrected
    bold_mask
        Skull-stripping mask of reference image
    bold_ref
        BOLD reference image: an average-like 3D image of the time-series
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format

    Outputs
    -------
    bold
        BOLD series, resampled in native space, including all preprocessing

    """
    from bids.utils import listify
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.nilearn import Merge

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series (including slice-timing correction when applied)
were resampled onto their original, native space by applying
{transforms}.
These resampled BOLD time-series will be referred to as *preprocessed
BOLD in original space*, or just *preprocessed BOLD*.
""".format(transforms="""\
a single, composite transform to correct for head-motion and
susceptibility distortions""" if use_fieldwarp else """\
the transforms to correct for head-motion""")

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "name_source", "bold_file", "bold_mask", "bold_ref", "hmc_xforms",
            "fieldwarp"
        ]),
        name="inputnode",
    )

    outputnode = pe.Node(niu.IdentityInterface(fields=["bold"]),
                         name="outputnode")

    bold_transform = pe.Node(
        MultiApplyTransforms(interpolation=interpolation,
                             float=True,
                             copy_dtype=True),
        name="bold_transform",
        mem_gb=mem_gb * 3 * omp_nthreads,
        n_procs=omp_nthreads,
    )

    merge = pe.Node(Merge(compress=use_compression),
                    name="merge",
                    mem_gb=mem_gb * 3)

    # fmt:off
    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (bold_transform, merge, [('out_files', 'in_files')]),
        (inputnode, bold_transform, [(('hmc_xforms', listify), 'transforms'),
                                     ('bold_ref', 'reference_image')]),
        (merge, outputnode, [('out_file', 'bold')]),
    ])
    # fmt:on

    # Split the input file when it comes in as a single 4D series (``split_file=True``)
    if split_file:
        bold_split = pe.Node(FSLSplit(dimension="t"),
                             name="bold_split",
                             mem_gb=mem_gb * 3)
        # fmt:off
        workflow.connect([(inputnode, bold_split, [('bold_file', 'in_file')]),
                          (bold_split, bold_transform, [('out_files',
                                                         'input_image')])])
        # fmt:on
    else:
        # fmt:off
        workflow.connect([
            (inputnode, bold_transform, [('bold_file', 'input_image')]),
        ])
        # fmt:on

    return workflow
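
The ``(('hmc_xforms', listify), 'transforms')`` connection above exists because
``MultiApplyTransforms`` expects a list of transforms, while ``hmc_xforms`` may
arrive as a single file. ``bids.utils.listify`` wraps scalars in a list and
passes lists through unchanged (behavior as in recent pybids releases); a quick
illustration:

from bids.utils import listify

print(listify('hmc.txt'))            # ['hmc.txt']
print(listify(['a.txt', 'b.txt']))   # ['a.txt', 'b.txt']
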
Ejemplo n.º 46
0
def init_ica_aroma_wf(template, metadata, mem_gb, omp_nthreads,
                      name='ica_aroma_wf',
                      susan_fwhm=6.0,
                      ignore_aroma_err=False,
                      aroma_melodic_dim=-200,
                      use_fieldwarp=True):
    """
    This workflow wraps `ICA-AROMA`_ to identify and remove motion-related
    independent components from a BOLD time series.

    The following steps are performed:

    #. Remove non-steady state volumes from the bold series.
    #. Smooth data using FSL `susan`, with a kernel width FWHM=6.0mm.
    #. Run FSL `melodic` outside of ICA-AROMA to generate the report
    #. Run ICA-AROMA
    #. Aggregate identified motion components (aggressive) to TSV
    #. Return ``classified_motion_ICs`` and ``melodic_mix`` for user to complete
       non-aggressive denoising in T1w space
    #. Calculate ICA-AROMA-identified noise components
       (columns named ``AROMAAggrCompXX``)

    Additionally, non-aggressive denoising is performed on the BOLD series
    resampled into MNI space.

    There is a current discussion on whether other confounds should be extracted
    before or after denoising `here <http://nbviewer.jupyter.org/github/poldracklab/\
    fmriprep-notebooks/blob/922e436429b879271fa13e76767a6e73443e74d9/issue-817_\
    aroma_confounds.ipynb>`__.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold.confounds import init_ica_aroma_wf
        wf = init_ica_aroma_wf(template='MNI152NLin2009cAsym',
                               metadata={'RepetitionTime': 1.0},
                               mem_gb=3,
                               omp_nthreads=1)

    **Parameters**

        template : str
            Spatial normalization template used as target when that
            registration step was previously calculated with
            :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`.
            The template must be one of the MNI templates (fMRIPrep uses
            ``MNI152NLin2009cAsym`` by default).
        metadata : dict
            BIDS metadata for BOLD file
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``ica_aroma_wf``)
        susan_fwhm : float
            Kernel width (FWHM in mm) for the smoothing step with
            FSL ``susan`` (default: 6.0mm)
        use_fieldwarp : bool
            Include SDC warp in single-shot transform from BOLD to MNI
        ignore_aroma_err : bool
            Do not fail on ICA-AROMA errors
        aroma_melodic_dim: int
            Set the dimensionality of the MELODIC ICA decomposition.
            Negative numbers set a maximum on automatic dimensionality estimation.
            Positive numbers set an exact number of components to extract.
            (default: -200, i.e., estimate <=200 components)

    **Inputs**

        itk_bold_to_t1
            Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
        t1_2_mni_forward_transform
            ANTs-compatible affine-and-warp transform file
        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing
        skip_vols
            number of non steady state volumes
        bold_split
            Individual 3D BOLD volumes, not motion corrected
        bold_mask
            BOLD series mask in template space
        hmc_xforms
            List of affine transforms aligning each volume to ``ref_image`` in ITK format
        fieldwarp
            a :abbr:`DFM (displacements field map)` in ITK format
        movpar_file
            SPM-formatted motion parameters file

    **Outputs**

        aroma_confounds
            TSV of confounds identified as noise by ICA-AROMA
        aroma_noise_ics
            CSV of noise components identified by ICA-AROMA
        melodic_mix
            FSL MELODIC mixing matrix
        nonaggr_denoised_file
            BOLD series with non-aggressive ICA-AROMA denoising applied

    .. _ICA-AROMA: https://github.com/maartenmennes/ICA-AROMA

    """
    workflow = Workflow(name=name)
    workflow.__postdesc__ = """\
Automatic removal of motion artifacts using independent component analysis
[ICA-AROMA, @aroma] was performed on the *preprocessed BOLD on MNI space*
time-series after removal of non-steady state volumes and spatial smoothing
with an isotropic, Gaussian kernel of 6mm FWHM (full-width half-maximum).
Corresponding "non-aggressively" denoised runs were produced after such
smoothing.
Additionally, the "aggressive" noise-regressors were collected and placed
in the corresponding confounds file.
"""

    inputnode = pe.Node(niu.IdentityInterface(
        fields=[
            'itk_bold_to_t1',
            't1_2_mni_forward_transform',
            'name_source',
            'skip_vols',
            'bold_split',
            'bold_mask',
            'hmc_xforms',
            'fieldwarp',
            'movpar_file']), name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['aroma_confounds', 'aroma_noise_ics', 'melodic_mix',
                'nonaggr_denoised_file']), name='outputnode')

    bold_mni_trans_wf = init_bold_mni_trans_wf(
        template=template,
        freesurfer=False,
        mem_gb=mem_gb,
        omp_nthreads=omp_nthreads,
        template_out_grid=str(
            get_template('MNI152Lin') / 'tpl-MNI152Lin_space-MNI_res-02_T1w.nii.gz'),
        use_compression=False,
        use_fieldwarp=use_fieldwarp,
        name='bold_mni_trans_wf'
    )
    bold_mni_trans_wf.__desc__ = None

    rm_non_steady_state = pe.Node(niu.Function(function=_remove_volumes,
                                               output_names=['bold_cut']),
                                  name='rm_nonsteady')

    calc_median_val = pe.Node(fsl.ImageStats(op_string='-k %s -p 50'), name='calc_median_val')
    calc_bold_mean = pe.Node(fsl.MeanImage(), name='calc_bold_mean')

    def _getusans_func(image, thresh):
        return [tuple([image, thresh])]
    getusans = pe.Node(niu.Function(function=_getusans_func, output_names=['usans']),
                       name='getusans', mem_gb=0.01)

    smooth = pe.Node(fsl.SUSAN(fwhm=susan_fwhm), name='smooth')

    # melodic node
    melodic = pe.Node(fsl.MELODIC(
        no_bet=True, tr_sec=float(metadata['RepetitionTime']), mm_thresh=0.5, out_stats=True,
        dim=aroma_melodic_dim), name="melodic")

    # ica_aroma node
    ica_aroma = pe.Node(ICA_AROMARPT(
        denoise_type='nonaggr', generate_report=True, TR=metadata['RepetitionTime']),
        name='ica_aroma')

    add_non_steady_state = pe.Node(niu.Function(function=_add_volumes,
                                                output_names=['bold_add']),
                                   name='add_nonsteady')

    # extract the confound ICs from the results
    ica_aroma_confound_extraction = pe.Node(ICAConfounds(ignore_aroma_err=ignore_aroma_err),
                                            name='ica_aroma_confound_extraction')

    ds_report_ica_aroma = pe.Node(
        DerivativesDataSink(suffix='ica_aroma'),
        name='ds_report_ica_aroma', run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB)

    def _getbtthresh(medianval):
        return 0.75 * medianval
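    # _getbtthresh above sets SUSAN's brightness_threshold to 75% of the median
    # intensity within the BOLD mask, mirroring the heuristic used by the
    # FSL/nipype susan_smooth pipelines.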

    # connect the nodes
    workflow.connect([
        (inputnode, bold_mni_trans_wf, [
            ('name_source', 'inputnode.name_source'),
            ('bold_split', 'inputnode.bold_split'),
            ('bold_mask', 'inputnode.bold_mask'),
            ('hmc_xforms', 'inputnode.hmc_xforms'),
            ('itk_bold_to_t1', 'inputnode.itk_bold_to_t1'),
            ('t1_2_mni_forward_transform', 'inputnode.t1_2_mni_forward_transform'),
            ('fieldwarp', 'inputnode.fieldwarp')]),
        (inputnode, ica_aroma, [('movpar_file', 'motion_parameters')]),
        (inputnode, rm_non_steady_state, [
            ('skip_vols', 'skip_vols')]),
        (bold_mni_trans_wf, rm_non_steady_state, [
            ('outputnode.bold_mni', 'bold_file')]),
        (bold_mni_trans_wf, calc_median_val, [
            ('outputnode.bold_mask_mni', 'mask_file')]),
        (rm_non_steady_state, calc_median_val, [
            ('bold_cut', 'in_file')]),
        (rm_non_steady_state, calc_bold_mean, [
            ('bold_cut', 'in_file')]),
        (calc_bold_mean, getusans, [('out_file', 'image')]),
        (calc_median_val, getusans, [('out_stat', 'thresh')]),
        # Connect input nodes to complete smoothing
        (rm_non_steady_state, smooth, [
            ('bold_cut', 'in_file')]),
        (getusans, smooth, [('usans', 'usans')]),
        (calc_median_val, smooth, [(('out_stat', _getbtthresh), 'brightness_threshold')]),
        # connect smooth to melodic
        (smooth, melodic, [('smoothed_file', 'in_files')]),
        (bold_mni_trans_wf, melodic, [
            ('outputnode.bold_mask_mni', 'mask')]),
        # connect nodes to ICA-AROMA
        (smooth, ica_aroma, [('smoothed_file', 'in_file')]),
        (bold_mni_trans_wf, ica_aroma, [
            ('outputnode.bold_mask_mni', 'report_mask'),
            ('outputnode.bold_mask_mni', 'mask')]),
        (melodic, ica_aroma, [('out_dir', 'melodic_dir')]),
        # generate tsvs from ICA-AROMA
        (ica_aroma, ica_aroma_confound_extraction, [('out_dir', 'in_directory')]),
        (inputnode, ica_aroma_confound_extraction, [
            ('skip_vols', 'skip_vols')]),
        # output for processing and reporting
        (ica_aroma_confound_extraction, outputnode, [('aroma_confounds', 'aroma_confounds'),
                                                     ('aroma_noise_ics', 'aroma_noise_ics'),
                                                     ('melodic_mix', 'melodic_mix')]),
        # TODO change melodic report to reflect noise and non-noise components
        (ica_aroma, add_non_steady_state, [
            ('nonaggr_denoised_file', 'bold_cut_file')]),
        (bold_mni_trans_wf, add_non_steady_state, [
            ('outputnode.bold_mni', 'bold_file')]),
        (inputnode, add_non_steady_state, [
            ('skip_vols', 'skip_vols')]),
        (add_non_steady_state, outputnode, [('bold_add', 'nonaggr_denoised_file')]),
        (ica_aroma, ds_report_ica_aroma, [('out_report', 'in_file')]),
    ])

    return workflow
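The ``rm_nonsteady`` and ``add_nonsteady`` nodes above wrap two helpers, ``_remove_volumes`` and
``_add_volumes``, that are not shown in this example. Below is a minimal sketch of what such helpers
could look like (an assumption based on the node inputs and outputs, not necessarily the project's
actual implementation); imports live inside the functions because nipype ``Function`` nodes serialize
only the function body:

def _remove_volumes(bold_file, skip_vols):
    """Drop the first ``skip_vols`` (non-steady-state) volumes of a 4D series (sketch)."""
    import os
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix

    if skip_vols == 0:
        return bold_file
    out = fname_presuffix(bold_file, suffix='_cut', newpath=os.getcwd())
    img = nb.load(bold_file)
    img.__class__(img.dataobj[..., skip_vols:], img.affine, img.header).to_filename(out)
    return out


def _add_volumes(bold_file, bold_cut_file, skip_vols):
    """Prepend the original initial volumes back onto the denoised series (sketch)."""
    import os
    import nibabel as nb
    import numpy as np
    from nipype.utils.filemanip import fname_presuffix

    if skip_vols == 0:
        return bold_cut_file
    orig = nb.load(bold_file)
    cut = nb.load(bold_cut_file)
    data = np.concatenate((np.asanyarray(orig.dataobj)[..., :skip_vols],
                           np.asanyarray(cut.dataobj)), axis=3)
    out = fname_presuffix(bold_cut_file, suffix='_addnonsteady', newpath=os.getcwd())
    orig.__class__(data, orig.affine, orig.header).to_filename(out)
    return out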
Example No. 47
def init_bold_grayords_wf(grayord_density,
                          mem_gb,
                          repetition_time,
                          name="bold_grayords_wf"):
    """
    Sample Grayordinates files onto the fsLR atlas.

    Outputs are in CIFTI2 format.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes

            from fprodents.workflows.bold.resampling import init_bold_grayords_wf
            wf = init_bold_grayords_wf(mem_gb=0.1, grayord_density='91k', repetition_time=2.0)

    Parameters
    ----------
    grayord_density : :obj:`str`
        Either `91k` or `170k`, representing the total number of vertices or *grayordinates*.
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    repetition_time : :obj:`float`
        BOLD series repetition time (TR), in seconds
    name : :obj:`str`
        Unique name for the subworkflow (default: ``'bold_grayords_wf'``)

    Inputs
    ------
    bold_std : :obj:`str`
        List of BOLD conversions to standard spaces.
    spatial_reference : :obj:`str`
        List of unique identifiers corresponding to the BOLD standard-conversions.
    subjects_dir : :obj:`str`
        FreeSurfer's subjects directory.
    surf_files : :obj:`str`
        List of BOLD files resampled on the fsaverage (ico7) surfaces.
    surf_refs :
        List of unique identifiers corresponding to the BOLD surface-conversions.

    Outputs
    -------
    cifti_bold : :obj:`str`
        List of BOLD grayordinates files - (L)eft and (R)ight.
    cifti_variant : :obj:`str`
        Only ``'HCP Grayordinates'`` is currently supported.
    cifti_metadata : :obj:`str`
        Path of metadata files corresponding to ``cifti_bold``.
    cifti_density : :obj:`str`
        Density (i.e., either `91k` or `170k`) of ``cifti_bold``.

    """
    import templateflow.api as tf
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.cifti import GenerateCifti
    from niworkflows.interfaces.utility import KeySelect

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
*Grayordinates* files [@hcppipelines] containing {density} samples were also
generated using the highest-resolution ``fsaverage`` as intermediate standardized
surface space.
""".format(density=grayord_density)

    fslr_density, mni_density = (("32k", "2") if grayord_density == "91k" else
                                 ("59k", "1"))

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "bold_std",
            "spatial_reference",
            "subjects_dir",
            "surf_files",
            "surf_refs",
        ]),
        name="inputnode",
    )

    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "cifti_bold", "cifti_variant", "cifti_metadata", "cifti_density"
        ]),
        name="outputnode",
    )

    # extract out to BOLD base
    select_std = pe.Node(
        KeySelect(fields=["bold_std"]),
        name="select_std",
        run_without_submitting=True,
        nohash=True,
    )
    select_std.inputs.key = "MNI152NLin6Asym_res-%s" % mni_density

    select_fs_surf = pe.Node(
        KeySelect(fields=["surf_files"]),
        name="select_fs_surf",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    select_fs_surf.inputs.key = "fsaverage"

    # Setup Workbench command. LR ordering for hemi can be assumed, as it is imposed
    # by the iterfield of the MapNode in the surface sampling workflow above.
    resample = pe.MapNode(
        wb.MetricResample(method="ADAP_BARY_AREA", area_metrics=True),
        name="resample",
        iterfield=[
            "in_file",
            "out_file",
            "new_sphere",
            "new_area",
            "current_sphere",
            "current_area",
        ],
    )
    resample.inputs.current_sphere = [
        str(
            tf.get("fsaverage",
                   hemi=hemi,
                   density="164k",
                   desc="std",
                   suffix="sphere")) for hemi in "LR"
    ]
    resample.inputs.current_area = [
        str(
            tf.get(
                "fsaverage",
                hemi=hemi,
                density="164k",
                desc="vaavg",
                suffix="midthickness",
            )) for hemi in "LR"
    ]
    resample.inputs.new_sphere = [
        str(
            tf.get(
                "fsLR",
                space="fsaverage",
                hemi=hemi,
                density=fslr_density,
                suffix="sphere",
            )) for hemi in "LR"
    ]
    resample.inputs.new_area = [
        str(
            tf.get(
                "fsLR",
                hemi=hemi,
                density=fslr_density,
                desc="vaavg",
                suffix="midthickness",
            )) for hemi in "LR"
    ]
    resample.inputs.out_file = [
        "space-fsLR_hemi-%s_den-%s_bold.gii" % (h, grayord_density)
        for h in "LR"
    ]

    gen_cifti = pe.Node(
        GenerateCifti(
            volume_target="MNI152NLin6Asym",
            surface_target="fsLR",
            TR=repetition_time,
            surface_density=fslr_density,
        ),
        name="gen_cifti",
    )

    # fmt:off
    workflow.connect([
        (inputnode, gen_cifti, [('subjects_dir', 'subjects_dir')]),
        (inputnode, select_std, [('bold_std', 'bold_std'),
                                 ('spatial_reference', 'keys')]),
        (inputnode, select_fs_surf, [('surf_files', 'surf_files'),
                                     ('surf_refs', 'keys')]),
        (select_fs_surf, resample, [('surf_files', 'in_file')]),
        (select_std, gen_cifti, [('bold_std', 'bold_file')]),
        (resample, gen_cifti, [('out_file', 'surface_bolds')]),
        (gen_cifti, outputnode, [('out_file', 'cifti_bold'),
                                 ('variant', 'cifti_variant'),
                                 ('out_metadata', 'cifti_metadata'),
                                 ('density', 'cifti_density')]),
    ])
    # fmt:on
    return workflow
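Since ``repetition_time`` is a required argument, a minimal usage sketch looks like the following
(all file names, paths, and values are hypothetical, and the nested ``surf_files`` list assumes one
entry per key in ``surf_refs``, as ``KeySelect`` expects):

# '91k' pairs 32k-per-hemisphere fsLR surfaces with 2 mm MNI152NLin6Asym voxels;
# '170k' pairs 59k surfaces with 1 mm voxels (see fslr_density/mni_density above).
wf = init_bold_grayords_wf(grayord_density='91k', mem_gb=1.0, repetition_time=2.0)

wf.inputs.inputnode.spatial_reference = ['MNI152NLin6Asym_res-2']  # must match select_std.key
wf.inputs.inputnode.bold_std = [
    'sub-01_task-rest_space-MNI152NLin6Asym_res-2_desc-preproc_bold.nii.gz']
wf.inputs.inputnode.surf_refs = ['fsaverage']
wf.inputs.inputnode.surf_files = [[
    'sub-01_task-rest_hemi-L_space-fsaverage_bold.func.gii',
    'sub-01_task-rest_hemi-R_space-fsaverage_bold.func.gii']]
wf.inputs.inputnode.subjects_dir = '/data/freesurfer'  # hypothetical path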
Example No. 48
def init_bold_hmc_wf(mem_gb, omp_nthreads, name='bold_hmc_wf'):
    """
    This workflow estimates the motion parameters to perform
    :abbr:`HMC (head motion correction)` over the input
    :abbr:`BOLD (blood-oxygen-level dependent)` image.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_hmc_wf
        wf = init_bold_hmc_wf(
            mem_gb=3,
            omp_nthreads=1)

    **Parameters**

        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``bold_hmc_wf``)

    **Inputs**

        bold_file
            BOLD series NIfTI file
        raw_ref_image
            Reference image to which BOLD series is motion corrected

    **Outputs**

        xforms
            ITKTransform file aligning each volume to ``ref_image``
        movpar_file
            MCFLIRT motion parameters, normalized to SPM format (X, Y, Z, Rx, Ry, Rz)

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
Head-motion parameters with respect to the BOLD reference
(transformation matrices, and six corresponding rotation and translation
parameters) are estimated before any spatiotemporal filtering using
`mcflirt` [FSL {fsl_ver}, @mcflirt].
""".format(fsl_ver=fsl.Info().version() or '<ver>')

    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file', 'raw_ref_image']),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['xforms', 'movpar_file']),
        name='outputnode')

    # Head motion correction (hmc)
    mcflirt = pe.Node(fsl.MCFLIRT(save_mats=True, save_plots=True),
                      name='mcflirt', mem_gb=mem_gb * 3)

    fsl2itk = pe.Node(MCFLIRT2ITK(), name='fsl2itk',
                      mem_gb=0.05, n_procs=omp_nthreads)

    normalize_motion = pe.Node(NormalizeMotionParams(format='FSL'),
                               name="normalize_motion",
                               mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, mcflirt, [('raw_ref_image', 'ref_file'),
                              ('bold_file', 'in_file')]),
        (inputnode, fsl2itk, [('raw_ref_image', 'in_source'),
                              ('raw_ref_image', 'in_reference')]),
        (mcflirt, fsl2itk, [('mat_file', 'in_files')]),
        (mcflirt, normalize_motion, [('par_file', 'in_file')]),
        (fsl2itk, outputnode, [('out_file', 'xforms')]),
        (normalize_motion, outputnode, [('out_file', 'movpar_file')]),
    ])

    return workflow
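The ``movpar_file`` emitted here is a plain-text matrix with one row per volume and six columns in
SPM ordering (translations in mm followed by rotations in radians, per the output description
above). A short sketch of inspecting it, assuming NumPy and a hypothetical file name:

import numpy as np

params = np.loadtxt('sub-01_task-rest_movpar.txt')  # hypothetical path; shape (n_vols, 6)
translations_mm = params[:, :3]    # X, Y, Z
rotations_rad = params[:, 3:]      # Rx, Ry, Rz

# e.g., flag volumes whose translation exceeds 2 mm along any axis
high_motion = np.abs(translations_mm).max(axis=1) > 2.0
print(high_motion.sum(), 'volumes exceed 2 mm of translation')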
Example No. 49
def init_bold_t1_trans_wf(freesurfer,
                          mem_gb,
                          omp_nthreads,
                          use_compression=True,
                          name='bold_t1_trans_wf'):
    """
    Co-register the reference BOLD image to T1w-space.

    The workflow uses :abbr:`BBR (boundary-based registration)`.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold.registration import init_bold_t1_trans_wf
            wf = init_bold_t1_trans_wf(freesurfer=True,
                                       mem_gb=3,
                                       omp_nthreads=1)

    Parameters
    ----------
    freesurfer : :obj:`bool`
        Enable FreeSurfer functional registration (bbregister)
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    name : :obj:`str`
        Name of workflow (default: ``bold_t1_trans_wf``)

    Inputs
    ------
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    ref_bold_brain
        Reference image to which BOLD series is aligned
        If ``fieldwarp == True``, ``ref_bold_brain`` should be unwarped
    ref_bold_mask
        Skull-stripping mask of reference image
    t1w_brain
        Skull-stripped bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_aseg
        FreeSurfer's ``aseg.mgz`` atlas projected into the T1w reference
        (only if ``recon-all`` was run).
    t1w_aparc
        FreeSurfer's ``aparc+aseg.mgz`` atlas projected into the T1w reference
        (only if ``recon-all`` was run).
    bold_split
        Individual 3D BOLD volumes, not motion corrected
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format

    Outputs
    -------
    bold_t1
        Motion-corrected BOLD series in T1 space
    bold_t1_ref
        Reference, contrast-enhanced summary of the motion-corrected BOLD series in T1w space
    bold_mask_t1
        BOLD mask in T1 space
    bold_aseg_t1
        FreeSurfer's ``aseg.mgz`` atlas, in T1w-space at the BOLD resolution
        (only if ``recon-all`` was run).
    bold_aparc_t1
        FreeSurfer's ``aparc+aseg.mgz`` atlas, in T1w-space at the BOLD resolution
        (only if ``recon-all`` was run).

    See also
    --------
      * :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`
      * :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.func.util import init_bold_reference_wf
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.nibabel import GenerateSamplingReference
    from niworkflows.interfaces.nilearn import Merge

    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'name_source', 'ref_bold_brain', 'ref_bold_mask', 't1w_brain',
        't1w_mask', 't1w_aseg', 't1w_aparc', 'bold_split', 'fieldwarp',
        'hmc_xforms', 'itk_bold_to_t1'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1',
        'bold_aparc_t1'
    ]),
                         name='outputnode')

    gen_ref = pe.Node(GenerateSamplingReference(), name='gen_ref',
                      mem_gb=0.3)  # 256x256x256 * 64 / 8 ~ 150MB

    mask_t1w_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel'),
                           name='mask_t1w_tfm',
                           mem_gb=0.1)

    workflow.connect([
        (inputnode, gen_ref, [('ref_bold_brain', 'moving_image'),
                              ('t1w_brain', 'fixed_image'),
                              ('t1w_mask', 'fov_mask')]),
        (inputnode, mask_t1w_tfm, [('ref_bold_mask', 'input_image')]),
        (gen_ref, mask_t1w_tfm, [('out_file', 'reference_image')]),
        (inputnode, mask_t1w_tfm, [('itk_bold_to_t1', 'transforms')]),
        (mask_t1w_tfm, outputnode, [('output_image', 'bold_mask_t1')]),
    ])

    if freesurfer:
        # Resample aseg and aparc in T1w space (no transforms needed)
        aseg_t1w_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                               transforms='identity'),
                               name='aseg_t1w_tfm',
                               mem_gb=0.1)
        aparc_t1w_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                transforms='identity'),
                                name='aparc_t1w_tfm',
                                mem_gb=0.1)

        workflow.connect([
            (inputnode, aseg_t1w_tfm, [('t1w_aseg', 'input_image')]),
            (inputnode, aparc_t1w_tfm, [('t1w_aparc', 'input_image')]),
            (gen_ref, aseg_t1w_tfm, [('out_file', 'reference_image')]),
            (gen_ref, aparc_t1w_tfm, [('out_file', 'reference_image')]),
            (aseg_t1w_tfm, outputnode, [('output_image', 'bold_aseg_t1')]),
            (aparc_t1w_tfm, outputnode, [('output_image', 'bold_aparc_t1')]),
        ])

    bold_to_t1w_transform = pe.Node(MultiApplyTransforms(
        interpolation="LanczosWindowedSinc", float=True, copy_dtype=True),
                                    name='bold_to_t1w_transform',
                                    mem_gb=mem_gb * 3 * omp_nthreads,
                                    n_procs=omp_nthreads)

    # merge 3D volumes into 4D timeseries
    merge = pe.Node(Merge(compress=use_compression),
                    name='merge',
                    mem_gb=mem_gb)

    # Generate a reference on the target T1w space
    gen_final_ref = init_bold_reference_wf(omp_nthreads, pre_mask=True)

    # Merge transforms placing the head motion correction last
    merge_xforms = pe.Node(niu.Merge(3),
                           name='merge_xforms',
                           run_without_submitting=True,
                           mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (inputnode, merge_xforms, [
            ('hmc_xforms', 'in3'),      # May be 'identity' if HMC already applied
            ('fieldwarp', 'in2'),       # May be 'identity' if SDC already applied
            ('itk_bold_to_t1', 'in1')]),
        (inputnode, bold_to_t1w_transform, [('bold_split', 'input_image')]),
        (merge_xforms, bold_to_t1w_transform, [('out', 'transforms')]),
        (gen_ref, bold_to_t1w_transform, [('out_file', 'reference_image')]),
        (bold_to_t1w_transform, merge, [('out_files', 'in_files')]),
        (merge, gen_final_ref, [('out_file', 'inputnode.bold_file')]),
        (mask_t1w_tfm, gen_final_ref, [('output_image', 'inputnode.bold_mask')]),
        (merge, outputnode, [('out_file', 'bold_t1')]),
        (gen_final_ref, outputnode, [('outputnode.ref_image', 'bold_t1_ref')]),
    ])

    return workflow
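``merge_xforms`` deliberately lists the BOLD-to-T1w affine first and the per-volume head-motion
transforms last; with ANTs' convention that the last transform in the list sits closest to the input
image, each volume is effectively motion-corrected, unwarped, and coregistered with a single
interpolation. A minimal usage sketch, with hypothetical file names and the ``'identity'``
placeholders that the inline comments above allow when HMC/SDC were already applied:

wf = init_bold_t1_trans_wf(freesurfer=False, mem_gb=3, omp_nthreads=4)
wf.inputs.inputnode.name_source = 'sub-01_task-rest_bold.nii.gz'       # hypothetical
wf.inputs.inputnode.ref_bold_brain = 'sub-01_boldref_brain.nii.gz'     # hypothetical
wf.inputs.inputnode.ref_bold_mask = 'sub-01_boldref_mask.nii.gz'       # hypothetical
wf.inputs.inputnode.t1w_brain = 'sub-01_desc-brain_T1w.nii.gz'         # hypothetical
wf.inputs.inputnode.t1w_mask = 'sub-01_desc-brain_mask.nii.gz'         # hypothetical
wf.inputs.inputnode.bold_split = ['vol0000.nii.gz', 'vol0001.nii.gz']  # hypothetical
wf.inputs.inputnode.itk_bold_to_t1 = 'bold2t1w_xfm.txt'                # ITK-format affine
wf.inputs.inputnode.fieldwarp = 'identity'   # SDC already applied (or not performed)
wf.inputs.inputnode.hmc_xforms = 'identity'  # HMC already applied upstream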
Example No. 50
def init_bold_t2s_wf(echo_times, mem_gb, omp_nthreads,
                     t2s_coreg=False, name='bold_t2s_wf'):
    """
    This workflow wraps the `tedana`_ `T2* workflow`_ to optimally
    combine multiple echoes and derive a T2* map for optional use as a
    coregistration target.

    The following steps are performed:

    #. :abbr:`HMC (head motion correction)` on individual echo files.
    #. Compute the T2* map
    #. Create an optimally combined ME-EPI time series

    **Parameters**

        echo_times
            list of TEs associated with each echo
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        t2s_coreg : bool
            Use the calculated T2*-map for T2*-driven coregistration
        name : str
            Name of workflow (default: ``bold_t2s_wf``)

    **Inputs**

        bold_file
            list of individual echo files

    **Outputs**

        bold
            the optimally combined time series for all supplied echoes
        bold_mask
            the binarized, skull-stripped adaptive T2* map
        bold_ref_brain
            the adaptive T2* map

    .. _tedana: https://github.com/me-ica/tedana
    .. _`T2* workflow`: https://tedana.readthedocs.io/en/latest/generated/tedana.workflows.t2smap_workflow.html#tedana.workflows.t2smap_workflow  # noqa

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
A T2* map was estimated from the preprocessed BOLD by fitting to a monoexponential signal
decay model with log-linear regression.
For each voxel, the maximal number of echoes with reliable signal in that voxel was
used to fit the model.
The calculated T2* map was then used to optimally combine preprocessed BOLD across
echoes following the method described in [@posse_t2s].
The optimally combined time series was carried forward as the *preprocessed BOLD*{}.
""".format('' if not t2s_coreg else ', and the T2* map was also retained as the BOLD reference')

    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=['bold', 'bold_mask', 'bold_ref_brain']),
                         name='outputnode')

    LOGGER.log(25, 'Generating T2* map and optimally combined ME-EPI time series.')

    t2smap_node = pe.Node(T2SMap(echo_times=echo_times), name='t2smap_node')
    skullstrip_t2smap_wf = init_skullstrip_bold_wf(name='skullstrip_t2smap_wf')

    workflow.connect([
        (inputnode, t2smap_node, [('bold_file', 'in_files')]),
        (t2smap_node, outputnode, [('optimal_comb', 'bold')]),
        (t2smap_node, skullstrip_t2smap_wf, [('t2star_map', 'inputnode.in_file')]),
        (skullstrip_t2smap_wf, outputnode, [
            ('outputnode.mask_file', 'bold_mask'),
            ('outputnode.skull_stripped_file', 'bold_ref_brain')]),
    ])

    return workflow
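The description above is the model that the wrapped ``T2SMap`` interface fits. As a purely
illustrative sketch of that math for a single voxel (not tedana's implementation, which additionally
handles adaptive per-voxel echo counts and masking), the log-linear fit and the weighted echo
combination of [@posse_t2s] could be written as:

import numpy as np


def fit_t2star(signal, echo_times):
    """Log-linear monoexponential fit for one voxel: S(TE) = S0 * exp(-TE / T2*)."""
    log_s = np.log(np.maximum(signal, 1e-6))           # guard against log(0)
    slope, intercept = np.polyfit(echo_times, log_s, 1)
    r2star = max(-slope, 1e-6)                         # decay rate R2* (1/s)
    return 1.0 / r2star, np.exp(intercept)             # T2* (s), S0


def optimal_combination(signal, echo_times, t2star):
    """TE-weighted average of echoes for one voxel, following [@posse_t2s]."""
    tes = np.asarray(echo_times, dtype=float)
    weights = tes * np.exp(-tes / t2star)
    weights /= weights.sum()
    return float(np.dot(weights, signal))


tes = [0.0146, 0.0390, 0.0634]            # hypothetical echo times (s)
sig = np.array([950.0, 620.0, 410.0])     # hypothetical voxel intensities
t2s, s0 = fit_t2star(sig, tes)
combined = optimal_combination(sig, tes, t2s)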
Example No. 51
def init_phdiff_wf(omp_nthreads, name='phdiff_wf'):
    """
    Estimates the fieldmap using a phase-difference image and one or more
    magnitude images corresponding to two or more :abbr:`GRE (Gradient Echo sequence)`
    acquisitions. The `original code was taken from nipype
    <https://github.com/nipy/nipype/blob/master/nipype/workflows/dmri/fsl/artifacts.py#L514>`_.

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.fieldmap.phdiff import init_phdiff_wf
        wf = init_phdiff_wf(omp_nthreads=1)


    Outputs::

      outputnode.fmap_ref - The average magnitude image, skull-stripped
      outputnode.fmap_mask - The brain mask applied to the fieldmap
      outputnode.fmap - The estimated fieldmap in Hz


    """

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
A deformation field to correct for susceptibility distortions was estimated
based on a field map that was co-registered to the BOLD reference,
using a custom workflow of *fMRIPrep* derived from D. Greve's `epidewarp.fsl`
[script](http://www.nmr.mgh.harvard.edu/~greve/fbirn/b0/epidewarp.fsl) and
further improvements of HCP Pipelines [@hcppipelines].
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=['magnitude', 'phasediff']),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['fmap', 'fmap_ref', 'fmap_mask']), name='outputnode')

    def _pick1st(inlist):
        return inlist[0]

    # Read phasediff echo times
    meta = pe.Node(ReadSidecarJSON(), name='meta', mem_gb=0.01, run_without_submitting=True)

    # Merge input magnitude images
    magmrg = pe.Node(IntraModalMerge(), name='magmrg')

    # de-gradient the fields ("bias/illumination artifact")
    n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
                 name='n4', n_procs=omp_nthreads)
    bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True),
                  name='bet')
    ds_fmap_mask = pe.Node(DerivativesDataSink(suffix='fmap_mask'), name='ds_report_fmap_mask',
                           mem_gb=0.01, run_without_submitting=True)
    # uses mask from bet; outputs a mask
    # dilate = pe.Node(fsl.maths.MathsCommand(
    #     nan2zeros=True, args='-kernel sphere 5 -dilM'), name='MskDilate')

    # phase diff -> radians
    pha2rads = pe.Node(niu.Function(function=siemens2rads), name='pha2rads')

    # FSL PRELUDE will perform phase-unwrapping
    prelude = pe.Node(fsl.PRELUDE(), name='prelude')

    denoise = pe.Node(fsl.SpatialFilter(operation='median', kernel_shape='sphere',
                                        kernel_size=3), name='denoise')

    demean = pe.Node(niu.Function(function=demean_image), name='demean')

    cleanup_wf = cleanup_edge_pipeline(name="cleanup_wf")

    compfmap = pe.Node(Phasediff2Fieldmap(), name='compfmap')

    # The phdiff2fmap interface is equivalent to:
    # rad2rsec (using rads2radsec from nipype.workflows.dmri.fsl.utils)
    # pre_fugue = pe.Node(fsl.FUGUE(save_fmap=True), name='ComputeFieldmapFUGUE')
    # rsec2hz (divide by 2pi)

    workflow.connect([
        (inputnode, meta, [('phasediff', 'in_file')]),
        (inputnode, magmrg, [('magnitude', 'in_files')]),
        (magmrg, n4, [('out_avg', 'input_image')]),
        (n4, prelude, [('output_image', 'magnitude_file')]),
        (n4, bet, [('output_image', 'in_file')]),
        (bet, prelude, [('mask_file', 'mask_file')]),
        (inputnode, pha2rads, [('phasediff', 'in_file')]),
        (pha2rads, prelude, [('out', 'phase_file')]),
        (meta, compfmap, [('out_dict', 'metadata')]),
        (prelude, denoise, [('unwrapped_phase_file', 'in_file')]),
        (denoise, demean, [('out_file', 'in_file')]),
        (demean, cleanup_wf, [('out', 'inputnode.in_file')]),
        (bet, cleanup_wf, [('mask_file', 'inputnode.in_mask')]),
        (cleanup_wf, compfmap, [('outputnode.out_file', 'in_file')]),
        (compfmap, outputnode, [('out_file', 'fmap')]),
        (bet, outputnode, [('mask_file', 'fmap_mask'),
                           ('out_file', 'fmap_ref')]),
        (inputnode, ds_fmap_mask, [('phasediff', 'source_file')]),
        (bet, ds_fmap_mask, [('out_report', 'in_file')]),
    ])

    return workflow
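The chain ends in ``Phasediff2Fieldmap``, which turns the cleaned, unwrapped phase difference
(radians) into a fieldmap in Hz using the two gradient-echo times read from the sidecar metadata
(``EchoTime1``/``EchoTime2``): since the accrued phase is delta_phi = 2 * pi * delta_TE * delta_f,
the fieldmap is delta_f = delta_phi / (2 * pi * delta_TE). A minimal numeric sketch of that final
conversion, with hypothetical echo times:

import numpy as np


def phasediff_to_hz(unwrapped_phasediff_rad, te1, te2):
    """Convert an unwrapped phase-difference map (radians) into a fieldmap (Hz)."""
    delta_te = abs(te2 - te1)                       # seconds
    return unwrapped_phasediff_rad / (2.0 * np.pi * delta_te)


# e.g., a 1-radian phase difference with a common 2.46 ms echo-time spacing:
print(phasediff_to_hz(np.array([1.0]), te1=0.00492, te2=0.00738))  # ~64.7 Hz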