Example 1
def init_func_preproc_wf(bold_file,
                         ignore,
                         freesurfer,
                         use_bbr,
                         t2s_coreg,
                         bold2t1w_dof,
                         reportlets_dir,
                         output_spaces,
                         template,
                         output_dir,
                         omp_nthreads,
                         fmap_bspline,
                         fmap_demean,
                         use_syn,
                         force_syn,
                         use_aroma,
                         ignore_aroma_err,
                         aroma_melodic_dim,
                         medial_surface_nan,
                         cifti_output,
                         debug,
                         low_mem,
                         template_out_grid,
                         layout=None,
                         num_bold=1):
    """
    This workflow controls the functional preprocessing stages of FMRIPREP.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold import init_func_preproc_wf
        wf = init_func_preproc_wf('/completely/made/up/path/sub-01_task-nback_bold.nii.gz',
                                  omp_nthreads=1,
                                  ignore=[],
                                  freesurfer=True,
                                  reportlets_dir='.',
                                  output_dir='.',
                                  template='MNI152NLin2009cAsym',
                                  output_spaces=['T1w', 'fsnative',
                                                 'template', 'fsaverage5'],
                                  debug=False,
                                  use_bbr=True,
                                  t2s_coreg=False,
                                  bold2t1w_dof=9,
                                  fmap_bspline=True,
                                  fmap_demean=True,
                                  use_syn=True,
                                  force_syn=True,
                                  low_mem=False,
                                  template_out_grid='native',
                                  medial_surface_nan=False,
                                  cifti_output=False,
                                  use_aroma=False,
                                  ignore_aroma_err=False,
                                  aroma_melodic_dim=None,
                                  num_bold=1)

    **Parameters**

        bold_file : str
            BOLD series NIfTI file
        ignore : list
            Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
        freesurfer : bool
            Enable FreeSurfer functional registration (bbregister) and resampling
            BOLD series to FreeSurfer surface meshes.
        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
        t2s_coreg : bool
            Use multiple BOLD echoes to create a T2*-map for T2*-driven coregistration
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        reportlets_dir : str
            Directory in which to save reportlets
        output_spaces : list
            List of output spaces functional images are to be resampled to.
            Some parts of the pipeline will only be instantiated for certain output spaces.

            Valid spaces:

                - T1w
                - template
                - fsnative
                - fsaverage (or other pre-existing FreeSurfer templates)
        template : str
            Name of template targeted by ``template`` output space
        output_dir : str
            Directory in which to save derivatives
        omp_nthreads : int
            Maximum number of threads an individual process may use
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        use_syn : bool
            **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
            If fieldmaps are present and enabled, this is not run by default.
        force_syn : bool
            **Temporary**: Always run SyN-based SDC
        use_aroma : bool
            Perform ICA-AROMA on MNI-resampled functional series
        ignore_aroma_err : bool
            Do not fail on ICA-AROMA errors
        aroma_melodic_dim : int or None
            Set a static dimensionality for MELODIC (``None`` for automatic
            estimation)
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files
        cifti_output : bool
            Generate bold CIFTI file in output spaces
        debug : bool
            Enable debugging outputs
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        template_out_grid : str
            Keyword ('native', '1mm' or '2mm') or path of custom reference
            image for normalization
        layout : BIDSLayout
            BIDSLayout structure to enable metadata retrieval
        num_bold : int
            Total number of BOLD files that have been set for preprocessing
            (default is 1)

    **Inputs**

        bold_file
            BOLD series NIfTI file
        t1_preproc
            Bias-corrected structural template image
        t1_brain
            Skull-stripped ``t1_preproc``
        t1_mask
            Mask of the skull-stripped template image
        t1_seg
            Segmentation of preprocessed structural image, including
            gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
        t1_tpms
            List of tissue probability maps in T1w space
        t1_2_mni_forward_transform
            ANTs-compatible affine-and-warp transform file
        t1_2_mni_reverse_transform
            ANTs-compatible affine-and-warp transform file (inverse)
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID
        t1_2_fsnative_forward_transform
            LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
        t1_2_fsnative_reverse_transform
            LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w


    **Outputs**

        bold_t1
            BOLD series, resampled to T1w space
        bold_mask_t1
            BOLD series mask in T1w space
        bold_mni
            BOLD series, resampled to template space
        bold_mask_mni
            BOLD series mask in template space
        confounds
            TSV of confounds
        surfaces
            BOLD series, resampled to FreeSurfer surfaces
        aroma_noise_ics
            Noise components identified by ICA-AROMA
        melodic_mix
            FSL MELODIC mixing matrix
        bold_cifti
            BOLD CIFTI image
        cifti_variant
            Combination of target spaces for ``bold_cifti``


    **Subworkflows**

        * :py:func:`~fmriprep.workflows.bold.util.init_bold_reference_wf`
        * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
        * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
        * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
        * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confounds_wf`
        * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_mni_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.pepolar.init_pepolar_unwarp_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_fmap_estimator_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_sdc_unwarp_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_nonlinear_sdc_wf`

    """
    from ..fieldmap.base import init_sdc_wf  # Avoid circular dependency (#1066)

    ref_file = bold_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10
    multiecho = isinstance(bold_file, list)

    if multiecho:
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
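        # Use the image acquired at the shortest echo time as the reference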
        ref_file = dict(zip(tes, bold_file))[min(tes)]

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    LOGGER.log(
        25, ('Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
             'Memory resampled/largemem=%.2f/%.2f GB.'), ref_file,
        mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # For doc building purposes
    if layout is None or bold_file == 'bold_preprocesing':
        LOGGER.log(25, 'No valid layout: building empty workflow.')
        metadata = {
            'RepetitionTime': 2.0,
            'SliceTiming': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
            'PhaseEncodingDirection': 'j',
        }
        fmaps = [{
            'type':
            'phasediff',
            'phasediff':
            'sub-03/ses-2/fmap/sub-03_ses-2_run-1_phasediff.nii.gz',
            'magnitude1':
            'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude1.nii.gz',
            'magnitude2':
            'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude2.nii.gz',
        }]
        run_stc = True
        multiecho = False
    else:
        # Find associated sbref, if possible
        entities = layout.parse_file_entities(ref_file)
        entities['type'] = 'sbref'
        files = layout.get(**entities, extensions=['nii', 'nii.gz'])
        refbase = os.path.basename(ref_file)
        if 'sbref' in ignore:
            LOGGER.info("Single-band reference files ignored.")
        elif files and multiecho:
            LOGGER.warning("Single-band reference found, but not supported in "
                           "multi-echo workflows at this time. Ignoring.")
        elif files:
            sbref_file = files[0].filename
            sbbase = os.path.basename(sbref_file)
            if len(files) > 1:
                LOGGER.warning(
                    "Multiple single-band reference files found for {}; using "
                    "{}".format(refbase, sbbase))
            else:
                LOGGER.log(
                    25, "Using single-band reference file {}".format(sbbase))
        else:
            LOGGER.log(25,
                       "No single-band-reference found for {}".format(refbase))

        metadata = layout.get_metadata(ref_file)

        # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
        fmaps = []
        if 'fieldmaps' not in ignore:
            fmaps = layout.get_fieldmap(ref_file, return_list=True)
            for fmap in fmaps:
                fmap['metadata'] = layout.get_metadata(fmap[fmap['type']])

        # Run SyN if forced or in the absence of fieldmap correction
        if force_syn or (use_syn and not fmaps):
            fmaps.append({'type': 'syn'})

        # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
        run_stc = ("SliceTiming" in metadata and 'slicetiming' not in ignore
                   and (_get_series_len(ref_file) > 4 or "TooShort"))
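        # run_stc ends up as False (skip STC), True (run STC), or the string
        # "TooShort" when the series has four or fewer volumes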

    # Use T2* as target for ME-EPI in co-registration
    if t2s_coreg and not multiecho:
        LOGGER.warning(
            "No multiecho BOLD images found for T2* coregistration. "
            "Using standard EPI-T1 coregistration.")
        t2s_coreg = False

    # Switch stc off
    if multiecho and run_stc is True:
        LOGGER.warning('Slice-timing correction is not available for '
                       'multiecho BOLD data (not implemented).')
        run_stc = False

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__desc__ = """

Functional data preprocessing

: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=num_bold)

    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and template spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_file', 'sbref_file', 'subjects_dir', 'subject_id', 't1_preproc',
        't1_brain', 't1_mask', 't1_seg', 't1_tpms', 't1_aseg', 't1_aparc',
        't1_2_mni_forward_transform', 't1_2_mni_reverse_transform',
        't1_2_fsnative_forward_transform', 't1_2_fsnative_reverse_transform'
    ]),
                        name='inputnode')
    inputnode.inputs.bold_file = bold_file
    if sbref_file is not None:
        inputnode.inputs.sbref_file = sbref_file

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1',
        'bold_aparc_t1', 'bold_mni', 'bold_mni_ref',
        'bold_mask_mni', 'bold_cifti', 'cifti_variant', 'cifti_variant_key',
        'confounds', 'surfaces', 't2s_map', 'aroma_noise_ics', 'melodic_mix',
        'nonaggr_denoised_file'
    ]),
                         name='outputnode')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                         name='boldbuffer')

    summary = pe.Node(FunctionalSummary(
        output_spaces=output_spaces,
        slice_timing=run_stc,
        registration='FreeSurfer' if freesurfer else 'FSL',
        registration_dof=bold2t1w_dof,
        pe_direction=metadata.get("PhaseEncodingDirection")),
                      name='summary',
                      mem_gb=DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)

    func_derivatives_wf = init_func_derivatives_wf(output_dir=output_dir,
                                                   output_spaces=output_spaces,
                                                   template=template,
                                                   freesurfer=freesurfer,
                                                   use_aroma=use_aroma,
                                                   cifti_output=cifti_output)

    workflow.connect([
        (inputnode, func_derivatives_wf, [('bold_file',
                                           'inputnode.source_file')]),
        (outputnode, func_derivatives_wf,
         [('bold_t1', 'inputnode.bold_t1'),
          ('bold_t1_ref', 'inputnode.bold_t1_ref'),
          ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
          ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
          ('bold_mask_t1', 'inputnode.bold_mask_t1'),
          ('bold_mni', 'inputnode.bold_mni'),
          ('bold_mni_ref', 'inputnode.bold_mni_ref'),
          ('bold_mask_mni', 'inputnode.bold_mask_mni'),
          ('confounds', 'inputnode.confounds'),
          ('surfaces', 'inputnode.surfaces'),
          ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
          ('melodic_mix', 'inputnode.melodic_mix'),
          ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
          ('bold_cifti', 'inputnode.bold_cifti'),
          ('cifti_variant', 'inputnode.cifti_variant'),
          ('cifti_variant_key', 'inputnode.cifti_variant_key')]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'),
                         name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(name='bold_reg_wf',
                                   freesurfer=freesurfer,
                                   use_bbr=use_bbr,
                                   bold2t1w_dof=bold2t1w_dof,
                                   mem_gb=mem_gb['resampled'],
                                   omp_nthreads=omp_nthreads,
                                   use_compression=False)

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=(fmaps is not None
                                                            or use_syn),
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(mem_gb=mem_gb['largemem'],
                                           metadata=metadata,
                                           name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not low_mem,
        use_fieldwarp=(fmaps is not None or use_syn),
        name='bold_bold_trans_wf')

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf,
             [('outputnode.bold_file', 'inputnode.bold_file'),
              ('outputnode.skip_vols', 'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
    else:  # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([
            (bold_reference_wf, boldbuffer, [('outputnode.bold_file',
                                              'bold_file')]),
        ])

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_wf(fmaps,
                              metadata,
                              omp_nthreads=omp_nthreads,
                              debug=debug,
                              fmap_demean=fmap_demean,
                              fmap_bspline=fmap_bspline)
    bold_sdc_wf.inputs.inputnode.template = template

    if not fmaps:
        LOGGER.warning('SDC: no fieldmaps found or they were ignored (%s).',
                       ref_file)
    elif fmaps[0]['type'] == 'syn':
        LOGGER.warning(
            'SDC: no fieldmaps found or they were ignored. '
            'Using EXPERIMENTAL "fieldmap-less SyN" correction '
            'for dataset %s.', ref_file)
    else:
        LOGGER.log(
            25, 'SDC: fieldmap estimation of type "%s" intended for %s found.',
            fmaps[0]['type'], ref_file)

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        # Generate early reference
        (inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file'),
                                        ('sbref_file', 'inputnode.sbref_file')]
         ),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf,
         [('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
          ('outputnode.bold_file', 'inputnode.bold_file')]),
        # EPI-T1 registration workflow
        (
            inputnode,
            bold_reg_wf,
            [
                ('t1_brain', 'inputnode.t1_brain'),
                ('t1_seg', 'inputnode.t1_seg'),
                # Undefined if --no-freesurfer, but this is safe
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('t1_2_fsnative_reverse_transform',
                 'inputnode.t1_2_fsnative_reverse_transform')
            ]),
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('t1_brain', 'inputnode.t1_brain'),
                                       ('t1_mask', 'inputnode.t1_mask'),
                                       ('t1_aseg', 'inputnode.t1_aseg'),
                                       ('t1_aparc', 'inputnode.t1_aparc')]),
        (bold_split, bold_t1_trans_wf, [('out_files', 'inputnode.bold_split')
                                        ]),
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms',
                                          'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.itk_bold_to_t1',
                                          'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode,
         [('outputnode.bold_t1', 'bold_t1'),
          ('outputnode.bold_t1_ref', 'bold_t1_ref'),
          ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
          ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (inputnode, bold_sdc_wf, [('t1_brain', 'inputnode.t1_brain'),
                                  ('t1_2_mni_reverse_transform',
                                   'inputnode.t1_2_mni_reverse_transform')]),
        (bold_reference_wf, bold_sdc_wf,
         [('outputnode.ref_image', 'inputnode.bold_ref'),
          ('outputnode.ref_image_brain', 'inputnode.bold_ref_brain'),
          ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, bold_reg_wf, [('outputnode.bold_ref_brain',
                                     'inputnode.ref_bold_brain')]),
        (bold_sdc_wf, bold_t1_trans_wf,
         [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
          ('outputnode.bold_mask', 'inputnode.ref_bold_mask'),
          ('outputnode.out_warp', 'inputnode.fieldwarp')]),
        (bold_sdc_wf, bold_bold_trans_wf,
         [('outputnode.out_warp', 'inputnode.fieldwarp'),
          ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')
                                ]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('t1_tpms', 'inputnode.t1_tpms'),
                                        ('t1_mask', 'inputnode.t1_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [('outputnode.movpar_file',
                                           'inputnode.movpar_file')]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [('outputnode.skip_vols',
                                                 'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        # Connect bold_bold_trans_wf
        (inputnode, bold_bold_trans_wf, [('bold_file', 'inputnode.name_source')
                                         ]),
        (bold_split, bold_bold_trans_wf, [('out_files', 'inputnode.bold_file')
                                          ]),
        (bold_hmc_wf, bold_bold_trans_wf, [('outputnode.xforms',
                                            'inputnode.hmc_xforms')]),
        (bold_bold_trans_wf, bold_confounds_wf,
         [('outputnode.bold', 'inputnode.bold'),
          ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    if fmaps:
        from ..fieldmap.unwarp import init_fmap_unwarp_report_wf
        sdc_type = fmaps[0]['type']

        # Report on BOLD correction
        fmap_unwarp_report_wf = init_fmap_unwarp_report_wf(suffix='sdc_%s' %
                                                           sdc_type)
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [('t1_seg', 'inputnode.in_seg')
                                                ]),
            (bold_reference_wf, fmap_unwarp_report_wf,
             [('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [('outputnode.itk_t1_to_bold',
                                                   'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [('outputnode.bold_ref',
                                                   'inputnode.in_post')]),
        ])

        if force_syn and sdc_type != 'syn':
            syn_unwarp_report_wf = init_fmap_unwarp_report_wf(
                suffix='forcedsyn', name='syn_unwarp_report_wf')
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [('t1_seg',
                                                    'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf,
                 [('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf,
                 [('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf,
                 [('outputnode.syn_bold_ref', 'inputnode.in_post')]),
            ])

    # if multiecho data, select first echo for hmc correction
    if multiecho:
        inputnode.iterables = ('bold_file', bold_file)

        me_first_echo = pe.Node(FirstEcho(te_list=tes,
                                          in_files=bold_file,
                                          ref_imgs=bold_file),
                                name='me_first_echo')
        # Replace reference with the echo selected with FirstEcho
        workflow.disconnect([
            (inputnode, bold_reference_wf,
             [('bold_file', 'inputnode.bold_file'),
              ('sbref_file', 'inputnode.sbref_file')]),
            (bold_reference_wf, boldbuffer, [('outputnode.bold_file',
                                              'bold_file')]),
        ])
        workflow.connect([
            (me_first_echo, bold_reference_wf, [('first_image',
                                                 'inputnode.bold_file')]),
            (inputnode, boldbuffer, [('bold_file', 'bold_file')]),
        ])

        if t2s_coreg:
            # create a T2* map
            bold_t2s_wf = init_bold_t2s_wf(bold_echos=bold_file,
                                           echo_times=tes,
                                           mem_gb=mem_gb['resampled'],
                                           omp_nthreads=omp_nthreads,
                                           name='bold_t2s_wf')
            bold_t2s_wf.inputs.inputnode.name_source = ref_file

            # Replace EPI-to-T1w registration inputs
            workflow.disconnect([
                (bold_sdc_wf, bold_t1_trans_wf,
                 [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
                  ('outputnode.bold_mask', 'inputnode.ref_bold_mask')]),
            ])
            workflow.connect([
                (bold_hmc_wf, bold_t2s_wf, [('outputnode.xforms',
                                             'inputnode.hmc_xforms')]),
                (bold_t2s_wf, bold_t1_trans_wf,
                 [('outputnode.t2s_map', 'inputnode.ref_bold_brain'),
                  ('outputnode.oc_mask', 'inputnode.ref_bold_mask')]),
            ])

    # Map final BOLD mask into T1w space (if required)
    if 'T1w' in output_spaces:
        from niworkflows.interfaces.fixes import (FixHeaderApplyTransforms as
                                                  ApplyTransforms)

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                  float=True),
                                  name='boldmask_to_t1w',
                                  mem_gb=0.1)
        workflow.connect([
            (bold_bold_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask',
                                                    'input_image')]),
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.itk_bold_to_t1',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_t1')]),
        ])

    if 'template' in output_spaces:
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_mni_trans_wf = init_bold_mni_trans_wf(
            template=template,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            template_out_grid=template_out_grid,
            use_compression=not low_mem,
            use_fieldwarp=fmaps is not None,
            name='bold_mni_trans_wf')
        carpetplot_wf = init_carpetplot_wf(mem_gb=mem_gb['resampled'],
                                           metadata=metadata,
                                           name='carpetplot_wf')

        workflow.connect([
            (inputnode, bold_mni_trans_wf,
             [('bold_file', 'inputnode.name_source'),
              ('t1_2_mni_forward_transform',
               'inputnode.t1_2_mni_forward_transform')]),
            (bold_split, bold_mni_trans_wf, [('out_files',
                                              'inputnode.bold_split')]),
            (bold_hmc_wf, bold_mni_trans_wf, [('outputnode.xforms',
                                               'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_mni_trans_wf, [('outputnode.itk_bold_to_t1',
                                               'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf, bold_mni_trans_wf, [('outputnode.bold_mask',
                                                      'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_mni_trans_wf, [('outputnode.out_warp',
                                               'inputnode.fieldwarp')]),
            (bold_mni_trans_wf, outputnode,
             [('outputnode.bold_mni', 'bold_mni'),
              ('outputnode.bold_mni_ref', 'bold_mni_ref'),
              ('outputnode.bold_mask_mni', 'bold_mask_mni')]),
            (bold_bold_trans_wf, carpetplot_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (inputnode, carpetplot_wf,
             [('t1_2_mni_reverse_transform',
               'inputnode.t1_2_mni_reverse_transform')]),
            (bold_reg_wf, carpetplot_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
            (bold_confounds_wf, carpetplot_wf, [('outputnode.confounds_file',
                                                 'inputnode.confounds_file')]),
        ])

        if use_aroma:
            # ICA-AROMA workflow
            # Internally resamples to MNI152 Linear (2006)
            from .confounds import init_ica_aroma_wf
            from ...interfaces import JoinTSVColumns

            ica_aroma_wf = init_ica_aroma_wf(
                template=template,
                metadata=metadata,
                mem_gb=mem_gb['resampled'],
                omp_nthreads=omp_nthreads,
                use_fieldwarp=fmaps is not None,
                ignore_aroma_err=ignore_aroma_err,
                aroma_melodic_dim=aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(JoinTSVColumns(), name='aroma_confounds')

            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf,
                 [('bold_file', 'inputnode.name_source'),
                  ('t1_2_mni_forward_transform',
                   'inputnode.t1_2_mni_forward_transform')]),
                (bold_split, ica_aroma_wf, [('out_files',
                                             'inputnode.bold_split')]),
                (bold_hmc_wf, ica_aroma_wf,
                 [('outputnode.movpar_file', 'inputnode.movpar_file'),
                  ('outputnode.xforms', 'inputnode.hmc_xforms')]),
                (bold_reg_wf, ica_aroma_wf, [('outputnode.itk_bold_to_t1',
                                              'inputnode.itk_bold_to_t1')]),
                (bold_bold_trans_wf, ica_aroma_wf, [('outputnode.bold_mask',
                                                     'inputnode.bold_mask')]),
                (bold_sdc_wf, ica_aroma_wf, [('outputnode.out_warp',
                                              'inputnode.fieldwarp')]),
                (bold_reference_wf, ica_aroma_wf, [('outputnode.skip_vols',
                                                    'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [('outputnode.confounds_file',
                                            'in_file')]),
                (ica_aroma_wf, join, [('outputnode.aroma_confounds',
                                       'join_file')]),
                (ica_aroma_wf, outputnode,
                 [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                  ('outputnode.melodic_mix', 'melodic_mix'),
                  ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')
                  ]),
                (join, outputnode, [('out_file', 'confounds')]),
            ])

    # SURFACES ##################################################################################
    if freesurfer and any(space.startswith('fs') for space in output_spaces):
        LOGGER.log(25, 'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(mem_gb=mem_gb['resampled'],
                                         output_spaces=output_spaces,
                                         medial_surface_nan=medial_surface_nan,
                                         name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf,
             [('t1_preproc', 'inputnode.t1_preproc'),
              ('subjects_dir', 'inputnode.subjects_dir'),
              ('subject_id', 'inputnode.subject_id'),
              ('t1_2_fsnative_forward_transform',
               'inputnode.t1_2_fsnative_forward_transform')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1',
                                               'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
        ])

        # CIFTI output
        if cifti_output and 'template' in output_spaces:
            bold_surf_wf.__desc__ += """\
*Grayordinates* files [@hcppipelines], which combine surface-sampled
data and volume-sampled data, were also generated.
"""
            gen_cifti = pe.MapNode(GenerateCifti(),
                                   iterfield=["surface_target", "gifti_files"],
                                   name="gen_cifti")
            gen_cifti.inputs.TR = metadata.get("RepetitionTime")

            workflow.connect([
                (bold_surf_wf, gen_cifti, [('targets.out', 'surface_target'),
                                           ('outputnode.surfaces',
                                            'gifti_files')]),
                (inputnode, gen_cifti, [('subjects_dir', 'subjects_dir')]),
                (bold_mni_trans_wf, gen_cifti, [('outputnode.bold_mni',
                                                 'bold_file')]),
                (gen_cifti, outputnode, [('out_file', 'bold_cifti'),
                                         ('variant', 'cifti_variant'),
                                         ('variant_key', 'cifti_variant_key')
                                         ]),
            ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(DerivativesDataSink(suffix='summary'),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, suffix='validation'),
                                   name='ds_report_validation',
                                   run_without_submitting=True,
                                   mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation,
         [('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = reportlets_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
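
The fields documented under **Inputs** are exposed on the returned workflow's
``inputnode`` and must be supplied by the caller, normally by connecting an
upstream anatomical workflow. A minimal, hypothetical sketch of driving the
workflow by hand follows; every file path is made up, and the keyword values
simply mirror the docstring example:

wf = init_func_preproc_wf('/completely/made/up/path/sub-01_task-nback_bold.nii.gz',
                          omp_nthreads=1, ignore=[], freesurfer=False,
                          reportlets_dir='.', output_dir='.',
                          template='MNI152NLin2009cAsym',
                          output_spaces=['T1w', 'template'],
                          debug=False, use_bbr=None, t2s_coreg=False,
                          bold2t1w_dof=6, fmap_bspline=False, fmap_demean=True,
                          use_syn=False, force_syn=False, low_mem=False,
                          template_out_grid='native', medial_surface_nan=False,
                          cifti_output=False, use_aroma=False,
                          ignore_aroma_err=False, aroma_melodic_dim=None)

# Feed anatomical derivatives into the exposed inputnode (illustrative paths only)
wf.inputs.inputnode.t1_preproc = '/derivatives/sub-01/anat/sub-01_T1w_preproc.nii.gz'
wf.inputs.inputnode.t1_brain = '/derivatives/sub-01/anat/sub-01_T1w_brain.nii.gz'
wf.inputs.inputnode.t1_mask = '/derivatives/sub-01/anat/sub-01_T1w_brainmask.nii.gz'
wf.inputs.inputnode.t1_seg = '/derivatives/sub-01/anat/sub-01_T1w_dseg.nii.gz'
wf.inputs.inputnode.t1_tpms = [
    '/derivatives/sub-01/anat/sub-01_T1w_class-CSF_probtissue.nii.gz',
    '/derivatives/sub-01/anat/sub-01_T1w_class-GM_probtissue.nii.gz',
    '/derivatives/sub-01/anat/sub-01_T1w_class-WM_probtissue.nii.gz',
]
wf.inputs.inputnode.t1_2_mni_forward_transform = '/derivatives/sub-01/anat/t1_to_mni_warp.h5'
wf.inputs.inputnode.t1_2_mni_reverse_transform = '/derivatives/sub-01/anat/mni_to_t1_warp.h5'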
Example 2
def init_bold_preproc_trans_wf(mem_gb,
                               omp_nthreads,
                               name='bold_preproc_trans_wf',
                               use_compression=True,
                               use_fieldwarp=False,
                               split_file=False,
                               interpolation='LanczosWindowedSinc'):
    """
    This workflow resamples the input fMRI in its native (original)
    space in a "single shot" from the original BOLD series.

    .. workflow::
        :graph2use: colored
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_preproc_trans_wf
        wf = init_bold_preproc_trans_wf(mem_gb=3, omp_nthreads=1)

    **Parameters**

        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``bold_preproc_trans_wf``)
        use_compression : bool
            Save registered BOLD series as ``.nii.gz``
        use_fieldwarp : bool
            Include SDC warp in single-shot transform from BOLD to MNI
        split_file : bool
            Whether the input file should be split (i.e., it is a 4D file)
            or is already a list of 3D files (default ``False``, do not split)
        interpolation : str
            Interpolation type to be used by ANTs' ``applyTransforms``
            (default ``'LanczosWindowedSinc'``)

    **Inputs**

        bold_file
            Individual 3D volumes, not motion corrected
        bold_mask
            Skull-stripping mask of reference image
        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing
        hmc_xforms
            List of affine transforms aligning each volume to ``ref_image`` in ITK format
        fieldwarp
            a :abbr:`DFM (displacements field map)` in ITK format

    **Outputs**

        bold
            BOLD series, resampled in native space, including all preprocessing
        bold_mask
            BOLD series mask calculated with the new time-series
        bold_ref
            BOLD reference image: an average-like 3D image of the time-series
        bold_ref_brain
            Same as ``bold_ref``, but once the brain mask has been applied

    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series (including slice-timing correction when applied)
were resampled onto their original, native space by applying
{transforms}.
These resampled BOLD time-series will be referred to as *preprocessed
BOLD in original space*, or just *preprocessed BOLD*.
""".format(transforms="""\
a single, composite transform to correct for head-motion and
susceptibility distortions""" if use_fieldwarp else """\
the transforms to correct for head-motion""")

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'name_source', 'bold_file', 'bold_mask', 'hmc_xforms', 'fieldwarp'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bold', 'bold_mask', 'bold_ref', 'bold_ref_brain']),
                         name='outputnode')

    bold_transform = pe.Node(MultiApplyTransforms(interpolation=interpolation,
                                                  float=True,
                                                  copy_dtype=True),
                             name='bold_transform',
                             mem_gb=mem_gb * 3 * omp_nthreads,
                             n_procs=omp_nthreads)

    merge = pe.Node(Merge(compress=use_compression),
                    name='merge',
                    mem_gb=mem_gb * 3)

    # Generate a new BOLD reference
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
    bold_reference_wf.__desc__ = None  # Unset description to avoid second appearance

    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (bold_transform, merge, [('out_files', 'in_files')]),
        (merge, bold_reference_wf, [('out_file', 'inputnode.bold_file')]),
        (merge, outputnode, [('out_file', 'bold')]),
        (bold_reference_wf, outputnode,
         [('outputnode.ref_image', 'bold_ref'),
          ('outputnode.ref_image_brain', 'bold_ref_brain'),
          ('outputnode.bold_mask', 'bold_mask')]),
    ])

    # The input may arrive as a single 4D file (split it) or as a list of 3D files
    if split_file:
        bold_split = pe.Node(FSLSplit(dimension='t'),
                             name='bold_split',
                             mem_gb=mem_gb * 3)
        workflow.connect([(inputnode, bold_split, [('bold_file', 'in_file')]),
                          (bold_split, bold_transform, [
                              ('out_files', 'input_image'),
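                              # the first split volume defines the output reference grid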
                              (('out_files', _first), 'reference_image'),
                          ])])
    else:
        workflow.connect([
            (inputnode, bold_transform, [('bold_file', 'input_image'),
                                         (('bold_file', _first),
                                          'reference_image')]),
        ])

    if use_fieldwarp:
        merge_xforms = pe.Node(niu.Merge(2),
                               name='merge_xforms',
                               run_without_submitting=True,
                               mem_gb=DEFAULT_MEMORY_MIN_GB)
        workflow.connect([
            (inputnode, merge_xforms, [('fieldwarp', 'in1'),
                                       ('hmc_xforms', 'in2')]),
            (merge_xforms, bold_transform, [('out', 'transforms')]),
        ])
    else:

        def _aslist(val):
            return [val]

        workflow.connect([
            (inputnode, bold_transform, [(('hmc_xforms', _aslist),
                                          'transforms')]),
        ])

    # Code ready to generate a pre/post processing report
    # bold_bold_report_wf = init_bold_preproc_report_wf(
    #     mem_gb=mem_gb['resampled'],
    #     reportlets_dir=reportlets_dir
    # )
    # workflow.connect([
    #     (inputnode, bold_bold_report_wf, [
    #         ('bold_file', 'inputnode.name_source'),
    #         ('bold_file', 'inputnode.in_pre')]),  # This should be after STC
    #     (bold_bold_trans_wf, bold_bold_report_wf, [
    #         ('outputnode.bold', 'inputnode.in_post')]),
    # ])

    return workflow
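
A minimal, hypothetical way of driving this workflow on its own (paths are
illustrative only); with ``split_file=True`` the 4D input is split internally,
and ``use_fieldwarp=True`` merges the SDC warp with the head-motion transforms
before resampling:

wf = init_bold_preproc_trans_wf(mem_gb=3, omp_nthreads=1,
                                use_fieldwarp=True, split_file=True)
wf.inputs.inputnode.name_source = '/data/sub-01/func/sub-01_task-rest_bold.nii.gz'
wf.inputs.inputnode.bold_file = '/data/sub-01/func/sub-01_task-rest_bold.nii.gz'
wf.inputs.inputnode.bold_mask = '/work/boldref/sub-01_task-rest_bold_mask.nii.gz'
wf.inputs.inputnode.hmc_xforms = '/work/hmc/sub-01_task-rest_hmc_itk.txt'  # per-volume affines (ITK)
wf.inputs.inputnode.fieldwarp = '/work/sdc/sub-01_task-rest_fieldwarp.nii.gz'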
Example 3
def init_asl_preproc_trans_wf(mem_gb,
                              omp_nthreads,
                              name='asl_preproc_trans_wf',
                              use_compression=True,
                              use_fieldwarp=False,
                              split_file=False,
                              interpolation='LanczosWindowedSinc'):
    """
    Resample in native (original) space.

    This workflow resamples the input ASL series in its native (original)
    space in a "single shot" from the original asl series.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes

            from aslprep.workflows.asl import init_asl_preproc_trans_wf
            wf = init_asl_preproc_trans_wf(mem_gb=3, omp_nthreads=1)

    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of asl file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``asl_preproc_trans_wf``)
    use_compression : :obj:`bool`
        Save registered asl series as ``.nii.gz``
    use_fieldwarp : :obj:`bool`
        Include SDC warp in single-shot transform from asl to MNI
    split_file : :obj:`bool`
        Whether the input file should be split (i.e., it is a 4D file)
        or is already a list of 3D files (default ``False``, do not split)
    interpolation : :obj:`str`
        Interpolation type to be used by ANTs' ``applyTransforms``
        (default ``'LanczosWindowedSinc'``)

    Inputs
    ------
    asl_file
        Individual 3D volumes, not motion corrected
    asl_mask
        Skull-stripping mask of reference image
    name_source
        asl series NIfTI file
        Used to recover original information lost during processing
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format

    Outputs
    -------
    asl
        asl series, resampled in native space, including all preprocessing
    asl_mask
        asl series mask calculated with the new time-series
    asl_ref
        asl reference image: an average-like 3D image of the time-series
    asl_ref_brain
        Same as ``asl_ref``, but once the brain mask has been applied

    """
    from ...niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from ...niworkflows.func.util import init_asl_reference_wf
    from ...niworkflows.interfaces.itk import MultiApplyTransforms
    from ...niworkflows.interfaces.nilearn import Merge

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The asl time-series (including slice-timing correction when applied)
were resampled onto their original, native space by applying
{transforms}.
These resampled asl time-series will be referred to as *preprocessed
asl in original space*, or just *preprocessed asl*.
""".format(transforms="""\
a single, composite transform to correct for head-motion and
susceptibility distortions""" if use_fieldwarp else """\
the transforms to correct for head-motion""")

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'name_source', 'asl_file', 'asl_mask', 'hmc_xforms', 'fieldwarp'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['asl', 'asl_mask', 'asl_ref', 'asl_ref_brain']),
                         name='outputnode')

    asl_transform = pe.Node(MultiApplyTransforms(interpolation=interpolation,
                                                 float=True,
                                                 copy_dtype=True),
                            name='asl_transform',
                            mem_gb=mem_gb * 3 * omp_nthreads,
                            n_procs=omp_nthreads)

    merge = pe.Node(Merge(compress=use_compression),
                    name='merge',
                    mem_gb=mem_gb * 3)

    # Generate a new asl reference
    asl_reference_wf = init_asl_reference_wf(omp_nthreads=omp_nthreads)
    asl_reference_wf.__desc__ = None  # Unset description to avoid second appearance

    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (asl_transform, merge, [('out_files', 'in_files')]),
        (merge, asl_reference_wf, [('out_file', 'inputnode.asl_file')]),
        (merge, outputnode, [('out_file', 'asl')]),
        (asl_reference_wf, outputnode, [('outputnode.ref_image', 'asl_ref'),
                                        ('outputnode.ref_image_brain',
                                         'asl_ref_brain'),
                                        ('outputnode.asl_mask', 'asl_mask')]),
    ])

    # The input may arrive as a single 4D file (split it) or as a list of 3D files
    if split_file:
        asl_split = pe.Node(FSLSplit(dimension='t'),
                            name='asl_split',
                            mem_gb=mem_gb * 3)
        workflow.connect([(inputnode, asl_split, [('asl_file', 'in_file')]),
                          (asl_split, asl_transform, [
                              ('out_files', 'input_image'),
                              (('out_files', _first), 'reference_image'),
                          ])])
    else:
        workflow.connect([
            (inputnode, asl_transform, [('asl_file', 'input_image'),
                                        (('asl_file', _first),
                                         'reference_image')]),
        ])

    if use_fieldwarp:
        merge_xforms = pe.Node(niu.Merge(2),
                               name='merge_xforms',
                               run_without_submitting=True,
                               mem_gb=DEFAULT_MEMORY_MIN_GB)
        workflow.connect([
            (inputnode, merge_xforms, [('fieldwarp', 'in1'),
                                       ('hmc_xforms', 'in2')]),
            (merge_xforms, asl_transform, [('out', 'transforms')]),
        ])
    else:

        def _aslist(val):
            return [val]

        workflow.connect([
            (inputnode, asl_transform, [(('hmc_xforms', _aslist), 'transforms')
                                        ]),
        ])
    return workflow
Example 4
def init_func_preproc_wf(bold_file):
    """
    This workflow controls the functional preprocessing stages of *fMRIPrep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.tests import mock_config
            from fmriprep import config
            from fmriprep.workflows.bold.base import init_func_preproc_wf
            with mock_config():
                bold_file = config.execution.bids_dir / 'sub-01' / 'func' \
                    / 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz'
                wf = init_func_preproc_wf(str(bold_file))

    Parameters
    ----------
    bold_file
        BOLD series NIfTI file

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_aseg
        Segmentation of structural image, done with FreeSurfer.
    t1w_aparc
        Parcellation of structural image, done with FreeSurfer.
    t1w_tpms
        List of tissue probability maps in T1w space
    template
        List of templates to target
    anat2std_xfm
        List of transform files, collated with templates
    std2anat_xfm
        List of inverse transform files, collated with templates
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w

    Outputs
    -------
    bold_t1
        BOLD series, resampled to T1w space
    bold_mask_t1
        BOLD series mask in T1w space
    bold_std
        BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    confounds
        TSV of confounds
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    aroma_noise_ics
        Noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix
    bold_cifti
        BOLD CIFTI image
    cifti_variant
        Combination of target spaces for ``bold_cifti``

    See Also
    --------

    * :py:func:`~niworkflows.func.util.init_bold_reference_wf`
    * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
    * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
    * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confounds_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
    * :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
    * :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
    * :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
    * :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
    * :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.func.util import init_bold_reference_wf
    from niworkflows.interfaces.nibabel import ApplyMask
    from niworkflows.interfaces.utility import KeySelect
    from niworkflows.interfaces.utils import DictMerge
    from sdcflows.workflows.base import init_sdc_estimate_wf, fieldmap_wrangler

    ref_file = bold_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10
    multiecho = isinstance(bold_file, list)

    # Have some options handy
    layout = config.execution.layout
    omp_nthreads = config.nipype.omp_nthreads
    freesurfer = config.workflow.run_reconall
    spaces = config.workflow.spaces

    if multiecho:
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
        ref_file = dict(zip(tes, bold_file))[min(tes)]

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    config.loggers.workflow.debug(
        'Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
        'Memory resampled/largemem=%.2f/%.2f GB.', ref_file,
        mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # Find associated sbref, if possible
    entities = layout.parse_file_entities(ref_file)
    entities['suffix'] = 'sbref'
    entities['extension'] = ['nii', 'nii.gz']  # Overwrite extensions
    files = layout.get(return_type='file', **entities)
    refbase = os.path.basename(ref_file)
    if 'sbref' in config.workflow.ignore:
        config.loggers.workflow.info("Single-band reference files ignored.")
    elif files and multiecho:
        config.loggers.workflow.warning(
            "Single-band reference found, but not supported in "
            "multi-echo workflows at this time. Ignoring.")
    elif files:
        sbref_file = files[0]
        sbbase = os.path.basename(sbref_file)
        if len(files) > 1:
            config.loggers.workflow.warning(
                "Multiple single-band reference files found for {}; using "
                "{}".format(refbase, sbbase))
        else:
            config.loggers.workflow.info(
                "Using single-band reference file %s.", sbbase)
    else:
        config.loggers.workflow.info("No single-band-reference found for %s.",
                                     refbase)

    metadata = layout.get_metadata(ref_file)

    # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
    fmaps = None
    if 'fieldmaps' not in config.workflow.ignore:
        fmaps = fieldmap_wrangler(layout,
                                  ref_file,
                                  use_syn=config.workflow.use_syn,
                                  force_syn=config.workflow.force_syn)
    elif config.workflow.use_syn or config.workflow.force_syn:
        # If fieldmaps are not enabled, activate SyN-SDC in unforced (False) mode
        fmaps = {'syn': False}

    # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
    run_stc = (bool(metadata.get("SliceTiming"))
               and 'slicetiming' not in config.workflow.ignore
               and (_get_series_len(ref_file) > 4 or "TooShort"))
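    # run_stc therefore takes one of three values: True (apply slice-timing
    # correction), False (no SliceTiming metadata, or the step was ignored),
    # or the truthy string "TooShort" (4 or fewer volumes), which the
    # explicit `run_stc is True` check below treats as a bypass.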

    # Check if MEEPI for T2* coregistration target
    if config.workflow.t2s_coreg and not multiecho:
        config.loggers.workflow.warning(
            "No multiecho BOLD images found for T2* coregistration. "
            "Using standard EPI-T1 coregistration.")
        config.workflow.t2s_coreg = False

    # By default, force-bbr for t2s_coreg unless user specifies otherwise
    if config.workflow.t2s_coreg and config.workflow.use_bbr is None:
        config.workflow.use_bbr = True

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_file', 'subjects_dir', 'subject_id', 't1w_preproc', 't1w_mask',
        't1w_dseg', 't1w_tpms', 't1w_aseg', 't1w_aparc', 'anat2std_xfm',
        'std2anat_xfm', 'template', 't1w2fsnative_xfm', 'fsnative2t1w_xfm'
    ]),
                        name='inputnode')
    inputnode.inputs.bold_file = bold_file
    if sbref_file is not None:
        from niworkflows.interfaces.images import ValidateImage
        val_sbref = pe.Node(ValidateImage(in_file=sbref_file),
                            name='val_sbref')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1',
        'bold_aparc_t1', 'bold_std', 'bold_std_ref', 'bold_mask_std',
        'bold_aseg_std', 'bold_aparc_std', 'bold_native', 'bold_cifti',
        'cifti_variant', 'cifti_metadata', 'cifti_density', 'surfaces',
        'confounds', 'aroma_noise_ics', 'melodic_mix', 'nonaggr_denoised_file',
        'confounds_metadata'
    ]),
                         name='outputnode')

    # Generate a brain-masked conversion of the t1w
    t1w_brain = pe.Node(ApplyMask(), name='t1w_brain')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                         name='boldbuffer')

    summary = pe.Node(FunctionalSummary(
        slice_timing=run_stc,
        registration=('FSL', 'FreeSurfer')[freesurfer],
        registration_dof=config.workflow.bold2t1w_dof,
        registration_init=config.workflow.bold2t1w_init,
        pe_direction=metadata.get("PhaseEncodingDirection"),
        tr=metadata.get("RepetitionTime")),
                      name='summary',
                      mem_gb=config.DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)
    summary.inputs.dummy_scans = config.workflow.dummy_scans

    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=config.workflow.cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=str(config.execution.output_dir),
        spaces=spaces,
        use_aroma=config.workflow.use_aroma,
    )

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_t1', 'inputnode.bold_t1'),
            ('bold_t1_ref', 'inputnode.bold_t1_ref'),
            ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
            ('bold_mask_t1', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surf_files'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_metadata', 'inputnode.cifti_metadata'),
            ('cifti_density', 'inputnode.cifti_density'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
        ]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
    bold_reference_wf.inputs.inputnode.dummy_scans = config.workflow.dummy_scans
    if sbref_file is not None:
        workflow.connect([
            (val_sbref, bold_reference_wf, [('out_file',
                                             'inputnode.sbref_file')]),
        ])

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'),
                         name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(name='bold_reg_wf',
                                   freesurfer=freesurfer,
                                   use_bbr=config.workflow.use_bbr,
                                   bold2t1w_dof=config.workflow.bold2t1w_dof,
                                   bold2t1w_init=config.workflow.bold2t1w_init,
                                   mem_gb=mem_gb['resampled'],
                                   omp_nthreads=omp_nthreads,
                                   use_compression=False)

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=bool(fmaps),
                                             multiecho=multiecho,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        regressors_all_comps=config.workflow.regressors_all_comps,
        regressors_fd_th=config.workflow.regressors_fd_th,
        regressors_dvars_th=config.workflow.regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=bool(fmaps),
        name='bold_bold_trans_wf')
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf, [('outputnode.skip_vols',
                                               'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([(bold_reference_wf, bold_stc_wf, [
                ('outputnode.bold_file', 'inputnode.bold_file')
            ])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([(meepi_echos, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([(bold_reference_wf, boldbuffer,
                           [('outputnode.bold_file', 'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)
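        # Nipype iterables replicate the subgraph downstream of boldbuffer once
        # per echo file, so each echo is split and resampled separately before
        # join_echos collects them for the optimal combination.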

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_estimate_wf(fmaps,
                                       metadata,
                                       omp_nthreads=omp_nthreads,
                                       debug=config.execution.debug)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from niworkflows.func.util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        inputnode.inputs.bold_file = ref_file  # Replace reference w first echo

        join_echos = pe.JoinNode(
            niu.IdentityInterface(fields=['bold_files']),
            joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
            joinfield=['bold_files'],
            name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       t2s_coreg=config.workflow.t2s_coreg,
                                       name='bold_t2smap_wf')

        workflow.connect([
            (skullstrip_bold_wf, join_echos,
             [('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [('bold_files', 'inputnode.bold_file')]),
        ])

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        (inputnode, t1w_brain, [('t1w_preproc', 'in_file'),
                                ('t1w_mask', 'in_mask')]),
        # Generate early reference
        (inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file')]),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf,
         [('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
          ('outputnode.bold_file', 'inputnode.bold_file')]),
        (bold_reference_wf, summary, [('outputnode.algo_dummy_scans',
                                       'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (
            inputnode,
            bold_reg_wf,
            [
                ('t1w_dseg', 'inputnode.t1w_dseg'),
                # Undefined if --fs-no-reconall, but this is safe
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm')
            ]),
        (t1w_brain, bold_reg_wf, [('out_file', 'inputnode.t1w_brain')]),
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('t1w_mask', 'inputnode.t1w_mask'),
                                       ('t1w_aseg', 'inputnode.t1w_aseg'),
                                       ('t1w_aparc', 'inputnode.t1w_aparc')]),
        (t1w_brain, bold_t1_trans_wf, [('out_file', 'inputnode.t1w_brain')]),
        # unused if multiecho, but this is safe
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms',
                                          'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.itk_bold_to_t1',
                                          'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode,
         [('outputnode.bold_t1', 'bold_t1'),
          ('outputnode.bold_t1_ref', 'bold_t1_ref'),
          ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
          ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (t1w_brain, bold_sdc_wf, [('out_file', 'inputnode.t1w_brain')]),
        (bold_reference_wf, bold_sdc_wf,
         [('outputnode.ref_image', 'inputnode.epi_file'),
          ('outputnode.ref_image_brain', 'inputnode.epi_brain'),
          ('outputnode.bold_mask', 'inputnode.epi_mask')]),
        (bold_sdc_wf, bold_t1_trans_wf, [('outputnode.out_warp',
                                          'inputnode.fieldwarp')]),
        (bold_sdc_wf, bold_bold_trans_wf,
         [('outputnode.out_warp', 'inputnode.fieldwarp'),
          ('outputnode.epi_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('t1w_tpms', 'inputnode.t1w_tpms'),
                                        ('t1w_mask', 'inputnode.t1w_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [('outputnode.movpar_file',
                                           'inputnode.movpar_file')]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [('outputnode.skip_vols',
                                                 'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_metadata', 'confounds_metadata'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [('out_files', 'inputnode.bold_file')]),
        (bold_hmc_wf, bold_bold_trans_wf, [('outputnode.xforms',
                                            'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    if not config.workflow.t2s_coreg:
        workflow.connect([
            (bold_sdc_wf, bold_reg_wf, [('outputnode.epi_brain',
                                         'inputnode.ref_bold_brain')]),
            (bold_sdc_wf, bold_t1_trans_wf,
             [('outputnode.epi_brain', 'inputnode.ref_bold_brain'),
              ('outputnode.epi_mask', 'inputnode.ref_bold_mask')]),
        ])
    else:
        workflow.connect([
            # For t2s_coreg, replace EPI-to-T1w registration inputs
            (bold_t2s_wf, bold_reg_wf, [('outputnode.bold_ref_brain',
                                         'inputnode.ref_bold_brain')]),
            (bold_t2s_wf, bold_t1_trans_wf,
             [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
              ('outputnode.bold_mask', 'inputnode.ref_bold_mask')]),
        ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, func_derivatives_wf, [('bold_file',
                                               'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_split, bold_t1_trans_wf, [('out_files',
                                             'inputnode.bold_split')]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf,
             [(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [('outputnode.bold',
                                                       'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_t2s_wf, bold_t1_trans_wf, [('outputnode.bold',
                                              'inputnode.bold_split')]),
        ])

    if fmaps:
        from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
        # Report on BOLD correction
        fmap_unwarp_report_wf = init_sdc_unwarp_report_wf()
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [('t1w_dseg',
                                                 'inputnode.in_seg')]),
            (bold_reference_wf, fmap_unwarp_report_wf,
             [('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [('outputnode.itk_t1_to_bold',
                                                   'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [('outputnode.epi_corrected',
                                                   'inputnode.in_post')]),
        ])

        # Overwrite ``out_path_base`` of unwarping DataSinks
        for node in fmap_unwarp_report_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                fmap_unwarp_report_wf.get_node(
                    node).interface.out_path_base = 'fmriprep'

        for node in bold_sdc_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                bold_sdc_wf.get_node(node).interface.out_path_base = 'fmriprep'

        if 'syn' in fmaps:
            sdc_select_std = pe.Node(KeySelect(fields=['std2anat_xfm']),
                                     name='sdc_select_std',
                                     run_without_submitting=True)
            sdc_select_std.inputs.key = 'MNI152NLin2009cAsym'
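            # KeySelect picks the std2anat_xfm corresponding to
            # 'MNI152NLin2009cAsym' from the anatomical derivatives, presumably
            # because the fieldmap-less SyN prior is defined in that space,
            # independently of which output spaces were requested.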
            workflow.connect([
                (inputnode, sdc_select_std, [('std2anat_xfm', 'std2anat_xfm'),
                                             ('template', 'keys')]),
                (sdc_select_std, bold_sdc_wf, [('std2anat_xfm',
                                                'inputnode.std2anat_xfm')]),
            ])

        if fmaps.get('syn') is True:  # SyN forced
            syn_unwarp_report_wf = init_sdc_unwarp_report_wf(
                name='syn_unwarp_report_wf', forcedsyn=True)
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [('t1w_dseg',
                                                    'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf,
                 [('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf,
                 [('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf, [('outputnode.syn_ref',
                                                      'inputnode.in_post')]),
            ])

            # Overwrite ``out_path_base`` of unwarping DataSinks
            for node in syn_unwarp_report_wf.list_node_names():
                if node.split('.')[-1].startswith('ds_'):
                    syn_unwarp_report_wf.get_node(
                        node).interface.out_path_base = 'fmriprep'

    # Map final BOLD mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(('T1w', 'anat')):
        from niworkflows.interfaces.fixes import (FixHeaderApplyTransforms as
                                                  ApplyTransforms)

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                  float=True),
                                  name='boldmask_to_t1w',
                                  mem_gb=0.1)
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.itk_bold_to_t1',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             boldmask_to_t1w, [('outputnode.bold_mask', 'input_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_t1')]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        workflow.connect([
            (bold_bold_trans_wf, outputnode, [('outputnode.bold',
                                               'bold_native')]),
            (bold_bold_trans_wf, func_derivatives_wf,
             [('outputnode.bold_ref', 'inputnode.bold_native_ref'),
              ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            name='bold_std_trans_wf',
            use_compression=not config.execution.low_mem,
            use_fieldwarp=bool(fmaps),
        )
        workflow.connect([
            (inputnode, bold_std_trans_wf,
             [('template', 'inputnode.templates'),
              ('anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('bold_file', 'inputnode.name_source'),
              ('t1w_aseg', 'inputnode.bold_aseg'),
              ('t1w_aparc', 'inputnode.bold_aparc')]),
            (bold_hmc_wf, bold_std_trans_wf, [('outputnode.xforms',
                                               'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_std_trans_wf, [('outputnode.itk_bold_to_t1',
                                               'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             bold_std_trans_wf, [('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_std_trans_wf, [('outputnode.out_warp',
                                               'inputnode.fieldwarp')]),
            (bold_std_trans_wf, outputnode,
             [('outputnode.bold_std', 'bold_std'),
              ('outputnode.bold_std_ref', 'bold_std_ref'),
              ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('outputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode,
                 [('outputnode.bold_aseg_std', 'bold_aseg_std'),
                  ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        if not multiecho:
            workflow.connect([(bold_split, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])
        else:
            split_opt_comb = bold_split.clone(name='split_opt_comb')
            workflow.connect([(bold_t2s_wf, split_opt_comb,
                               [('outputnode.bold', 'in_file')]),
                              (split_opt_comb, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])

        # func_derivatives_wf internally parametrizes over snapshotted spaces.
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.spatial_reference',
                 'inputnode.spatial_reference'),
                ('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('outputnode.bold_std', 'inputnode.bold_std'),
                ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])

        if config.workflow.use_aroma:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf
            ica_aroma_wf = init_ica_aroma_wf(
                mem_gb=mem_gb['resampled'],
                metadata=metadata,
                omp_nthreads=omp_nthreads,
                use_fieldwarp=bool(fmaps),
                err_on_aroma_warn=config.workflow.aroma_err_on_warn,
                aroma_melodic_dim=config.workflow.aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(niu.Function(output_names=["out_file"],
                                        function=_to_join),
                           name='aroma_confounds')

            mrg_conf_metadata = pe.Node(niu.Merge(2),
                                        name='merge_confound_metadata',
                                        run_without_submitting=True)
            mrg_conf_metadata2 = pe.Node(DictMerge(),
                                         name='merge_confound_metadata2',
                                         run_without_submitting=True)
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf, [('bold_file',
                                            'inputnode.name_source')]),
                (bold_hmc_wf, ica_aroma_wf, [('outputnode.movpar_file',
                                              'inputnode.movpar_file')]),
                (bold_reference_wf, ica_aroma_wf, [('outputnode.skip_vols',
                                                    'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [('outputnode.confounds_file',
                                            'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                 [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join, [('outputnode.aroma_confounds',
                                       'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                 [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                 [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                  ('outputnode.melodic_mix', 'melodic_mix'),
                  ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')
                  ]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict',
                                                   'confounds_metadata')]),
                (bold_std_trans_wf, ica_aroma_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
            ])

    # SURFACES ##################################################################################
    # Freesurfer
    freesurfer_spaces = spaces.get_fs_spaces()
    if freesurfer and freesurfer_spaces:
        config.loggers.workflow.debug(
            'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(
            mem_gb=mem_gb['resampled'],
            surface_spaces=freesurfer_spaces,
            medial_surface_nan=config.workflow.medial_surface_nan,
            name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf,
             [('t1w_preproc', 'inputnode.t1w_preproc'),
              ('subjects_dir', 'inputnode.subjects_dir'),
              ('subject_id', 'inputnode.subject_id'),
              ('t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1',
                                               'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
            (bold_surf_wf, func_derivatives_wf, [('outputnode.target',
                                                  'inputnode.surf_refs')]),
        ])

        # CIFTI output
        if config.workflow.cifti_output:
            from .resampling import init_bold_grayords_wf
            bold_grayords_wf = init_bold_grayords_wf(
                grayord_density=config.workflow.cifti_output,
                mem_gb=mem_gb['resampled'],
                repetition_time=metadata['RepetitionTime'])

            workflow.connect([
                (inputnode, bold_grayords_wf, [('subjects_dir',
                                                'inputnode.subjects_dir')]),
                (bold_std_trans_wf, bold_grayords_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
                (bold_surf_wf, bold_grayords_wf, [
                    ('outputnode.surfaces', 'inputnode.surf_files'),
                    ('outputnode.target', 'inputnode.surf_refs'),
                ]),
                (bold_grayords_wf, outputnode,
                 [('outputnode.cifti_bold', 'bold_cifti'),
                  ('outputnode.cifti_variant', 'cifti_variant'),
                  ('outputnode.cifti_metadata', 'cifti_metadata'),
                  ('outputnode.cifti_density', 'cifti_density')]),
            ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        carpetplot_wf = init_carpetplot_wf(
            mem_gb=mem_gb['resampled'],
            metadata=metadata,
            cifti_output=config.workflow.cifti_output,
            name='carpetplot_wf')

        if config.workflow.cifti_output:
            workflow.connect(bold_grayords_wf, 'outputnode.cifti_bold',
                             carpetplot_wf, 'inputnode.cifti_bold')
        else:
            # Xform to 'MNI152NLin2009cAsym' is always computed.
            carpetplot_select_std = pe.Node(KeySelect(
                fields=['std2anat_xfm'], key='MNI152NLin2009cAsym'),
                                            name='carpetplot_select_std',
                                            run_without_submitting=True)

            workflow.connect([
                (inputnode, carpetplot_select_std, [('std2anat_xfm',
                                                     'std2anat_xfm'),
                                                    ('template', 'keys')]),
                (carpetplot_select_std, carpetplot_wf,
                 [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
                (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
                 carpetplot_wf, [('outputnode.bold', 'inputnode.bold'),
                                 ('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
                (bold_reg_wf, carpetplot_wf, [('outputnode.itk_t1_to_bold',
                                               'inputnode.t1_bold_xform')]),
            ])

        workflow.connect([(bold_confounds_wf, carpetplot_wf, [
            ('outputnode.confounds_file', 'inputnode.confounds_file')
        ])])

    # REPORTING ############################################################
    reportlets_dir = str(config.execution.work_dir / 'reportlets')
    ds_report_summary = pe.Node(DerivativesDataSink(desc='summary',
                                                    keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='validation', keep_dtype=True),
                                   name='ds_report_validation',
                                   run_without_submitting=True,
                                   mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation,
         [('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
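    # (any node whose leaf name starts with ``ds_report`` -- e.g. the
    # ``ds_report_summary`` node defined above -- gets the reportlets
    # directory and the source BOLD file injected here).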
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = reportlets_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
Example #5
def init_func_preproc_wf(
    aroma_melodic_dim,
    bold2t1w_dof,
    bold_file,
    cifti_output,
    debug,
    dummy_scans,
    err_on_aroma_warn,
    fmap_bspline,
    fmap_demean,
    force_syn,
    freesurfer,
    ignore,
    low_mem,
    medial_surface_nan,
    omp_nthreads,
    output_dir,
    output_spaces,
    regressors_all_comps,
    regressors_dvars_th,
    regressors_fd_th,
    reportlets_dir,
    t2s_coreg,
    use_aroma,
    use_bbr,
    use_syn,
    layout=None,
    num_bold=1,
):
    """
    This workflow controls the functional preprocessing stages of FMRIPREP.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold import init_func_preproc_wf
        from collections import namedtuple, OrderedDict
        BIDSLayout = namedtuple('BIDSLayout', ['root'])
        wf = init_func_preproc_wf(
            aroma_melodic_dim=-200,
            bold2t1w_dof=9,
            bold_file='/completely/made/up/path/sub-01_task-nback_bold.nii.gz',
            cifti_output=False,
            debug=False,
            dummy_scans=None,
            err_on_aroma_warn=False,
            fmap_bspline=True,
            fmap_demean=True,
            force_syn=True,
            freesurfer=True,
            ignore=[],
            low_mem=False,
            medial_surface_nan=False,
            omp_nthreads=1,
            output_dir='.',
            output_spaces=OrderedDict([
                ('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
                ('T1w', {}), ('fsnative', {})]),
            regressors_all_comps=False,
            regressors_dvars_th=1.5,
            regressors_fd_th=0.5,
            reportlets_dir='.',
            t2s_coreg=False,
            use_aroma=False,
            use_bbr=True,
            use_syn=True,
            layout=BIDSLayout('.'),
            num_bold=1,
        )

    **Parameters**

        aroma_melodic_dim : int
            Maximum number of components identified by MELODIC within ICA-AROMA
            (default is -200, i.e. no limitation).
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        bold_file : str
            BOLD series NIfTI file
        cifti_output : bool
            Generate bold CIFTI file in output spaces
        debug : bool
            Enable debugging outputs
        dummy_scans : int or None
            Number of volumes to consider as non steady state
        err_on_aroma_warn : bool
            Raise an error on ICA-AROMA warnings, rather than only reporting them
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        force_syn : bool
            **Temporary**: Always run SyN-based SDC
        freesurfer : bool
            Enable FreeSurfer functional registration (bbregister) and resampling
            BOLD series to FreeSurfer surface meshes.
        ignore : list
            Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files
        omp_nthreads : int
            Maximum number of threads an individual process may use
        output_dir : str
            Directory in which to save derivatives
        output_spaces : OrderedDict
            Ordered dictionary where keys are TemplateFlow ID strings (e.g. ``MNI152Lin``,
            ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``), strings designating
            nonstandard references (e.g. ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
            or paths pointing to custom templates organized in a TemplateFlow-like structure.
            Values of the dictionary aggregate modifiers (e.g. the value for the key ``MNI152Lin``
            could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
            resolution version of the selected template).
        regressors_all_comps : bool
            Return all CompCor component time series instead of the top fraction
        regressors_dvars_th : float
            Criterion for flagging DVARS outliers
        regressors_fd_th : float
            Criterion for flagging framewise displacement outliers
        reportlets_dir : str
            Absolute path of a directory in which reportlets will be temporarily stored
        t2s_coreg : bool
            For multiecho EPI, use the calculated T2*-map for T2*-driven coregistration
        use_aroma : bool
            Perform ICA-AROMA on MNI-resampled functional series
        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
            When using ``t2s_coreg``, BBR will be enabled by default unless
            explicitly specified otherwise.
        use_syn : bool
            **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
            If fieldmaps are present and enabled, this is not run by default.
        layout : BIDSLayout
            BIDSLayout structure to enable metadata retrieval
        num_bold : int
            Total number of BOLD files that have been set for preprocessing
            (default is 1)

    **Inputs**

        bold_file
            BOLD series NIfTI file
        t1_preproc
            Bias-corrected structural template image
        t1_brain
            Skull-stripped ``t1_preproc``
        t1_mask
            Mask of the skull-stripped template image
        t1_seg
            Segmentation of preprocessed structural image, including
            gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
        t1_tpms
            List of tissue probability maps in T1w space
        anat2std_xfm
            ANTs-compatible affine-and-warp transform file
        std2anat_xfm
            ANTs-compatible affine-and-warp transform file (inverse)
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID
        t1_2_fsnative_forward_transform
            LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
        t1_2_fsnative_reverse_transform
            LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w


    **Outputs**

        bold_t1
            BOLD series, resampled to T1w space
        bold_mask_t1
            BOLD series mask in T1w space
        bold_std
            BOLD series, resampled to template space
        bold_mask_std
            BOLD series mask in template space
        confounds
            TSV of confounds
        surfaces
            BOLD series, resampled to FreeSurfer surfaces
        aroma_noise_ics
            Noise components identified by ICA-AROMA
        melodic_mix
            FSL MELODIC mixing matrix
        bold_cifti
            BOLD CIFTI image
        cifti_variant
            combination of target spaces for `bold_cifti`


    **Subworkflows**

        * :py:func:`~fmriprep.workflows.bold.util.init_bold_reference_wf`
        * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
        * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
        * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
        * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confounds_wf`
        * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.pepolar.init_pepolar_unwarp_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_fmap_estimator_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_sdc_unwarp_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_nonlinear_sdc_wf`

    """
    from .resampling import NONSTANDARD_REFERENCES
    from ..fieldmap.base import init_sdc_wf  # Avoid circular dependency (#1066)

    # Filter out standard spaces to a separate dict
    std_spaces = OrderedDict([(key, modifiers)
                              for key, modifiers in output_spaces.items()
                              if key not in NONSTANDARD_REFERENCES])
    volume_std_spaces = OrderedDict([(key, modifiers)
                                     for key, modifiers in std_spaces.items()
                                     if not key.startswith('fs')])
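    # Illustration, using the output_spaces from the docstring example above:
    # with keys MNI152Lin, fsaverage, T1w and fsnative (and assuming T1w and
    # fsnative are listed in NONSTANDARD_REFERENCES), std_spaces keeps
    # MNI152Lin and fsaverage, while volume_std_spaces drops the
    # surface-based 'fs*' keys and keeps only MNI152Lin.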

    ref_file = bold_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10
    multiecho = isinstance(bold_file, list)

    if multiecho:
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
        ref_file = dict(zip(tes, bold_file))[min(tes)]

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    LOGGER.log(
        25, ('Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
             'Memory resampled/largemem=%.2f/%.2f GB.'), ref_file,
        mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # For doc building purposes
    if not hasattr(layout, 'parse_file_entities'):
        LOGGER.log(25, 'No valid layout: building empty workflow.')
        metadata = {
            'RepetitionTime': 2.0,
            'SliceTiming': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
            'PhaseEncodingDirection': 'j',
        }
        fmaps = [{
            'suffix': 'phasediff',
            'phasediff': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_phasediff.nii.gz',
            'magnitude1': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude1.nii.gz',
            'magnitude2': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude2.nii.gz',
        }]
        run_stc = True
        multiecho = False
    else:
        # Find associated sbref, if possible
        entities = layout.parse_file_entities(ref_file)
        entities['suffix'] = 'sbref'
        entities['extension'] = ['nii', 'nii.gz']  # Overwrite extensions
        files = layout.get(return_type='file', **entities)
        refbase = os.path.basename(ref_file)
        if 'sbref' in ignore:
            LOGGER.info("Single-band reference files ignored.")
        elif files and multiecho:
            LOGGER.warning("Single-band reference found, but not supported in "
                           "multi-echo workflows at this time. Ignoring.")
        elif files:
            sbref_file = files[0]
            sbbase = os.path.basename(sbref_file)
            if len(files) > 1:
                LOGGER.warning(
                    "Multiple single-band reference files found for {}; using "
                    "{}".format(refbase, sbbase))
            else:
                LOGGER.log(
                    25, "Using single-band reference file {}".format(sbbase))
        else:
            LOGGER.log(25,
                       "No single-band-reference found for {}".format(refbase))

        metadata = layout.get_metadata(ref_file)

        # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
        fmaps = []
        if 'fieldmaps' not in ignore:
            for fmap in layout.get_fieldmap(ref_file, return_list=True):
                if fmap['suffix'] == 'phase':
                    LOGGER.warning("""\
Found phase1/2 type of fieldmaps, which are not currently supported. \
fMRIPrep will discard them for susceptibility distortion correction. \
Please, follow up on this issue at \
https://github.com/poldracklab/fmriprep/issues/1655.""")
                else:
                    fmap['metadata'] = layout.get_metadata(
                        fmap[fmap['suffix']])
                    fmaps.append(fmap)

        # Run SyN if forced or in the absence of fieldmap correction
        if force_syn or (use_syn and not fmaps):
            fmaps.append({'suffix': 'syn'})

        # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
        run_stc = ("SliceTiming" in metadata and 'slicetiming' not in ignore
                   and (_get_series_len(ref_file) > 4 or "TooShort"))

    # Check if MEEPI for T2* coregistration target
    if t2s_coreg and not multiecho:
        LOGGER.warning(
            "No multiecho BOLD images found for T2* coregistration. "
            "Using standard EPI-T1 coregistration.")
        t2s_coreg = False

    # By default, force-bbr for t2s_coreg unless user specifies otherwise
    if t2s_coreg and use_bbr is None:
        use_bbr = True

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__desc__ = """

Functional data preprocessing

: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=num_bold)

    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_file', 'subjects_dir', 'subject_id', 't1_preproc', 't1_brain',
        't1_mask', 't1_seg', 't1_tpms', 't1_aseg', 't1_aparc', 'anat2std_xfm',
        'std2anat_xfm', 'template', 'joint_anat2std_xfm', 'joint_std2anat_xfm',
        'joint_template', 't1_2_fsnative_forward_transform',
        't1_2_fsnative_reverse_transform'
    ]),
                        name='inputnode')
    inputnode.inputs.bold_file = bold_file
    if sbref_file is not None:
        from niworkflows.interfaces.images import ValidateImage
        val_sbref = pe.Node(ValidateImage(in_file=sbref_file),
                            name='val_sbref')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1',
        'bold_aparc_t1', 'bold_std', 'bold_std_ref',
        'bold_mask_std', 'bold_aseg_std', 'bold_aparc_std', 'bold_native',
        'bold_cifti', 'cifti_variant', 'cifti_variant_key', 'surfaces',
        'confounds', 'aroma_noise_ics', 'melodic_mix', 'nonaggr_denoised_file',
        'confounds_metadata'
    ]),
                         name='outputnode')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                         name='boldbuffer')

    summary = pe.Node(FunctionalSummary(
        slice_timing=run_stc,
        registration=('FSL', 'FreeSurfer')[freesurfer],
        registration_dof=bold2t1w_dof,
        pe_direction=metadata.get("PhaseEncodingDirection"),
        tr=metadata.get("RepetitionTime")),
                      name='summary',
                      mem_gb=DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)
    summary.inputs.dummy_scans = dummy_scans

    # CIFTI output: currently, we only support fsaverage{5,6}
    cifti_spaces = set(s for s in output_spaces.keys()
                       if s in ('fsaverage5', 'fsaverage6'))
    fsaverage_den = output_spaces.get('fsaverage', {}).get('den')
    if fsaverage_den:
        cifti_spaces.add(FSAVERAGE_DENSITY[fsaverage_den])
    cifti_output = cifti_output and cifti_spaces
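    # Illustration: requesting 'fsaverage5'/'fsaverage6' directly, or
    # 'fsaverage' with a density that FSAVERAGE_DENSITY maps to one of those
    # spaces, keeps cifti_output truthy; otherwise the expression above
    # coerces it to a falsy value so CIFTI derivatives are not generated.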
    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=output_dir,
        output_spaces=output_spaces,
        standard_spaces=list(std_spaces.keys()),
        use_aroma=use_aroma,
    )

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_t1', 'inputnode.bold_t1'),
            ('bold_t1_ref', 'inputnode.bold_t1_ref'),
            ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
            ('bold_mask_t1', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surfaces'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_variant_key', 'inputnode.cifti_variant_key'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
        ]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
    bold_reference_wf.inputs.inputnode.dummy_scans = dummy_scans
    if sbref_file is not None:
        workflow.connect([
            (val_sbref, bold_reference_wf, [('out_file',
                                             'inputnode.sbref_file')]),
        ])

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'),
                         name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(name='bold_reg_wf',
                                   freesurfer=freesurfer,
                                   use_bbr=use_bbr,
                                   bold2t1w_dof=bold2t1w_dof,
                                   mem_gb=mem_gb['resampled'],
                                   omp_nthreads=omp_nthreads,
                                   use_compression=False)

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=(fmaps is not None
                                                            or use_syn),
                                             multiecho=multiecho,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        regressors_all_comps=regressors_all_comps,
        regressors_fd_th=regressors_fd_th,
        regressors_dvars_th=regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not low_mem,
        use_fieldwarp=(fmaps is not None or use_syn),
        name='bold_bold_trans_wf')
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf, [('outputnode.skip_vols',
                                               'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([(bold_reference_wf, bold_stc_wf, [
                ('outputnode.bold_file', 'inputnode.bold_file')
            ])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([(meepi_echos, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([(bold_reference_wf, boldbuffer,
                           [('outputnode.bold_file', 'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_wf(fmaps,
                              metadata,
                              omp_nthreads=omp_nthreads,
                              debug=debug,
                              fmap_demean=fmap_demean,
                              fmap_bspline=fmap_bspline)
    # If no standard space is given, use the default for SyN-SDC
    if not volume_std_spaces or 'MNI152NLin2009cAsym' in volume_std_spaces:
        bold_sdc_wf.inputs.inputnode.template = 'MNI152NLin2009cAsym'
    else:
        bold_sdc_wf.inputs.inputnode.template = next(iter(volume_std_spaces))

    if not fmaps:
        LOGGER.warning('SDC: no fieldmaps found or they were ignored (%s).',
                       ref_file)
    elif fmaps[0]['suffix'] == 'syn':
        LOGGER.warning(
            'SDC: no fieldmaps found or they were ignored. '
            'Using EXPERIMENTAL "fieldmap-less SyN" correction '
            'for dataset %s.', ref_file)
    else:
        LOGGER.log(
            25, 'SDC: fieldmap estimation of type "%s" intended for %s found.',
            fmaps[0]['suffix'], ref_file)

    # Overwrite ``out_path_base`` of sdcflows' DataSinks
    for node in bold_sdc_wf.list_node_names():
        if node.split('.')[-1].startswith('ds_'):
            bold_sdc_wf.get_node(node).interface.out_path_base = 'fmriprep'

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from .util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        inputnode.inputs.bold_file = ref_file  # Replace reference w first echo

        join_echos = pe.JoinNode(
            niu.IdentityInterface(fields=['bold_files']),
            joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
            joinfield=['bold_files'],
            name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       t2s_coreg=t2s_coreg,
                                       name='bold_t2smap_wf')

        workflow.connect([
            (skullstrip_bold_wf, join_echos,
             [('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [('bold_files', 'inputnode.bold_file')]),
        ])
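        # Multi-echo flow (connected further below): each echo is resampled by
        # bold_bold_trans_wf, skull-stripped, joined across echoes, and then
        # combined by bold_t2s_wf into the optimal combination and an adaptive
        # T2* map.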

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        # Generate early reference
        (inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file')]),
        # The BOLD buffer holds the slice-time-corrected series if STC ran,
        # the original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf,
         [('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
          ('outputnode.bold_file', 'inputnode.bold_file')]),
        (bold_reference_wf, summary, [('outputnode.algo_dummy_scans',
                                       'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (
            inputnode,
            bold_reg_wf,
            [
                ('t1_brain', 'inputnode.t1_brain'),
                ('t1_seg', 'inputnode.t1_seg'),
                # Undefined if --no-freesurfer, but this is safe
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('t1_2_fsnative_reverse_transform',
                 'inputnode.t1_2_fsnative_reverse_transform')
            ]),
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('t1_brain', 'inputnode.t1_brain'),
                                       ('t1_mask', 'inputnode.t1_mask'),
                                       ('t1_aseg', 'inputnode.t1_aseg'),
                                       ('t1_aparc', 'inputnode.t1_aparc')]),
        # unused if multiecho, but this is safe
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms',
                                          'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.itk_bold_to_t1',
                                          'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode,
         [('outputnode.bold_t1', 'bold_t1'),
          ('outputnode.bold_t1_ref', 'bold_t1_ref'),
          ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
          ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (inputnode, bold_sdc_wf, [('joint_template', 'inputnode.templates'),
                                  ('joint_std2anat_xfm',
                                   'inputnode.std2anat_xfm')]),
        (inputnode, bold_sdc_wf, [('t1_brain', 'inputnode.t1_brain')]),
        (bold_reference_wf, bold_sdc_wf,
         [('outputnode.ref_image', 'inputnode.bold_ref'),
          ('outputnode.ref_image_brain', 'inputnode.bold_ref_brain'),
          ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        # For t2s_coreg, replace EPI-to-T1w registration inputs
        (bold_sdc_wf if not t2s_coreg else bold_t2s_wf, bold_reg_wf,
         [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain')]),
        (bold_sdc_wf if not t2s_coreg else bold_t2s_wf, bold_t1_trans_wf,
         [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
          ('outputnode.bold_mask', 'inputnode.ref_bold_mask')]),
        (bold_sdc_wf, bold_t1_trans_wf, [('outputnode.out_warp',
                                          'inputnode.fieldwarp')]),
        (bold_sdc_wf, bold_bold_trans_wf,
         [('outputnode.out_warp', 'inputnode.fieldwarp'),
          ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')
                                ]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('t1_tpms', 'inputnode.t1_tpms'),
                                        ('t1_mask', 'inputnode.t1_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [('outputnode.movpar_file',
                                           'inputnode.movpar_file')]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [('outputnode.skip_vols',
                                                 'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_metadata', 'confounds_metadata'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [('out_files', 'inputnode.bold_file')]
         ),
        (bold_hmc_wf, bold_bold_trans_wf, [('outputnode.xforms',
                                            'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, func_derivatives_wf, [('bold_file',
                                               'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_split, bold_t1_trans_wf, [('out_files',
                                             'inputnode.bold_split')]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf,
             [(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [('outputnode.bold',
                                                       'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_t2s_wf, bold_t1_trans_wf, [('outputnode.bold',
                                              'inputnode.bold_split')]),
        ])

    if fmaps:
        from ..fieldmap.unwarp import init_fmap_unwarp_report_wf
        # Report on BOLD correction
        fmap_unwarp_report_wf = init_fmap_unwarp_report_wf()
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [('t1_seg', 'inputnode.in_seg')
                                                ]),
            (bold_reference_wf, fmap_unwarp_report_wf,
             [('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [('outputnode.itk_t1_to_bold',
                                                   'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [('outputnode.bold_ref',
                                                   'inputnode.in_post')]),
        ])

        # Overwrite ``out_path_base`` of unwarping DataSinks
        for node in fmap_unwarp_report_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                fmap_unwarp_report_wf.get_node(
                    node).interface.out_path_base = 'fmriprep'

        if force_syn and fmaps[0]['suffix'] != 'syn':
            syn_unwarp_report_wf = init_fmap_unwarp_report_wf(
                name='syn_unwarp_report_wf', forcedsyn=True)
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [('t1_seg',
                                                    'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf,
                 [('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf,
                 [('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf,
                 [('outputnode.syn_bold_ref', 'inputnode.in_post')]),
            ])

            # Overwrite ``out_path_base`` of unwarping DataSinks
            for node in syn_unwarp_report_wf.list_node_names():
                if node.split('.')[-1].startswith('ds_'):
                    syn_unwarp_report_wf.get_node(
                        node).interface.out_path_base = 'fmriprep'

    # Map final BOLD mask into T1w space (if required)
    if 'T1w' in output_spaces or 'anat' in output_spaces:
        from niworkflows.interfaces.fixes import (FixHeaderApplyTransforms as
                                                  ApplyTransforms)

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                  float=True),
                                  name='boldmask_to_t1w',
                                  mem_gb=0.1)
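        # MultiLabel interpolation keeps the resampled mask discrete
        # (no partial-volume blurring at the brain boundary).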
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.itk_bold_to_t1',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             boldmask_to_t1w, [('outputnode.bold_mask', 'input_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_t1')]),
        ])

    if set(['func', 'run', 'bold', 'boldref',
            'sbref']).intersection(output_spaces):
        workflow.connect([
            (bold_bold_trans_wf, outputnode, [('outputnode.bold',
                                               'bold_native')]),
            (bold_bold_trans_wf, func_derivatives_wf,
             [('outputnode.bold_ref', 'inputnode.bold_native_ref'),
              ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        ])

    if volume_std_spaces:
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            standard_spaces=volume_std_spaces,
            name='bold_std_trans_wf',
            use_compression=not low_mem,
            use_fieldwarp=fmaps is not None,
        )
        workflow.connect([
            (inputnode, bold_std_trans_wf,
             [('joint_template', 'inputnode.templates'),
              ('joint_anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('bold_file', 'inputnode.name_source'),
              ('t1_aseg', 'inputnode.bold_aseg'),
              ('t1_aparc', 'inputnode.bold_aparc')]),
            (bold_hmc_wf, bold_std_trans_wf, [('outputnode.xforms',
                                               'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_std_trans_wf, [('outputnode.itk_bold_to_t1',
                                               'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             bold_std_trans_wf, [('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_std_trans_wf, [('outputnode.out_warp',
                                               'inputnode.fieldwarp')]),
            (bold_std_trans_wf, outputnode,
             [('outputnode.bold_std', 'bold_std'),
              ('outputnode.bold_std_ref', 'bold_std_ref'),
              ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('poutputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('poutputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode,
                 [('outputnode.bold_aseg_std', 'bold_aseg_std'),
                  ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        if 'MNI152NLin2009cAsym' in std_spaces:
            carpetplot_wf = init_carpetplot_wf(standard_spaces=std_spaces,
                                               mem_gb=mem_gb['resampled'],
                                               metadata=metadata,
                                               name='carpetplot_wf')
            workflow.connect([
                (inputnode, carpetplot_wf, [('joint_std2anat_xfm',
                                             'inputnode.std2anat_xfm')]),
                (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
                 carpetplot_wf, [('outputnode.bold', 'inputnode.bold'),
                                 ('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
                (bold_reg_wf, carpetplot_wf, [('outputnode.itk_t1_to_bold',
                                               'inputnode.t1_bold_xform')]),
                (bold_confounds_wf, carpetplot_wf,
                 [('outputnode.confounds_file', 'inputnode.confounds_file')]),
            ])

        if not multiecho:
            workflow.connect([(bold_split, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])
        else:
            split_opt_comb = bold_split.clone(name='split_opt_comb')
            workflow.connect([(bold_t2s_wf, split_opt_comb,
                               [('outputnode.bold', 'in_file')]),
                              (split_opt_comb, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])

        # Artifacts resampled in MNI space can only be sinked if they
        # were actually generated. See #1348.
        # Uses the parameterized outputnode to generate all outputs
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('poutputnode.templates', 'inputnode.template'),
                ('poutputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('poutputnode.bold_std', 'inputnode.bold_std'),
                ('poutputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])

        if use_aroma and 'MNI152NLin6Asym' in std_spaces:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf

            ica_aroma_wf = init_ica_aroma_wf(
                metadata=metadata,
                mem_gb=mem_gb['resampled'],
                omp_nthreads=omp_nthreads,
                use_fieldwarp=fmaps is not None,
                err_on_aroma_warn=err_on_aroma_warn,
                aroma_melodic_dim=aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(niu.Function(output_names=["out_file"],
                                        function=_to_join),
                           name='aroma_confounds')

            mrg_conf_metadata = pe.Node(niu.Merge(2),
                                        name='merge_confound_metadata',
                                        run_without_submitting=True)
            mrg_conf_metadata2 = pe.Node(DictMerge(),
                                         name='merge_confound_metadata2',
                                         run_without_submitting=True)
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
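            # The direct confounds outputs are detached above so they can be
            # rebuilt below with the AROMA regressors and metadata merged in.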
            workflow.connect([
                (bold_std_trans_wf, ica_aroma_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                  ('outputnode.templates', 'inputnode.templates')]),
                (inputnode, ica_aroma_wf, [('bold_file',
                                            'inputnode.name_source')]),
                (bold_hmc_wf, ica_aroma_wf, [('outputnode.movpar_file',
                                              'inputnode.movpar_file')]),
                (bold_reference_wf, ica_aroma_wf, [('outputnode.skip_vols',
                                                    'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [('outputnode.confounds_file',
                                            'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                 [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join, [('outputnode.aroma_confounds',
                                       'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                 [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                 [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                  ('outputnode.melodic_mix', 'melodic_mix'),
                  ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')
                  ]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict',
                                                   'confounds_metadata')]),
            ])

    # SURFACES ##################################################################################
    surface_spaces = [
        space for space in output_spaces.keys() if space.startswith('fs')
    ]
    if freesurfer and surface_spaces:
        LOGGER.log(25, 'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(mem_gb=mem_gb['resampled'],
                                         output_spaces=surface_spaces,
                                         medial_surface_nan=medial_surface_nan,
                                         name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf,
             [('t1_preproc', 'inputnode.t1_preproc'),
              ('subjects_dir', 'inputnode.subjects_dir'),
              ('subject_id', 'inputnode.subject_id'),
              ('t1_2_fsnative_forward_transform',
               'inputnode.t1_2_fsnative_forward_transform')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1',
                                               'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
        ])

        if cifti_output:
            from niworkflows.interfaces.utility import KeySelect
            bold_surf_wf.__desc__ += """\
*Grayordinates* files [@hcppipelines], which combine surface-sampled
data and volume-sampled data, were also generated.
"""
            select_std = pe.Node(KeySelect(fields=['bold_std']),
                                 name='select_std',
                                 run_without_submitting=True)
            select_std.inputs.key = 'MNI152NLin2009cAsym'

            gen_cifti = pe.MapNode(GenerateCifti(),
                                   iterfield=["surface_target", "gifti_files"],
                                   name="gen_cifti")
            gen_cifti.inputs.TR = metadata.get("RepetitionTime")
            gen_cifti.inputs.surface_target = list(cifti_spaces)

            workflow.connect([
                (bold_std_trans_wf, select_std,
                 [('outputnode.templates', 'keys'),
                  ('outputnode.bold_std', 'bold_std')]),
                (bold_surf_wf, gen_cifti, [('outputnode.surfaces',
                                            'gifti_files')]),
                (inputnode, gen_cifti, [('subjects_dir', 'subjects_dir')]),
                (select_std, gen_cifti, [('bold_std', 'bold_file')]),
                (gen_cifti, outputnode, [('out_file', 'bold_cifti'),
                                         ('variant', 'cifti_variant'),
                                         ('variant_key', 'cifti_variant_key')
                                         ]),
            ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(DerivativesDataSink(desc='summary',
                                                    keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='validation', keep_dtype=True),
                                   name='ds_report_validation',
                                   run_without_submitting=True,
                                   mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation,
         [('outputnode.validation_report', 'in_file')]),
    ])

    # Fill in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = reportlets_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
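
The ``boldbuffer`` node used throughout the workflow above is a common nipype idiom: an ``IdentityInterface`` that acts as a pointer, so downstream nodes are wired to a single buffer regardless of whether slice-timing correction ran. The following is a minimal, self-contained sketch of that pattern; it assumes only that nipype is installed, and the node and field names are illustrative rather than taken from fMRIPrep.

from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe

run_stc = False  # toggle to emulate the STC / no-STC branches above

wf = pe.Workflow(name='buffer_demo')
# The buffer: every downstream consumer connects here exactly once.
boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                     name='boldbuffer')
source = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='source')
consumer = pe.Node(niu.IdentityInterface(fields=['in_file']), name='consumer')

if run_stc:
    # Stand-in for the slice-timing-correction workflow's output node.
    stc = pe.Node(niu.IdentityInterface(fields=['stc_file']), name='stc')
    wf.connect([(stc, boldbuffer, [('stc_file', 'bold_file')])])
else:
    wf.connect([(source, boldbuffer, [('bold_file', 'bold_file')])])

# Consumers are wired once, independently of the branch taken above.
wf.connect([(boldbuffer, consumer, [('bold_file', 'in_file')])])

Keeping consumers attached to the buffer means the splitter, head-motion correction and confounds connections stay identical whether or not STC was applied.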
Example No. 6
def init_bold_t1_trans_wf(freesurfer, mem_gb, omp_nthreads, multiecho=False, use_fieldwarp=False,
                          use_compression=True, name='bold_t1_trans_wf'):
    """
    Co-register the reference BOLD image to T1w-space.

    The workflow uses :abbr:`BBR (boundary-based registration)`.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold.registration import init_bold_t1_trans_wf
            wf = init_bold_t1_trans_wf(freesurfer=True,
                                       mem_gb=3,
                                       omp_nthreads=1)

    Parameters
    ----------
    freesurfer : bool
        Enable FreeSurfer functional registration (bbregister)
    use_fieldwarp : bool
        Include SDC warp in single-shot transform from BOLD to T1
    multiecho : bool
        If multiecho data was supplied, HMC has already been performed
    mem_gb : float
        Size of BOLD file in GB
    omp_nthreads : int
        Maximum number of threads an individual process may use
    use_compression : bool
        Save registered BOLD series as ``.nii.gz``
    name : str
        Name of workflow (default: ``bold_t1_trans_wf``)

    Inputs
    ------
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    ref_bold_brain
        Reference image to which BOLD series is aligned
        If ``use_fieldwarp == True``, ``ref_bold_brain`` should be unwarped
    ref_bold_mask
        Skull-stripping mask of reference image
    t1w_brain
        Skull-stripped bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_aseg
        FreeSurfer's ``aseg.mgz`` atlas projected into the T1w reference
        (only if ``recon-all`` was run).
    t1w_aparc
        FreeSurfer's ``aparc+aseg.mgz`` atlas projected into the T1w reference
        (only if ``recon-all`` was run).
    bold_split
        Individual 3D BOLD volumes, not motion corrected
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format

    Outputs
    -------
    bold_t1
        Motion-corrected BOLD series in T1 space
    bold_t1_ref
        Reference, contrast-enhanced summary of the motion-corrected BOLD series in T1w space
    bold_mask_t1
        BOLD mask in T1 space
    bold_aseg_t1
        FreeSurfer's ``aseg.mgz`` atlas, in T1w-space at the BOLD resolution
        (only if ``recon-all`` was run).
    bold_aparc_t1
        FreeSurfer's ``aparc+aseg.mgz`` atlas, in T1w-space at the BOLD resolution
        (only if ``recon-all`` was run).

    See also
    --------
      * :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`
      * :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`

    """
    from .util import init_bold_reference_wf
    workflow = Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=['name_source', 'ref_bold_brain', 'ref_bold_mask',
                    't1w_brain', 't1w_mask', 't1w_aseg', 't1w_aparc',
                    'bold_split', 'fieldwarp', 'hmc_xforms',
                    'itk_bold_to_t1']),
        name='inputnode'
    )

    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'bold_t1', 'bold_t1_ref', 'bold_mask_t1',
            'bold_aseg_t1', 'bold_aparc_t1']),
        name='outputnode'
    )

    gen_ref = pe.Node(GenerateSamplingReference(), name='gen_ref',
                      mem_gb=0.3)  # 256x256x256 * 64 / 8 ~ 150MB

    mask_t1w_tfm = pe.Node(
        ApplyTransforms(interpolation='MultiLabel', float=True),
        name='mask_t1w_tfm', mem_gb=0.1
    )

    workflow.connect([
        (inputnode, gen_ref, [('ref_bold_brain', 'moving_image'),
                              ('t1w_brain', 'fixed_image'),
                              ('t1w_mask', 'fov_mask')]),
        (inputnode, mask_t1w_tfm, [('ref_bold_mask', 'input_image')]),
        (gen_ref, mask_t1w_tfm, [('out_file', 'reference_image')]),
        (inputnode, mask_t1w_tfm, [('itk_bold_to_t1', 'transforms')]),
        (mask_t1w_tfm, outputnode, [('output_image', 'bold_mask_t1')]),
    ])

    if freesurfer:
        # Resample aseg and aparc in T1w space (no transforms needed)
        aseg_t1w_tfm = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', transforms='identity', float=True),
            name='aseg_t1w_tfm', mem_gb=0.1)
        aparc_t1w_tfm = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', transforms='identity', float=True),
            name='aparc_t1w_tfm', mem_gb=0.1)

        workflow.connect([
            (inputnode, aseg_t1w_tfm, [('t1w_aseg', 'input_image')]),
            (inputnode, aparc_t1w_tfm, [('t1w_aparc', 'input_image')]),
            (gen_ref, aseg_t1w_tfm, [('out_file', 'reference_image')]),
            (gen_ref, aparc_t1w_tfm, [('out_file', 'reference_image')]),
            (aseg_t1w_tfm, outputnode, [('output_image', 'bold_aseg_t1')]),
            (aparc_t1w_tfm, outputnode, [('output_image', 'bold_aparc_t1')]),
        ])

    bold_to_t1w_transform = pe.Node(
        MultiApplyTransforms(interpolation="LanczosWindowedSinc", float=True, copy_dtype=True),
        name='bold_to_t1w_transform', mem_gb=mem_gb * 3 * omp_nthreads, n_procs=omp_nthreads)

    # merge 3D volumes into 4D timeseries
    merge = pe.Node(Merge(compress=use_compression), name='merge', mem_gb=mem_gb)

    # Generate a reference on the target T1w space
    gen_final_ref = init_bold_reference_wf(omp_nthreads, pre_mask=True)
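    # ``pre_mask=True``: the reference workflow reuses a precomputed mask
    # (the one resampled into T1w space, connected further below) rather
    # than estimating a new one.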

    if not multiecho:
        # Merge transforms placing the head motion correction last
        nforms = 2 + int(use_fieldwarp)
        merge_xforms = pe.Node(niu.Merge(nforms), name='merge_xforms',
                               run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
        if use_fieldwarp:
            workflow.connect([
                (inputnode, merge_xforms, [('fieldwarp', 'in2')])
            ])

        workflow.connect([
            # merge transforms
            (inputnode, merge_xforms, [
                ('hmc_xforms', 'in%d' % nforms),
                ('itk_bold_to_t1', 'in1')]),
            (merge_xforms, bold_to_t1w_transform, [('out', 'transforms')]),
            (inputnode, bold_to_t1w_transform, [('bold_split', 'input_image')]),
        ])

    else:
        from nipype.interfaces.fsl import Split as FSLSplit
        bold_split = pe.Node(FSLSplit(dimension='t'), name='bold_split',
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, bold_split, [('bold_split', 'in_file')]),
            (bold_split, bold_to_t1w_transform, [('out_files', 'input_image')]),
            (inputnode, bold_to_t1w_transform, [('itk_bold_to_t1', 'transforms')]),
        ])

    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (gen_ref, bold_to_t1w_transform, [('out_file', 'reference_image')]),
        (bold_to_t1w_transform, merge, [('out_files', 'in_files')]),
        (merge, gen_final_ref, [('out_file', 'inputnode.bold_file')]),
        (mask_t1w_tfm, gen_final_ref, [('output_image', 'inputnode.bold_mask')]),
        (merge, outputnode, [('out_file', 'bold_t1')]),
        (gen_final_ref, outputnode, [('outputnode.ref_image', 'bold_t1_ref')]),
    ])

    return workflow
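
In the single-echo branch of ``init_bold_t1_trans_wf`` above, ``merge_xforms`` collects the BOLD-to-T1w affine, the optional SDC fieldwarp, and the per-volume head-motion transforms into one list, so a single resampling step can compose them. Below is a minimal sketch of that list-building step using nipype's ``Merge`` utility in isolation; ``build_xfm_list`` is a hypothetical helper and the file names are placeholders, not fMRIPrep outputs.

from nipype.interfaces import utility as niu


def build_xfm_list(itk_bold_to_t1, hmc_xforms, fieldwarp=None):
    """Collect transforms, BOLD-to-T1w affine first and HMC transforms last."""
    nforms = 2 + int(fieldwarp is not None)
    merge = niu.Merge(nforms)
    merge.inputs.in1 = itk_bold_to_t1             # in1: affine into T1w space
    if fieldwarp is not None:
        merge.inputs.in2 = fieldwarp              # in2: SDC displacement field
    # The head-motion transforms always occupy the last slot.
    setattr(merge.inputs, 'in%d' % nforms, hmc_xforms)
    return merge.run().outputs.out


# Placeholder usage (file names are illustrative, not real outputs):
# build_xfm_list('bold2t1w.txt', ['vol-000.tfm', 'vol-001.tfm'], 'warp.nii.gz')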
Example No. 7
def init_func_preproc_wf(bold_file, has_fieldmap=False):
    """
    This workflow controls the functional preprocessing stages of *fMRIPrep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.tests import mock_config
            from fmriprep import config
            from fmriprep.workflows.bold.base import init_func_preproc_wf
            with mock_config():
                bold_file = config.execution.bids_dir / 'sub-01' / 'func' \
                    / 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz'
                wf = init_func_preproc_wf(str(bold_file))

    Parameters
    ----------
    bold_file
        BOLD series NIfTI file
    has_fieldmap
        Signals the workflow to use inputnode fieldmap files

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_aseg
        Segmentation of structural image, done with FreeSurfer.
    t1w_aparc
        Parcellation of structural image, done with FreeSurfer.
    t1w_tpms
        List of tissue probability maps in T1w space
    template
        List of templates to target
    anat2std_xfm
        List of transform files, collated with templates
    std2anat_xfm
        List of inverse transform files, collated with templates
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w
    bold_ref
        BOLD reference file
    bold_ref_xfm
        Transform file in LTA format from bold to reference
    n_dummy_scans
        Number of nonsteady states at the beginning of the BOLD run

    Outputs
    -------
    bold_t1
        BOLD series, resampled to T1w space
    bold_mask_t1
        BOLD series mask in T1w space
    bold_std
        BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    confounds
        TSV of confounds
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    aroma_noise_ics
        Noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix
    bold_cifti
        BOLD CIFTI image
    cifti_variant
        combination of target spaces for `bold_cifti`

    See Also
    --------

    * :py:func:`~niworkflows.func.util.init_bold_reference_wf`
    * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
    * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
    * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confs_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
    * :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
    * :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
    * :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
    * :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
    * :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.utility import DictMerge

    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10

    # Have some options handy
    omp_nthreads = config.nipype.omp_nthreads
    freesurfer = config.workflow.run_reconall
    spaces = config.workflow.spaces
    nibabies_dir = str(config.execution.nibabies_dir)

    # Extract BIDS entities and metadata from BOLD file(s)
    entities = extract_entities(bold_file)
    layout = config.execution.layout

    # Take first file as reference
    ref_file = pop_file(bold_file)
    metadata = layout.get_metadata(ref_file)
    # get original image orientation
    ref_orientation = get_img_orientation(ref_file)

    echo_idxs = listify(entities.get("echo", []))
    multiecho = len(echo_idxs) > 2
    if len(echo_idxs) == 1:
        config.loggers.workflow.warning(
            f"Running a single echo <{ref_file}> from a seemingly multi-echo dataset."
        )
        bold_file = ref_file  # Just in case - drop the list

    if len(echo_idxs) == 2:
        raise RuntimeError(
            "Multi-echo processing requires at least three different echos (found two)."
        )

    if multiecho:
        # Drop echo entity for future queries, have a boolean shorthand
        entities.pop("echo", None)
        # reorder echoes from shortest to longest echo time
        tes, bold_file = zip(*sorted([(layout.get_metadata(bf)["EchoTime"], bf)
                                      for bf in bold_file]))
        ref_file = bold_file[0]  # Reset reference to be the shortest TE

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    config.loggers.workflow.debug(
        f'Creating bold processing workflow for <{ref_file}> ({mem_gb["filesize"]:.2f} GB '
        f'/ {bold_tlen} TRs). Memory resampled/largemem={mem_gb["resampled"]:.2f}'
        f'/{mem_gb["largemem"]:.2f} GB.')

    # Find associated sbref, if possible
    entities['suffix'] = 'sbref'
    entities['extension'] = ['.nii', '.nii.gz']  # Overwrite extensions
    sbref_files = layout.get(return_type='file', **entities)

    sbref_msg = f"No single-band-reference found for {os.path.basename(ref_file)}."
    if sbref_files and 'sbref' in config.workflow.ignore:
        sbref_msg = "Single-band reference file(s) found and ignored."
    elif sbref_files:
        sbref_msg = "Using single-band reference file(s) {}.".format(','.join(
            [os.path.basename(sbf) for sbf in sbref_files]))
    config.loggers.workflow.info(sbref_msg)

    if has_fieldmap:
        # Search for intended fieldmap
        from pathlib import Path
        import re
        from sdcflows.fieldmaps import get_identifier

        bold_rel = re.sub(r"^sub-[a-zA-Z0-9]*/", "",
                          str(Path(bold_file).relative_to(layout.root)))
        estimator_key = get_identifier(bold_rel)
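        # ``get_identifier`` returns the fieldmap estimator(s) associated with
        # this run (e.g. through IntendedFor/B0FieldIdentifier metadata); if
        # none match, susceptibility distortion correction is disabled below.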
        if not estimator_key:
            has_fieldmap = False
            config.loggers.workflow.critical(
                f"None of the available B0 fieldmaps are associated to <{bold_rel}>"
            )

    # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
    run_stc = (bool(metadata.get("SliceTiming"))
               and 'slicetiming' not in config.workflow.ignore
               and (_get_series_len(ref_file) > 4 or "TooShort"))

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'bold_file',
            # from smriprep
            'anat_preproc',
            'anat_brain',
            'anat_mask',
            'anat_dseg',
            'anat_tpms',
            'anat_aseg',
            'anat_aparc',
            'anat2std_xfm',
            'std2anat_xfm',
            'template',
            # from bold reference workflow
            'bold_ref',
            'bold_ref_xfm',
            'n_dummy_scans',
            # from sdcflows (optional)
            'fmap',
            'fmap_ref',
            'fmap_coeff',
            'fmap_mask',
            'fmap_id',
            # if reconstructing with FreeSurfer (optional)
            'anat2fsnative_xfm',
            'fsnative2anat_xfm',
            'subject_id',
            'subjects_dir',
        ]),
        name='inputnode')
    inputnode.inputs.bold_file = bold_file

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_anat', 'bold_anat_ref', 'bold2anat_xfm', 'anat2bold_xfm',
        'bold_mask_anat', 'bold_aseg_anat', 'bold_aparc_anat', 'bold_std',
        'bold_std_ref', 'bold_mask_std', 'bold_aseg_std', 'bold_aparc_std',
        'bold_native', 'bold_cifti', 'cifti_variant', 'cifti_metadata',
        'cifti_density', 'surfaces', 'confounds', 'aroma_noise_ics',
        'melodic_mix', 'nonaggr_denoised_file', 'confounds_metadata'
    ]),
                         name='outputnode')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                         name='boldbuffer')

    summary = pe.Node(FunctionalSummary(
        slice_timing=run_stc,
        registration=('FSL', 'FreeSurfer')[freesurfer],
        registration_dof=config.workflow.bold2t1w_dof,
        registration_init=config.workflow.bold2t1w_init,
        pe_direction=metadata.get("PhaseEncodingDirection"),
        echo_idx=echo_idxs,
        tr=metadata.get("RepetitionTime"),
        orientation=ref_orientation),
                      name='summary',
                      mem_gb=config.DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)
    summary.inputs.dummy_scans = config.workflow.dummy_scans
    # TODO: SDC: make dynamic
    summary.inputs.distortion_correction = 'None' if not has_fieldmap else 'TOPUP'

    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=config.workflow.cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=nibabies_dir,
        spaces=spaces,
        use_aroma=config.workflow.use_aroma,
        debug=config.execution.debug,
    )

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_anat', 'inputnode.bold_t1'),
            ('bold_anat_ref', 'inputnode.bold_t1_ref'),
            ('bold2anat_xfm', 'inputnode.bold2anat_xfm'),
            ('anat2bold_xfm', 'inputnode.anat2bold_xfm'),
            ('bold_aseg_anat', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_anat', 'inputnode.bold_aparc_t1'),
            ('bold_mask_anat', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surf_files'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_metadata', 'inputnode.cifti_metadata'),
            ('cifti_density', 'inputnode.cifti_density'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
            ('acompcor_masks', 'inputnode.acompcor_masks'),
            ('tcompcor_mask', 'inputnode.tcompcor_mask'),
        ]),
    ])

    # Extract BOLD validation from init_bold_reference_wf
    val_bold = pe.MapNode(
        ValidateImage(),
        name="val_bold",
        mem_gb=config.DEFAULT_MEMORY_MIN_GB,
        iterfield=["in_file"],
    )
    val_bold.inputs.in_file = listify(bold_file)

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'),
                         name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(
        bold2t1w_dof=config.workflow.bold2t1w_dof,
        bold2t1w_init=config.workflow.bold2t1w_init,
        freesurfer=freesurfer,
        mem_gb=mem_gb['resampled'],
        name='bold_reg_wf',
        omp_nthreads=omp_nthreads,
        sloppy=config.execution.sloppy,
        use_bbr=config.workflow.use_bbr,
    )

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)
    if not has_fieldmap:
        bold_t1_trans_wf.inputs.inputnode.fieldwarp = 'identity'

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        freesurfer=freesurfer,
        regressors_all_comps=config.workflow.regressors_all_comps,
        regressors_fd_th=config.workflow.regressors_fd_th,
        regressors_dvars_th=config.workflow.regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=False,  # TODO: Fieldwarp is already applied in new sdcflow
        name='bold_bold_trans_wf')
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (inputnode, bold_stc_wf, [('n_dummy_scans', 'inputnode.skip_vols')
                                      ]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([(val_bold, bold_stc_wf, [
                (("out_file", pop_file), 'inputnode.bold_file')
            ])])
        else:  # for multi-echo EPI, iterate the STC workflow over all echoes
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([(meepi_echos, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC not run: series too short or STC disabled
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([(val_bold, boldbuffer, [(("out_file", pop_file),
                                                   'bold_file')])])
    else:
        # for multi-echo EPI, iterate over all echoes into boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:  # instantiate relevant interfaces, imports
        from niworkflows.func.util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        split_opt_comb = bold_split.clone(name='split_opt_comb')

        inputnode.inputs.bold_file = ref_file  # Replace reference with first echo

        join_echos = pe.JoinNode(
            niu.IdentityInterface(
                fields=['bold_files', 'skullstripped_bold_files']),
            joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
            joinfield=['bold_files', 'skullstripped_bold_files'],
            name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       name='bold_t2smap_wf')

    # Mask BOLD reference image
    final_boldref_masker = pe.Node(BrainExtraction(),
                                   name='final_boldref_masker')

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        # The BOLD buffer holds the slice-time-corrected series if STC ran,
        # the original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (inputnode, bold_hmc_wf, [('bold_ref', 'inputnode.raw_ref_image')]),
        (inputnode, final_boldref_masker, [('bold_ref', 'in_file')]),
        (val_bold, bold_hmc_wf, [(("out_file", pop_file),
                                  'inputnode.bold_file')]),
        (inputnode, summary, [('n_dummy_scans', 'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (
            inputnode,
            bold_reg_wf,
            [
                ('anat_dseg', 'inputnode.t1w_dseg'),
                # Undefined if --fs-no-reconall, but this is safe
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('fsnative2anat_xfm', 'inputnode.fsnative2t1w_xfm')
            ]),
        (inputnode, bold_reg_wf, [('anat_brain', 'inputnode.t1w_brain')]),
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('anat_mask', 'inputnode.t1w_mask'),
                                       ('anat_brain', 'inputnode.t1w_brain'),
                                       ('anat_aseg', 'inputnode.t1w_aseg'),
                                       ('anat_aparc', 'inputnode.t1w_aparc')]),
        (bold_reg_wf, outputnode,
         [('outputnode.itk_bold_to_t1', 'bold2anat_xfm'),
          ('outputnode.itk_t1_to_bold', 'anat2bold_xfm')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.itk_bold_to_t1',
                                          'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode,
         [('outputnode.bold_t1', 'bold_anat'),
          ('outputnode.bold_t1_ref', 'bold_anat_ref'),
          ('outputnode.bold_aseg_t1', 'bold_aseg_anat'),
          ('outputnode.bold_aparc_t1', 'bold_aparc_anat')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('anat_tpms', 'inputnode.t1w_tpms'),
                                        ('anat_mask', 'inputnode.t1w_mask')]),
        (bold_hmc_wf, bold_confounds_wf,
         [('outputnode.movpar_file', 'inputnode.movpar_file'),
          ('outputnode.rmsd_file', 'inputnode.rmsd_file')]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
        (inputnode, bold_confounds_wf, [('n_dummy_scans',
                                         'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
            ('outputnode.confounds_metadata', 'confounds_metadata'),
            ('outputnode.acompcor_masks', 'acompcor_masks'),
            ('outputnode.tcompcor_mask', 'tcompcor_mask'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [('out_files', 'inputnode.bold_file')]
         ),
        (bold_hmc_wf, bold_bold_trans_wf, [('outputnode.xforms',
                                            'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        # TODO: Add SDC
        workflow.connect([
            (inputnode, func_derivatives_wf, [('bold_file',
                                               'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf, [('outputnode.bold',
                                                      'inputnode.bold')]),
            # (bold_bold_trans_wf, final_boldref_wf, [
            #     ('outputnode.bold', 'inputnode.bold_file')]),
            (bold_split, bold_t1_trans_wf, [('out_files',
                                             'inputnode.bold_split')]),
            (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms',
                                              'inputnode.hmc_xforms')]),
            # (bold_sdc_wf, bold_t1_trans_wf, [
            #     ('outputnode.out_warp', 'inputnode.fieldwarp')])
        ])
    else:  # for meepi, use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf,
             [(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, join_echos, [('outputnode.bold', 'bold_files')
                                              ]),
            # (join_echos, final_boldref_wf, [
            #     ('bold_files', 'inputnode.bold_file')]),
            # TODO: Check with multi-echo data
            (bold_bold_trans_wf, skullstrip_bold_wf, [('outputnode.bold',
                                                       'inputnode.in_file')]),
            (skullstrip_bold_wf, join_echos,
             [('outputnode.skull_stripped_file', 'skullstripped_bold_files')]),
            (join_echos, bold_t2s_wf, [('skullstripped_bold_files',
                                        'inputnode.bold_file')]),
            (bold_t2s_wf, bold_confounds_wf, [('outputnode.bold',
                                               'inputnode.bold')]),
            (bold_t2s_wf, split_opt_comb, [('outputnode.bold', 'in_file')]),
            (split_opt_comb, bold_t1_trans_wf, [('out_files',
                                                 'inputnode.bold_split')]),
        ])

        # Already applied in bold_bold_trans_wf, whose output feeds bold_t2s_wf
        bold_t1_trans_wf.inputs.inputnode.fieldwarp = 'identity'
        bold_t1_trans_wf.inputs.inputnode.hmc_xforms = 'identity'

    # Map final BOLD mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(('T1w', 'anat')):
        from niworkflows.interfaces.fixes import (FixHeaderApplyTransforms as
                                                  ApplyTransforms)

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel'),
                                  name='boldmask_to_t1w',
                                  mem_gb=0.1)
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.itk_bold_to_t1',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_anat')
                                           ]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        workflow.connect([
            (inputnode, func_derivatives_wf, [
                ('bold_ref', 'inputnode.bold_native_ref'),
            ]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf, outputnode,
             [('outputnode.bold', 'bold_native')])
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            name='bold_std_trans_wf',
            use_compression=not config.execution.low_mem,
        )
        if not has_fieldmap:
            bold_std_trans_wf.inputs.inputnode.fieldwarp = 'identity'

        workflow.connect([
            (inputnode, bold_std_trans_wf,
             [('template', 'inputnode.templates'),
              ('anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('bold_file', 'inputnode.name_source'),
              ('anat_aseg', 'inputnode.bold_aseg'),
              ('anat_aparc', 'inputnode.bold_aparc')]),
            (bold_reg_wf, bold_std_trans_wf, [('outputnode.itk_bold_to_t1',
                                               'inputnode.itk_bold_to_t1')]),
            (bold_std_trans_wf, outputnode,
             [('outputnode.bold_std', 'bold_std'),
              ('outputnode.bold_std_ref', 'bold_std_ref'),
              ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('outputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode,
                 [('outputnode.bold_aseg_std', 'bold_aseg_std'),
                  ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        if not multiecho:
            # TODO: Add SDC
            workflow.connect([
                (bold_split, bold_std_trans_wf, [('out_files',
                                                  'inputnode.bold_split')]),
                # (bold_sdc_wf, bold_std_trans_wf, [
                #     ('outputnode.out_warp', 'inputnode.fieldwarp')]),
                (bold_hmc_wf, bold_std_trans_wf, [('outputnode.xforms',
                                                   'inputnode.hmc_xforms')]),
            ])
        else:
            workflow.connect([(split_opt_comb, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])

            # Already applied in bold_bold_trans_wf, whose output feeds bold_t2s_wf
            bold_std_trans_wf.inputs.inputnode.fieldwarp = 'identity'
            bold_std_trans_wf.inputs.inputnode.hmc_xforms = 'identity'

        # func_derivatives_wf internally parametrizes over snapshotted spaces.
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.spatial_reference',
                 'inputnode.spatial_reference'),
                ('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('outputnode.bold_std', 'inputnode.bold_std'),
                ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])

        if config.workflow.use_aroma:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf
            ica_aroma_wf = init_ica_aroma_wf(
                mem_gb=mem_gb['resampled'],
                metadata=metadata,
                omp_nthreads=omp_nthreads,
                err_on_aroma_warn=config.workflow.aroma_err_on_warn,
                aroma_melodic_dim=config.workflow.aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(niu.Function(output_names=["out_file"],
                                        function=_to_join),
                           name='aroma_confounds')

            mrg_conf_metadata = pe.Node(niu.Merge(2),
                                        name='merge_confound_metadata',
                                        run_without_submitting=True)
            mrg_conf_metadata2 = pe.Node(DictMerge(),
                                         name='merge_confound_metadata2',
                                         run_without_submitting=True)
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf, [('bold_file',
                                            'inputnode.name_source')]),
                (bold_hmc_wf, ica_aroma_wf, [('outputnode.movpar_file',
                                              'inputnode.movpar_file')]),
                (inputnode, ica_aroma_wf, [('n_dummy_scans',
                                            'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [('outputnode.confounds_file',
                                            'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                 [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join, [('outputnode.aroma_confounds',
                                       'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                 [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                 [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                  ('outputnode.melodic_mix', 'melodic_mix'),
                  ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')
                  ]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict',
                                                   'confounds_metadata')]),
                (bold_std_trans_wf, ica_aroma_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
            ])

    # SURFACES ##################################################################################
    # Freesurfer
    freesurfer_spaces = spaces.get_fs_spaces()
    if freesurfer and freesurfer_spaces:
        config.loggers.workflow.debug(
            'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(
            mem_gb=mem_gb['resampled'],
            surface_spaces=freesurfer_spaces,
            medial_surface_nan=config.workflow.medial_surface_nan,
            name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf,
             [('subjects_dir', 'inputnode.subjects_dir'),
              ('subject_id', 'inputnode.subject_id'),
              ('anat2fsnative_xfm', 'inputnode.t1w2fsnative_xfm')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1',
                                               'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
            (bold_surf_wf, func_derivatives_wf, [('outputnode.target',
                                                  'inputnode.surf_refs')]),
        ])

        # CIFTI output
        if config.workflow.cifti_output:
            from .resampling import init_bold_grayords_wf
            bold_grayords_wf = init_bold_grayords_wf(
                grayord_density=config.workflow.cifti_output,
                mem_gb=mem_gb['resampled'],
                repetition_time=metadata['RepetitionTime'])

            workflow.connect([
                (inputnode, bold_grayords_wf, [('subjects_dir',
                                                'inputnode.subjects_dir')]),
                (bold_std_trans_wf, bold_grayords_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
                (bold_surf_wf, bold_grayords_wf, [
                    ('outputnode.surfaces', 'inputnode.surf_files'),
                    ('outputnode.target', 'inputnode.surf_refs'),
                ]),
                (bold_grayords_wf, outputnode,
                 [('outputnode.cifti_bold', 'bold_cifti'),
                  ('outputnode.cifti_variant', 'cifti_variant'),
                  ('outputnode.cifti_metadata', 'cifti_metadata'),
                  ('outputnode.cifti_density', 'cifti_density')]),
            ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        if not config.workflow.cifti_output:
            config.loggers.workflow.critical(
                "The carpetplot requires CIFTI outputs")
        else:
            carpetplot_wf = init_carpetplot_wf(
                mem_gb=mem_gb['resampled'],
                metadata=metadata,
                cifti_output=bool(config.workflow.cifti_output),
                name='carpetplot_wf')

            workflow.connect([
                (bold_grayords_wf, carpetplot_wf, [('outputnode.cifti_bold',
                                                    'inputnode.cifti_bold')]),
                (bold_confounds_wf, carpetplot_wf,
                 [('outputnode.confounds_file', 'inputnode.confounds_file')]),
            ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(DerivativesDataSink(
        desc='summary', datatype="figures", dismiss_entities=("echo", )),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(DerivativesDataSink(
        desc='validation', datatype="figures", dismiss_entities=("echo", )),
                                   name='ds_report_validation',
                                   run_without_submitting=True,
                                   mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (val_bold, ds_report_validation, [(("out_report", pop_file), 'in_file')
                                          ]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = nibabies_dir
            workflow.get_node(node).inputs.source_file = ref_file
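    # (illustrative) this sweep catches datasinks such as 'ds_report_summary',
    # 'ds_report_validation' and any 'ds_report_*' node nested inside sub-workflows,
    # so their reportlets all land under the same base directory and source file.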

    # Distortion correction
    if not has_fieldmap:
        # fmt: off
        # Finalize workflow with fieldmap-less connections
        workflow.connect([
            (inputnode, final_boldref_masker, [('bold_ref', 'in_file')]),
            (final_boldref_masker, bold_t1_trans_wf, [
                ('out_mask', 'inputnode.ref_bold_mask'),
                ('out_file', 'inputnode.ref_bold_brain'),
            ]),
            (final_boldref_masker, bold_reg_wf, [
                ('out_file', 'inputnode.ref_bold_brain'),
            ]),
            (final_boldref_masker, bold_confounds_wf,
             [('out_mask', 'inputnode.bold_mask')]),
        ])

        if nonstd_spaces.intersection(('T1w', 'anat')):
            workflow.connect([
                (final_boldref_masker, boldmask_to_t1w, [('out_mask',
                                                          'input_image')]),
            ])
        #         (final_boldref_wf, boldmask_to_t1w, [('outputnode.bold_mask', 'input_image')]),
        #     ])

        if nonstd_spaces.intersection(
            ('func', 'run', 'bold', 'boldref', 'sbref')):
            workflow.connect([
                (final_boldref_masker, func_derivatives_wf,
                 [('out_file', 'inputnode.bold_native_ref'),
                  ('out_mask', 'inputnode.bold_mask_native')]),
            ])
        #         (final_boldref_wf, func_derivatives_wf, [
        #             ('outputnode.ref_image', 'inputnode.bold_native_ref'),
        #             ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        #     ])

        if spaces.get_spaces(nonstandard=False, dim=(3, )):
            workflow.connect([
                (final_boldref_masker, bold_std_trans_wf,
                 [('out_mask', 'inputnode.bold_mask')]),
            ])
        #         (final_boldref_wf, bold_std_trans_wf, [
        #             ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        #     ])

        # fmt: on
        return workflow

    from niworkflows.interfaces.reportlets.registration import (
        SimpleBeforeAfterRPT as SimpleBeforeAfter, )
    from niworkflows.interfaces.utility import KeySelect
    from sdcflows.workflows.apply.registration import init_coeff2epi_wf
    from sdcflows.workflows.apply.correction import init_unwarp_wf

    coeff2epi_wf = init_coeff2epi_wf(
        debug="fieldmaps" in config.execution.debug,
        omp_nthreads=config.nipype.omp_nthreads,
        write_coeff=True,
    )
    unwarp_wf = init_unwarp_wf(debug="fieldmaps" in config.execution.debug,
                               omp_nthreads=config.nipype.omp_nthreads)
    unwarp_wf.inputs.inputnode.metadata = layout.get_metadata(str(bold_file))

    output_select = pe.Node(
        KeySelect(fields=["fmap", "fmap_ref", "fmap_coeff", "fmap_mask"]),
        name="output_select",
        run_without_submitting=True,
    )
    output_select.inputs.key = estimator_key[0]
    if len(estimator_key) > 1:
        config.loggers.workflow.warning(
            f"Several fieldmaps <{', '.join(estimator_key)}> are "
            f"'IntendedFor' <{bold_file}>, using {estimator_key[0]}")

    sdc_report = pe.Node(
        SimpleBeforeAfter(before_label="Distorted", after_label="Corrected"),
        name="sdc_report",
        mem_gb=0.1,
    )

    ds_report_sdc = pe.Node(
        DerivativesDataSink(base_directory=nibabies_dir,
                            desc="sdc",
                            suffix="bold",
                            datatype="figures",
                            dismiss_entities=("echo", )),
        name="ds_report_sdc",
        run_without_submitting=True,
    )

    unwarp_masker = pe.Node(BrainExtraction(), name='unwarp_masker')

    # fmt: off
    workflow.connect([
        (inputnode, output_select, [("fmap", "fmap"), ("fmap_ref", "fmap_ref"),
                                    ("fmap_coeff", "fmap_coeff"),
                                    ("fmap_mask", "fmap_mask"),
                                    ("fmap_id", "keys")]),
        (output_select, coeff2epi_wf, [("fmap_ref", "inputnode.fmap_ref"),
                                       ("fmap_coeff", "inputnode.fmap_coeff"),
                                       ("fmap_mask", "inputnode.fmap_mask")]),
        (inputnode, coeff2epi_wf, [("bold_ref", "inputnode.target_ref")]),
        (final_boldref_masker, coeff2epi_wf, [("out_file",
                                               "inputnode.target_mask")]),
        (inputnode, unwarp_wf, [("bold_ref", "inputnode.distorted")]),
        (coeff2epi_wf, unwarp_wf, [("outputnode.fmap_coeff",
                                    "inputnode.fmap_coeff")]),
        (inputnode, sdc_report, [("bold_ref", "before")]),
        (unwarp_wf, sdc_report, [("outputnode.corrected", "after"),
                                 ("outputnode.corrected_mask", "wm_seg")]),
        (inputnode, ds_report_sdc, [("bold_file", "source_file")]),
        (sdc_report, ds_report_sdc, [("out_report", "in_file")]),
        # remaining workflow connections
        (unwarp_wf, unwarp_masker, [('outputnode.corrected', 'in_file')]),
        (unwarp_masker, bold_confounds_wf, [('out_mask', 'inputnode.bold_mask')
                                            ]),
        (unwarp_masker, bold_t1_trans_wf,
         [('out_mask', 'inputnode.ref_bold_mask'),
          ('out_file', 'inputnode.ref_bold_brain')]),
        # (unwarp_masker, bold_bold_trans_wf, [
        #     ('out_mask', 'inputnode.bold_mask')]),  # Not used within workflow
        (unwarp_masker, bold_reg_wf, [('out_file', 'inputnode.ref_bold_brain')]
         ),
        # TODO: Add distortion correction method to sdcflow outputs?
        # (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')]),
    ])

    if nonstd_spaces.intersection(('T1w', 'anat')):
        workflow.connect([
            (unwarp_masker, boldmask_to_t1w, [('out_mask', 'input_image')]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        workflow.connect([
            (unwarp_masker, func_derivatives_wf,
             [('out_file', 'inputnode.bold_native_ref'),
              ('out_mask', 'inputnode.bold_mask_native')]),
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        workflow.connect([
            (unwarp_masker, bold_std_trans_wf, [('out_mask',
                                                 'inputnode.bold_mask')]),
        ])
    # fmt: on

    # if not multiecho:
    #     (bold_sdc_wf, bold_t1_trans_wf, [
    #             ('outputnode.out_warp', 'inputnode.fieldwarp')])
    #     (bold_sdc_wf, bold_std_trans_wf, [
    #         ('outputnode.out_warp', 'inputnode.fieldwarp')]),
    # ])
    return workflow
Example #8
def init_asl_preproc_wf(asl_file):
    """
    This workflow controls the functional preprocessing stages of *aslprep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from aslprep.workflows.tests import mock_config
            from aslprep import config
            from aslprep.workflows.asl.base import init_asl_preproc_wf
            with mock_config():
                asl_file = config.execution.bids_dir / 'sub-01' / 'perf' / 'sub-01_task-restEyesOpen_asl.nii.gz'
                wf = init_asl_preproc_wf(str(asl_file))

    Parameters
    ----------
    asl_file
        asl series NIfTI file

    Inputs
    ------
    asl_file
        asl series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_tpms
        List of tissue probability maps in T1w space
    template
        List of templates to target
    anat2std_xfm
        List of transform files, collated with templates
    std2anat_xfm
        List of inverse transform files, collated with templates
    

    Outputs
    -------
    asl_t1
        asl series, resampled to T1w space
    asl_mask_t1
        asl series mask in T1w space
    asl_std
        asl series, resampled to template space
    asl_mask_std
        asl series mask in template space
    confounds
        TSV of confounds
    cbf_t1
        CBF time series in T1w space
    meancbf_t1
        mean CBF in T1w space
    scorecbf_t1
        SCORE-corrected CBF time series in T1w space
    avgscorecbf_t1
        mean SCORE-corrected CBF in T1w space
    scrub_t1, pv_t1, basil_t1
        SCRUB, partial-volume-corrected, and BASIL CBF in T1w space
    cbf_std
        CBF time series in template space
    meancbf_std
        mean CBF in template space
    scorecbf_std
        SCORE-corrected CBF time series in template space
    avgscorecbf_std
        mean SCORE-corrected CBF in template space
    scrub_std, pv_std, basil_std
        SCRUB, partial-volume-corrected, and BASIL CBF in template space
    qc_file
        quality control measures

    See Also
    --------

    * :py:func:`~aslprep.niworkflows.func.util.init_asl_reference_wf`
    * :py:func:`~aslprep.workflows.asl.stc.init_asl_stc_wf`
    * :py:func:`~aslprep.workflows.asl.hmc.init_asl_hmc_wf`
    * :py:func:`~aslprep.workflows.asl.t2s.init_asl_t2s_wf`
    * :py:func:`~aslprep.workflows.asl.registration.init_asl_t1_trans_wf`
    * :py:func:`~aslprep.workflows.asl.registration.init_asl_reg_wf`
    * :py:func:`~aslprep.workflows.asl.confounds.init_asl_confounds_wf`
    * :py:func:`~aslprep.workflows.asl.confounds.init_ica_aroma_wf`
    * :py:func:`~aslprep.workflows.asl.resampling.init_asl_std_trans_wf`
    * :py:func:`~aslprep.workflows.asl.resampling.init_asl_preproc_trans_wf`
    * :py:func:`~aslprep.workflows.asl.resampling.init_asl_surf_wf`
    * :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
    * :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
    * :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
    * :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
    * :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`

    """
    from ...niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from ...niworkflows.func.util import init_asl_reference_wf
    from ...niworkflows.interfaces.nibabel import ApplyMask
    from ...niworkflows.interfaces.utility import KeySelect
    from sdcflows.workflows.base import init_sdc_estimate_wf, fieldmap_wrangler

    ref_file = asl_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    asl_tlen = 10
    multiecho = isinstance(asl_file, list)

    # Have some options handy
    layout = config.execution.layout
    omp_nthreads = config.nipype.omp_nthreads
    spaces = config.workflow.spaces
    output_dir = str(config.execution.output_dir)
    dummyvols = config.workflow.dummy_vols
    smoothkernel = config.workflow.smooth_kernel
    mscale = config.workflow.m0_scale
    scorescrub = config.workflow.scorescrub
    basil = config.workflow.basil

    if multiecho:
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in asl_file]
        ref_file = dict(zip(tes, asl_file))[min(tes)]
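        # (illustrative) e.g. tes = [0.0127, 0.0282, 0.0437] -> ref_file becomes the
        # shortest-echo image; the echo times above are made-up example values.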

    if os.path.isfile(ref_file):
        asl_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    config.loggers.workflow.debug(
        'Creating asl processing workflow for "%s" (%.2f GB / %d TRs). '
        'Memory resampled/largemem=%.2f/%.2f GB.', ref_file,
        mem_gb['filesize'], asl_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # Find associated sbref, if possible
    entities = layout.parse_file_entities(ref_file)
    entities['suffix'] = 'sbref'
    entities['extension'] = ['nii', 'nii.gz']  # Overwrite extensions
    files = layout.get(return_type='file', **entities)
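    # (illustrative) after the overrides above, ``entities`` might look like
    # {'subject': '01', 'task': 'rest', 'suffix': 'sbref',
    #  'extension': ['nii', 'nii.gz'], ...}, i.e. the same entities as the ASL run
    # but retargeted at a single-band reference image.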
    refbase = os.path.basename(ref_file)
    if 'sbref' in config.workflow.ignore:
        config.loggers.workflow.info("Single-band reference files ignored.")
    elif files and multiecho:
        config.loggers.workflow.warning(
            "Single-band reference found, but not supported in "
            "multi-echo workflows at this time. Ignoring.")
    elif files:
        sbref_file = files[0]
        sbbase = os.path.basename(sbref_file)
        if len(files) > 1:
            config.loggers.workflow.warning(
                "Multiple single-band reference files found for {}; using "
                "{}".format(refbase, sbbase))
        else:
            config.loggers.workflow.info(
                "Using single-band reference file %s.", sbbase)
    else:
        config.loggers.workflow.info("No single-band-reference found for %s.",
                                     refbase)

    metadata = layout.get_metadata(ref_file)

    # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
    fmaps = None
    if 'fieldmaps' not in config.workflow.ignore:
        fmaps = fieldmap_wrangler(layout,
                                  ref_file,
                                  use_syn=config.workflow.use_syn_sdc,
                                  force_syn=config.workflow.force_syn)
    elif config.workflow.use_syn_sdc or config.workflow.force_syn:
        # If fieldmaps are not enabled, activate SyN-SDC in unforced (False) mode
        fmaps = {'syn': False}

    # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
    run_stc = (bool(metadata.get("SliceTiming"))
               and 'slicetiming' not in config.workflow.ignore
               and (_get_series_len(ref_file) > 4 or "TooShort"))
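    # (illustrative) run_stc can therefore take three values:
    #   True       - SliceTiming present, not ignored, and more than 4 volumes
    #   'TooShort' - SliceTiming present and not ignored, but 4 or fewer volumes
    #   False      - no SliceTiming metadata, or 'slicetiming' was ignored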

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'asl_file', 't1w_preproc', 't1w_mask', 't1w_dseg', 't1w_tpms',
        'anat2std_xfm', 'std2anat_xfm', 'template'
    ]),
                        name='inputnode')
    inputnode.inputs.asl_file = asl_file
    subj_dir = os.path.join(
        str(config.execution.bids_dir),
        'sub-' + str(config.execution.participant_label[0]))
    if sbref_file is not None:
        from ...niworkflows.interfaces.images import ValidateImage
        val_sbref = pe.Node(ValidateImage(in_file=sbref_file),
                            name='val_sbref')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'asl_t1', 'asl_t1_ref', 'asl_mask_t1', 'asl_std', 'asl_std_ref',
        'asl_mask_std', 'asl_native', 'cbf_t1', 'cbf_std', 'meancbf_t1',
        'meancbf_std', 'score_t1', 'score_std', 'avgscore_t1', 'avgscore_std',
        'scrub_t1', 'scrub_std', 'basil_t1', 'basil_std', 'pv_t1', 'pv_std',
        'pv_native', 'att', 'att_t1', 'att_std', 'confounds',
        'confounds_metadata', 'qc_file'
    ]),
                         name='outputnode')

    # Generate a brain-masked conversion of the t1w
    t1w_brain = pe.Node(ApplyMask(), name='t1w_brain')

    # asl buffer: an identity used as a pointer to either the original asl
    # or the STC'ed one for further use.
    aslbuffer = pe.Node(niu.IdentityInterface(fields=['asl_file']),
                        name='aslbuffer')

    summary = pe.Node(FunctionalSummary(
        slice_timing=run_stc,
        registration='FSL',
        registration_dof=config.workflow.asl2t1w_dof,
        registration_init=config.workflow.asl2t1w_init,
        pe_direction=metadata.get("PhaseEncodingDirection"),
        tr=metadata.get("RepetitionTime")),
                      name='summary',
                      mem_gb=config.DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)
    #summary.inputs.dummy_scans = config.workflow.dummy_scans

    asl_derivatives_wf = init_asl_derivatives_wf(bids_root=layout.root,
                                                 metadata=metadata,
                                                 output_dir=output_dir,
                                                 spaces=spaces,
                                                 scorescrub=scorescrub,
                                                 basil=basil)

    workflow.connect([
        (outputnode, asl_derivatives_wf, [
            ('asl_t1', 'inputnode.asl_t1'),
            ('asl_t1_ref', 'inputnode.asl_t1_ref'),
            ('asl_mask_t1', 'inputnode.asl_mask_t1'),
            ('asl_native', 'inputnode.asl_native'),
            ('confounds', 'inputnode.confounds'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
        ]),
    ])

    # Generate a tentative aslref
    asl_reference_wf = init_asl_reference_wf(omp_nthreads=omp_nthreads)
    asl_reference_wf.inputs.inputnode.dummy_scans = 0
    if sbref_file is not None:
        workflow.connect([
            (val_sbref, asl_reference_wf, [('out_file', 'inputnode.sbref_file')
                                           ]),
        ])

    # Top-level asl splitter
    asl_split = pe.Node(FSLSplit(dimension='t'),
                        name='asl_split',
                        mem_gb=mem_gb['filesize'] * 3)

    # HMC on the asl
    asl_hmc_wf = init_asl_hmc_wf(name='asl_hmc_wf',
                                 mem_gb=mem_gb['filesize'],
                                 omp_nthreads=omp_nthreads)

    # calculate asl registration to T1w
    asl_reg_wf = init_asl_reg_wf(
        asl2t1w_dof=config.workflow.asl2t1w_dof,
        asl2t1w_init=config.workflow.asl2t1w_init,
        mem_gb=mem_gb['resampled'],
        name='asl_reg_wf',
        omp_nthreads=omp_nthreads,
        sloppy=config.execution.debug,
        use_bbr=config.workflow.use_bbr,
        use_compression=False,
    )

    # apply asl registration to T1w
    nonstd_spaces = set(spaces.get_nonstandard())
    t1cbfspace = bool(nonstd_spaces.intersection(('T1w', 'anat')))

    asl_t1_trans_wf = init_asl_t1_trans_wf(name='asl_t1_trans_wf',
                                           use_fieldwarp=bool(fmaps),
                                           multiecho=multiecho,
                                           cbft1space=t1cbfspace,
                                           scorescrub=scorescrub,
                                           basil=basil,
                                           mem_gb=mem_gb['resampled'],
                                           omp_nthreads=omp_nthreads,
                                           use_compression=False)

    # get confounds
    asl_confounds_wf = init_asl_confs_wf(mem_gb=mem_gb['largemem'],
                                         metadata=metadata,
                                         name='asl_confounds_wf')
    asl_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    asl_asl_trans_wf = init_asl_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=bool(fmaps),
        name='asl_asl_trans_wf')
    asl_asl_trans_wf.inputs.inputnode.name_source = ref_file

    #refinemaskj = pe.Node(refinemask(),mem_gb=0.2,
    #run_without_submitting=True,
    #name="refinemask")

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        asl_stc_wf = init_asl_stc_wf(name='asl_stc_wf', metadata=metadata)
        workflow.connect([
            (asl_reference_wf, asl_stc_wf, [('outputnode.skip_vols',
                                             'inputnode.skip_vols')]),
            (asl_stc_wf, aslbuffer, [('outputnode.stc_file', 'asl_file')]),
        ])
        if not multiecho:
            workflow.connect([(asl_reference_wf, asl_stc_wf, [
                ('outputnode.asl_file', 'inputnode.asl_file')
            ])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = aslbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('asl_file', asl_file)
            workflow.connect([(meepi_echos, asl_stc_wf,
                               [('asl_file', 'inputnode.asl_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original asl to the splitter through aslbuffer
        workflow.connect([(asl_reference_wf, aslbuffer,
                           [('outputnode.asl_file', 'asl_file')])])
    else:
        # for meepi, iterate over all meepi echos to aslbuffer
        aslbuffer.iterables = ('asl_file', asl_file)

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    asl_sdc_wf = init_sdc_estimate_wf(fmaps,
                                      metadata,
                                      omp_nthreads=omp_nthreads,
                                      debug=config.execution.debug)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from ...niworkflows.func.util import init_skullstrip_asl_wf
        skullstrip_asl_wf = init_skullstrip_asl_wf(name='skullstrip_asl_wf')

        inputnode.inputs.asl_file = ref_file  # Replace reference w first echo

        join_echos = pe.JoinNode(
            niu.IdentityInterface(fields=['asl_files']),
            joinsource=('meepi_echos' if run_stc is True else 'aslbuffer'),
            joinfield=['asl_files'],
            name='join_echos')

        # create optimal combination, adaptive T2* map
        asl_t2s_wf = init_asl_t2s_wf(echo_times=tes,
                                     mem_gb=mem_gb['resampled'],
                                     omp_nthreads=omp_nthreads,
                                     name='asl_t2smap_wf')

        workflow.connect([
            (skullstrip_asl_wf, join_echos, [('outputnode.skull_stripped_file',
                                              'asl_files')]),
            (join_echos, asl_t2s_wf, [('asl_files', 'inputnode.asl_file')]),
        ])

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        (inputnode, t1w_brain, [('t1w_preproc', 'in_file'),
                                ('t1w_mask', 'in_mask')]),
        # Generate early reference
        (inputnode, asl_reference_wf, [('asl_file', 'inputnode.asl_file')]),
        # asl buffer has slice-time corrected if it was run, original otherwise
        (aslbuffer, asl_split, [('asl_file', 'in_file')]),
        # HMC
        (asl_reference_wf, asl_hmc_wf,
         [('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
          ('outputnode.asl_file', 'inputnode.asl_file')]),
        #(asl_reference_wf, summary, [
        #('outputnode.algo_dummy_scans', 'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (inputnode, asl_reg_wf, [
            ('t1w_dseg', 'inputnode.t1w_dseg'),
        ]),
        (t1w_brain, asl_reg_wf, [('out_file', 'inputnode.t1w_brain')]),
        (inputnode, asl_t1_trans_wf, [
            ('asl_file', 'inputnode.name_source'),
            ('t1w_mask', 'inputnode.t1w_mask'),
        ]),
        (t1w_brain, asl_t1_trans_wf, [('out_file', 'inputnode.t1w_brain')]),
        # unused if multiecho, but this is safe
        (asl_hmc_wf, asl_t1_trans_wf, [('outputnode.xforms',
                                        'inputnode.hmc_xforms')]),
        (asl_reg_wf, asl_t1_trans_wf, [('outputnode.itk_asl_to_t1',
                                        'inputnode.itk_asl_to_t1')]),
        (asl_t1_trans_wf, outputnode, [
            ('outputnode.asl_t1', 'asl_t1'),
            ('outputnode.asl_t1_ref', 'asl_t1_ref'),
        ]),
        (asl_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (t1w_brain, asl_sdc_wf, [('out_file', 'inputnode.t1w_brain')]),
        (asl_reference_wf, asl_sdc_wf,
         [('outputnode.ref_image', 'inputnode.epi_file'),
          ('outputnode.ref_image_brain', 'inputnode.epi_brain'),
          ('outputnode.asl_mask', 'inputnode.epi_mask')]),
        (asl_sdc_wf, asl_t1_trans_wf,
         [('outputnode.out_warp', 'inputnode.fieldwarp'),
          ('outputnode.epi_mask', 'inputnode.ref_asl_mask'),
          ('outputnode.epi_brain', 'inputnode.ref_asl_brain')]),
        (asl_sdc_wf, asl_asl_trans_wf,
         [('outputnode.out_warp', 'inputnode.fieldwarp'),
          ('outputnode.epi_mask', 'inputnode.asl_mask')]),
        (asl_sdc_wf, asl_reg_wf, [('outputnode.epi_brain',
                                   'inputnode.ref_asl_brain')]),
        (asl_sdc_wf, summary, [('outputnode.method', 'distortion_correction')
                               ]),
        # Connect asl_confounds_wf
        (inputnode, asl_confounds_wf, [('t1w_tpms', 'inputnode.t1w_tpms'),
                                       ('t1w_mask', 'inputnode.t1w_mask')]),
        (asl_hmc_wf, asl_confounds_wf, [('outputnode.movpar_file',
                                         'inputnode.movpar_file')]),
        (asl_reg_wf, asl_confounds_wf, [('outputnode.itk_t1_to_asl',
                                         'inputnode.t1_asl_xform')]),
        (asl_reference_wf, asl_confounds_wf, [('outputnode.skip_vols',
                                               'inputnode.skip_vols')]),
        (asl_asl_trans_wf, asl_confounds_wf, [
            ('outputnode.asl_mask', 'inputnode.asl_mask'),
        ]),
        (asl_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        (asl_confounds_wf, outputnode, [
            ('outputnode.confounds_metadata', 'confounds_metadata'),
        ]),
        # Connect asl_asl_trans_wf
        (asl_split, asl_asl_trans_wf, [('out_files', 'inputnode.asl_file')]),
        (asl_hmc_wf, asl_asl_trans_wf, [('outputnode.xforms',
                                         'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, asl_derivatives_wf, [('asl_file',
                                              'inputnode.source_file')]),
            (asl_asl_trans_wf, asl_confounds_wf, [('outputnode.asl',
                                                   'inputnode.asl')]),
            (asl_split, asl_t1_trans_wf, [('out_files', 'inputnode.asl_split')
                                          ]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, asl_derivatives_wf,
             [(('asl_file', combine_meepi_source), 'inputnode.source_file')]),
            (asl_asl_trans_wf, skullstrip_asl_wf, [('outputnode.asl',
                                                    'inputnode.in_file')]),
            (asl_t2s_wf, asl_confounds_wf, [('outputnode.asl', 'inputnode.asl')
                                            ]),
            (asl_t2s_wf, asl_t1_trans_wf, [('outputnode.asl',
                                            'inputnode.asl_split')]),
        ])

    # compute  the CBF here
    compt_cbf_wf = init_cbf_compt_wf(name='compt_cbf_wf',
                                     mem_gb=mem_gb['filesize'],
                                     omp_nthreads=omp_nthreads,
                                     dummy_vols=dummyvols,
                                     M0Scale=mscale,
                                     bids_dir=subj_dir,
                                     scorescrub=scorescrub,
                                     basil=basil,
                                     smooth_kernel=smoothkernel,
                                     metadata=metadata)
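    # (background, not from this function) the CBF workflow derives perfusion from the
    # control-label difference signal, calibrated against the M0 image (scaled by
    # m0_scale), with optional SCORE/SCRUB denoising and BASIL modelling per the
    # scorescrub/basil options above.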

    # cbf computation workflow
    workflow.connect([
        (asl_asl_trans_wf, compt_cbf_wf,
         [('outputnode.asl', 'inputnode.asl_file'),
          ('outputnode.asl_mask', 'inputnode.asl_mask')]),
        (inputnode, compt_cbf_wf, [('t1w_tpms', 'inputnode.t1w_tpms'),
                                   ('asl_file', 'inputnode.in_file')]),
        (asl_reg_wf, compt_cbf_wf, [('outputnode.itk_t1_to_asl',
                                     'inputnode.t1_asl_xform')]),
        (asl_reg_wf, compt_cbf_wf, [('outputnode.itk_asl_to_t1',
                                     'inputnode.itk_asl_to_t1')]),
        (inputnode, compt_cbf_wf, [('t1w_mask', 'inputnode.t1w_mask')]),
    ])

    refine_mask = pe.Node(refinemask(),
                          mem_gb=1.0,
                          run_without_submitting=True,
                          name="refinemask")
    workflow.connect([
        (asl_asl_trans_wf, refine_mask, [('outputnode.asl_mask', 'in_aslmask')
                                         ]),
        (asl_reg_wf, refine_mask, [('outputnode.itk_t1_to_asl', 'transforms')
                                   ]),
        (inputnode, refine_mask, [('t1w_mask', 'in_t1mask')]),
    ])

    if fmaps:
        from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
        # Report on asl correction
        fmap_unwarp_report_wf = init_sdc_unwarp_report_wf()
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [('t1w_dseg',
                                                 'inputnode.in_seg')]),
            (asl_reference_wf, fmap_unwarp_report_wf, [('outputnode.ref_image',
                                                        'inputnode.in_pre')]),
            (asl_reg_wf, fmap_unwarp_report_wf, [('outputnode.itk_t1_to_asl',
                                                  'inputnode.in_xfm')]),
            (asl_sdc_wf, fmap_unwarp_report_wf, [('outputnode.epi_corrected',
                                                  'inputnode.in_post')]),
        ])

        # Overwrite ``out_path_base`` of unwarping DataSinks
        # And ensure echo is dropped from report
        for node in fmap_unwarp_report_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                fmap_unwarp_report_wf.get_node(
                    node).interface.out_path_base = 'aslprep'
                fmap_unwarp_report_wf.get_node(
                    node).inputs.dismiss_entities = ("echo", )

        for node in asl_sdc_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                asl_sdc_wf.get_node(node).interface.out_path_base = 'aslprep'
                asl_sdc_wf.get_node(node).inputs.dismiss_entities = ("echo", )

        if 'syn' in fmaps:
            sdc_select_std = pe.Node(KeySelect(fields=['std2anat_xfm']),
                                     name='sdc_select_std',
                                     run_without_submitting=True)
            sdc_select_std.inputs.key = 'MNI152NLin2009cAsym'
            workflow.connect([
                (inputnode, sdc_select_std, [('std2anat_xfm', 'std2anat_xfm'),
                                             ('template', 'keys')]),
                (sdc_select_std, asl_sdc_wf, [('std2anat_xfm',
                                               'inputnode.std2anat_xfm')]),
            ])

        if fmaps.get('syn') is True:  # SyN forced
            syn_unwarp_report_wf = init_sdc_unwarp_report_wf(
                name='syn_unwarp_report_wf', forcedsyn=True)
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [('t1w_dseg',
                                                    'inputnode.in_seg')]),
                (asl_reference_wf, syn_unwarp_report_wf,
                 [('outputnode.ref_image', 'inputnode.in_pre')]),
                (asl_reg_wf, syn_unwarp_report_wf,
                 [('outputnode.itk_t1_to_asl', 'inputnode.in_xfm')]),
                (asl_sdc_wf, syn_unwarp_report_wf, [('outputnode.syn_ref',
                                                     'inputnode.in_post')]),
            ])

            # Overwrite ``out_path_base`` of unwarping DataSinks
            # And ensure echo is dropped from report
            for node in syn_unwarp_report_wf.list_node_names():
                if node.split('.')[-1].startswith('ds_'):
                    syn_unwarp_report_wf.get_node(
                        node).interface.out_path_base = 'aslprep'
                    syn_unwarp_report_wf.get_node(
                        node).inputs.dismiss_entities = ("echo", )

    # Map final asl mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(('T1w', 'anat')):
        from ...niworkflows.interfaces.fixes import (FixHeaderApplyTransforms
                                                     as ApplyTransforms)

        aslmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel'),
                                 name='aslmask_to_t1w',
                                 mem_gb=0.1)
        workflow.connect([
            (asl_reg_wf, aslmask_to_t1w, [('outputnode.itk_asl_to_t1',
                                           'transforms')]),
            (asl_t1_trans_wf, aslmask_to_t1w, [('outputnode.asl_mask_t1',
                                                'reference_image')]),
            (refine_mask, aslmask_to_t1w, [('out_mask', 'input_image')]),
            (aslmask_to_t1w, outputnode, [('output_image', 'asl_mask_t1')]),
        ])
        workflow.connect([
            (compt_cbf_wf, asl_t1_trans_wf, [
                ('outputnode.out_cbf', 'inputnode.cbf'),
                ('outputnode.out_mean', 'inputnode.meancbf'),
            ]),
            (asl_t1_trans_wf, asl_derivatives_wf, [
                ('outputnode.cbf_t1', 'inputnode.cbf_t1'),
                ('outputnode.meancbf_t1', 'inputnode.meancbf_t1'),
            ]),
        ])

        if scorescrub:
            workflow.connect([
                (compt_cbf_wf, asl_t1_trans_wf, [
                    ('outputnode.out_score', 'inputnode.score'),
                    ('outputnode.out_avgscore', 'inputnode.avgscore'),
                    ('outputnode.out_scrub', 'inputnode.scrub'),
                ]),
                (asl_t1_trans_wf, asl_derivatives_wf, [
                    ('outputnode.scrub_t1', 'inputnode.scrub_t1'),
                    ('outputnode.score_t1', 'inputnode.score_t1'),
                    ('outputnode.avgscore_t1', 'inputnode.avgscore_t1'),
                ])
            ])
        if basil:
            workflow.connect([(compt_cbf_wf, asl_t1_trans_wf, [
                ('outputnode.out_cbfb', 'inputnode.basil'),
                ('outputnode.out_cbfpv', 'inputnode.pv'),
                ('outputnode.out_att', 'inputnode.att'),
            ]),
                              (asl_t1_trans_wf, asl_derivatives_wf, [
                                  ('outputnode.basil_t1',
                                   'inputnode.basil_t1'),
                                  ('outputnode.pv_t1', 'inputnode.pv_t1'),
                                  ('outputnode.att_t1', 'inputnode.att_t1'),
                              ])])

    if nonstd_spaces.intersection(('func', 'run', 'asl', 'aslref', 'sbref')):
        workflow.connect([
            (asl_asl_trans_wf, outputnode, [('outputnode.asl', 'asl_native')]),
            (asl_asl_trans_wf, asl_derivatives_wf,
             [('outputnode.asl_ref', 'inputnode.asl_native_ref')]),
            (refine_mask, asl_derivatives_wf, [('out_mask',
                                                'inputnode.asl_mask_native')]),
            (compt_cbf_wf, asl_derivatives_wf, [
                ('outputnode.out_cbf', 'inputnode.cbf'),
                ('outputnode.out_mean', 'inputnode.meancbf'),
            ]),
        ])

        if scorescrub:
            workflow.connect([(compt_cbf_wf, asl_derivatives_wf, [
                ('outputnode.out_score', 'inputnode.score'),
                ('outputnode.out_avgscore', 'inputnode.avgscore'),
                ('outputnode.out_scrub', 'inputnode.scrub'),
            ])])
        if basil:
            workflow.connect([
                (compt_cbf_wf, asl_derivatives_wf,
                 [('outputnode.out_cbfb', 'inputnode.basil'),
                  ('outputnode.out_cbfpv', 'inputnode.pv'),
                  ('outputnode.out_att', 'inputnode.att')]),
            ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        asl_std_trans_wf = init_asl_std_trans_wf(
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            scorescrub=scorescrub,
            basil=basil,
            name='asl_std_trans_wf',
            use_compression=not config.execution.low_mem,
            use_fieldwarp=bool(fmaps),
        )
        workflow.connect([
            (inputnode, asl_std_trans_wf, [
                ('template', 'inputnode.templates'),
                ('anat2std_xfm', 'inputnode.anat2std_xfm'),
                ('asl_file', 'inputnode.name_source'),
            ]),
            (asl_hmc_wf, asl_std_trans_wf, [('outputnode.xforms',
                                             'inputnode.hmc_xforms')]),
            (asl_reg_wf, asl_std_trans_wf, [('outputnode.itk_asl_to_t1',
                                             'inputnode.itk_asl_to_t1')]),
            (refine_mask, asl_std_trans_wf, [('out_mask', 'inputnode.asl_mask')
                                             ]),
            (asl_sdc_wf, asl_std_trans_wf, [('outputnode.out_warp',
                                             'inputnode.fieldwarp')]),
            (compt_cbf_wf, asl_std_trans_wf, [
                ('outputnode.out_cbf', 'inputnode.cbf'),
                ('outputnode.out_mean', 'inputnode.meancbf'),
            ]),
        ])

        if scorescrub:
            workflow.connect([
                (compt_cbf_wf, asl_std_trans_wf, [
                    ('outputnode.out_score', 'inputnode.score'),
                    ('outputnode.out_avgscore', 'inputnode.avgscore'),
                    ('outputnode.out_scrub', 'inputnode.scrub'),
                ]),
            ])
        if basil:
            workflow.connect([
                (compt_cbf_wf, asl_std_trans_wf, [
                    ('outputnode.out_cbfb', 'inputnode.basil'),
                    ('outputnode.out_cbfpv', 'inputnode.pv'),
                    ('outputnode.out_att', 'inputnode.att'),
                ]),
            ])

        if not multiecho:
            workflow.connect([(asl_split, asl_std_trans_wf,
                               [('out_files', 'inputnode.asl_split')])])
        else:
            split_opt_comb = asl_split.clone(name='split_opt_comb')
            workflow.connect([(asl_t2s_wf, split_opt_comb, [('outputnode.asl',
                                                             'in_file')]),
                              (split_opt_comb, asl_std_trans_wf,
                               [('out_files', 'inputnode.asl_split')])])

        # asl_derivatives_wf internally parametrizes over snapshotted spaces.
        workflow.connect([
            (asl_std_trans_wf, asl_derivatives_wf,
             [('outputnode.template', 'inputnode.template'),
              ('outputnode.spatial_reference', 'inputnode.spatial_reference'),
              ('outputnode.asl_std_ref', 'inputnode.asl_std_ref'),
              ('outputnode.asl_std', 'inputnode.asl_std'),
              ('outputnode.asl_mask_std', 'inputnode.asl_mask_std'),
              ('outputnode.cbf_std', 'inputnode.cbf_std'),
              ('outputnode.meancbf_std', 'inputnode.meancbf_std')]),
        ])
        if scorescrub:
            workflow.connect([
                (asl_std_trans_wf, asl_derivatives_wf, [
                    ('outputnode.score_std', 'inputnode.score_std'),
                    ('outputnode.avgscore_std', 'inputnode.avgscore_std'),
                    ('outputnode.scrub_std', 'inputnode.scrub_std'),
                ]),
            ])

        if basil:
            workflow.connect([
                (asl_std_trans_wf, asl_derivatives_wf,
                 [('outputnode.basil_std', 'inputnode.basil_std'),
                  ('outputnode.pv_std', 'inputnode.pv_std'),
                  ('outputnode.att_std', 'inputnode.att_std')]),
            ])

    compt_qccbf_wf = init_cbfqc_compt_wf(name='compt_qccbf_wf',
                                         mem_gb=mem_gb['filesize'],
                                         omp_nthreads=omp_nthreads,
                                         asl_file=asl_file,
                                         scorescrub=scorescrub,
                                         basil=basil,
                                         metadata=metadata)
    workflow.connect([
        (refine_mask, compt_qccbf_wf, [('out_mask', 'inputnode.asl_mask')]),
        (inputnode, compt_qccbf_wf, [('t1w_tpms', 'inputnode.t1w_tpms')]),
        (asl_reg_wf, compt_qccbf_wf, [('outputnode.itk_t1_to_asl',
                                       'inputnode.t1_asl_xform')]),
        (inputnode, compt_qccbf_wf, [('t1w_mask', 'inputnode.t1w_mask')]),
        (compt_cbf_wf, compt_qccbf_wf, [('outputnode.out_mean',
                                         'inputnode.meancbf')]),
        (asl_confounds_wf, compt_qccbf_wf, [('outputnode.confounds_file',
                                             'inputnode.confmat')]),
        (compt_qccbf_wf, outputnode, [('outputnode.qc_file', 'qc_file')]),
        (compt_qccbf_wf, asl_derivatives_wf, [('outputnode.qc_file',
                                               'inputnode.qc_file')]),
        (compt_qccbf_wf, summary, [('outputnode.qc_file', 'qc_file')]),
    ])

    if scorescrub:
        workflow.connect([
            (compt_cbf_wf, compt_qccbf_wf, [
                ('outputnode.out_avgscore', 'inputnode.avgscore'),
                ('outputnode.out_scrub', 'inputnode.scrub'),
            ]),
        ])
    if basil:
        workflow.connect([
            (compt_cbf_wf, compt_qccbf_wf, [
                ('outputnode.out_cbfb', 'inputnode.basil'),
                ('outputnode.out_cbfpv', 'inputnode.pv'),
            ]),
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        workflow.connect([
            (asl_std_trans_wf, compt_qccbf_wf, [('outputnode.asl_mask_std',
                                                 'inputnode.asl_mask_std')]),
        ])

    cbf_plot = init_cbfplot_wf(mem_gb=mem_gb['filesize'],
                               metadata=metadata,
                               scorescrub=scorescrub,
                               basil=basil,
                               omp_nthreads=omp_nthreads,
                               name='cbf_plot')
    workflow.connect([
        (compt_cbf_wf, cbf_plot,
         [('outputnode.out_mean', 'inputnode.cbf'),
          ('outputnode.out_avgscore', 'inputnode.score'),
          ('outputnode.out_scrub', 'inputnode.scrub'),
          ('outputnode.out_cbfb', 'inputnode.basil'),
          ('outputnode.out_cbfpv', 'inputnode.pvc'),
          ('outputnode.out_score', 'inputnode.score_ts'),
          ('outputnode.out_cbf', 'inputnode.cbf_ts')]),
        (inputnode, cbf_plot, [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
        (asl_reg_wf, cbf_plot, [('outputnode.itk_t1_to_asl',
                                 'inputnode.t1_asl_xform')]),
        (refine_mask, cbf_plot, [('out_mask', 'inputnode.asl_mask')]),
        (asl_reference_wf, cbf_plot, [('outputnode.ref_image_brain',
                                       'inputnode.asl_ref')]),
        (asl_confounds_wf, cbf_plot, [('outputnode.confounds_file',
                                       'inputnode.confounds_file')]),
        (compt_cbf_wf, cbf_plot, [('outputnode.out_scoreindex',
                                   'inputnode.scoreindex')]),
    ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        carpetplot_wf = init_carpetplot_wf(
            mem_gb=mem_gb['resampled'],
            metadata=metadata,
            # cifti_output=config.workflow.cifti_output,
            name='carpetplot_wf')

        # Xform to 'MNI152NLin2009cAsym' is always computed.
        carpetplot_select_std = pe.Node(KeySelect(fields=['std2anat_xfm'],
                                                  key='MNI152NLin2009cAsym'),
                                        name='carpetplot_select_std',
                                        run_without_submitting=True)

        workflow.connect([
            (inputnode, carpetplot_select_std, [('std2anat_xfm',
                                                 'std2anat_xfm'),
                                                ('template', 'keys')]),
            (carpetplot_select_std, carpetplot_wf,
             [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
            (asl_asl_trans_wf if not multiecho else asl_t2s_wf, carpetplot_wf,
             [('outputnode.asl', 'inputnode.asl')]),
            (refine_mask, carpetplot_wf, [('out_mask', 'inputnode.asl_mask')]),
            (asl_reg_wf, carpetplot_wf, [('outputnode.itk_t1_to_asl',
                                          'inputnode.t1_asl_xform')]),
        ])

        workflow.connect([(asl_confounds_wf, carpetplot_wf, [
            ('outputnode.confounds_file', 'inputnode.confounds_file')
        ])])
    cbfroiqu = init_cbfroiquant_wf(mem_gb=mem_gb['filesize'],
                                   basil=basil,
                                   scorescrub=scorescrub,
                                   omp_nthreads=omp_nthreads,
                                   name='cbf_roiquant')
    workflow.connect([
        (asl_asl_trans_wf, cbfroiqu, [('outputnode.asl_mask',
                                       'inputnode.aslmask')]),
        (inputnode, cbfroiqu, [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
        (asl_reg_wf, cbfroiqu, [('outputnode.itk_t1_to_asl',
                                 'inputnode.t1_asl_xform')]),
        (compt_cbf_wf, cbfroiqu, [
            ('outputnode.out_mean', 'inputnode.cbf'),
        ]),
        (cbfroiqu, asl_derivatives_wf, [
            ('outputnode.cbf_hvoxf', 'inputnode.cbf_hvoxf'),
            ('outputnode.cbf_sc207', 'inputnode.cbf_sc207'),
            ('outputnode.cbf_sc217', 'inputnode.cbf_sc217'),
            ('outputnode.cbf_sc407', 'inputnode.cbf_sc407'),
            ('outputnode.cbf_sc417', 'inputnode.cbf_sc417'),
        ]),
    ])
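    # (illustrative, naming assumed) the ROI-quantification outputs appear to be
    # atlas-averaged CBF values: 'hvoxf' for a Harvard-Oxford parcellation and
    # 'sc207'/'sc217'/'sc407'/'sc417' for Schaefer 200/400-parcel (7/17-network) atlases.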

    if scorescrub:
        workflow.connect([
            (compt_cbf_wf, cbfroiqu,
             [('outputnode.out_avgscore', 'inputnode.score'),
              ('outputnode.out_scrub', 'inputnode.scrub')]),
            (cbfroiqu, asl_derivatives_wf, [
                ('outputnode.score_hvoxf', 'inputnode.score_hvoxf'),
                ('outputnode.score_sc207', 'inputnode.score_sc207'),
                ('outputnode.score_sc217', 'inputnode.score_sc217'),
                ('outputnode.score_sc407', 'inputnode.score_sc407'),
                ('outputnode.score_sc417', 'inputnode.score_sc417'),
                ('outputnode.scrub_hvoxf', 'inputnode.scrub_hvoxf'),
                ('outputnode.scrub_sc207', 'inputnode.scrub_sc207'),
                ('outputnode.scrub_sc217', 'inputnode.scrub_sc217'),
                ('outputnode.scrub_sc407', 'inputnode.scrub_sc407'),
                ('outputnode.scrub_sc417', 'inputnode.scrub_sc417'),
            ]),
        ])
    if basil:
        workflow.connect([
            (compt_cbf_wf, cbfroiqu,
             [('outputnode.out_cbfb', 'inputnode.basil'),
              ('outputnode.out_cbfpv', 'inputnode.pvc')]),
            (cbfroiqu, asl_derivatives_wf, [
                ('outputnode.basil_hvoxf', 'inputnode.basil_hvoxf'),
                ('outputnode.basil_sc207', 'inputnode.basil_sc207'),
                ('outputnode.basil_sc217', 'inputnode.basil_sc217'),
                ('outputnode.basil_sc407', 'inputnode.basil_sc407'),
                ('outputnode.basil_sc417', 'inputnode.basil_sc417'),
                ('outputnode.pvc_hvoxf', 'inputnode.pvc_hvoxf'),
                ('outputnode.pvc_sc207', 'inputnode.pvc_sc207'),
                ('outputnode.pvc_sc217', 'inputnode.pvc_sc217'),
                ('outputnode.pvc_sc407', 'inputnode.pvc_sc407'),
                ('outputnode.pvc_sc417', 'inputnode.pvc_sc417'),
            ]),
        ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(DerivativesDataSink(
        desc='summary', datatype="figures", dismiss_entities=("echo", )),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(DerivativesDataSink(
        base_directory=output_dir,
        desc='validation',
        datatype="figures",
        dismiss_entities=("echo", )),
                                   name='ds_report_validation',
                                   run_without_submitting=True,
                                   mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (asl_reference_wf, ds_report_validation,
         [('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = output_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
Example #9
def init_asl_t1_trans_wf(mem_gb,
                         omp_nthreads,
                         scorescrub=False,
                         basil=False,
                         cbft1space=False,
                         multiecho=False,
                         use_fieldwarp=False,
                         use_compression=True,
                         name='asl_t1_trans_wf'):
    """
    Resample the ASL series and derived CBF maps into T1w space.

    The co-registration applied here is computed upstream with
    :abbr:`BBR (boundary-based registration)`.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from aslprep.workflows.asl.registration import init_asl_t1_trans_wf
            wf = init_asl_t1_trans_wf(
                                       mem_gb=3,
                                       omp_nthreads=1)

    Parameters
    ----------
    use_fieldwarp : :obj:`bool`
        Include SDC warp in single-shot transform from ASL to T1w
    multiecho : :obj:`bool`
        If multiecho data was supplied, HMC has already been performed
    mem_gb : :obj:`float`
        Size of ASL file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    use_compression : :obj:`bool`
        Save registered ASL series as ``.nii.gz``
    name : :obj:`str`
        Name of workflow (default: ``asl_t1_trans_wf``)

    Inputs
    ------
    name_source
        ASL series NIfTI file
        Used to recover original information lost during processing
    ref_asl_brain
        Reference image to which ASL series is aligned
        If ``fieldwarp == True``, ``ref_asl_brain`` should be unwarped
    ref_asl_mask
        Skull-stripping mask of reference image
    t1w_brain
        Skull-stripped bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    asl_split
        Individual 3D ASL volumes, not motion corrected
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    itk_asl_to_t1
        Affine transform from ``ref_asl_brain`` to T1 space (ITK format)
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format

    Outputs
    -------
    asl_t1
        Motion-corrected ASL series in T1 space
    asl_t1_ref
        Reference, contrast-enhanced summary of the motion-corrected ASL series in T1w space
    asl_mask_t1
        ASL mask in T1 space

    See also
    --------
      * :py:func:`~aslprep.workflows.asl.registration.init_bbreg_wf`
      * :py:func:`~aslprep.workflows.asl.registration.init_fsl_bbr_wf`

    """
    from ...niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from ...niworkflows.func.util import init_asl_reference_wf
    from ...niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from ...niworkflows.interfaces.itk import MultiApplyTransforms
    from ...niworkflows.interfaces.nilearn import Merge
    from ...niworkflows.interfaces.utils import GenerateSamplingReference

    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'name_source', 'ref_asl_brain', 'ref_asl_mask', 't1w_brain',
        't1w_mask', 'asl_split', 'fieldwarp', 'hmc_xforms', 'cbf', 'meancbf',
        'att', 'score', 'avgscore', 'scrub', 'basil', 'pv', 'itk_asl_to_t1'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'asl_t1', 'asl_t1_ref', 'asl_mask_t1', 'att_t1', 'cbf_t1',
        'meancbf_t1', 'score_t1', 'avgscore_t1', 'scrub_t1', 'basil_t1',
        'pv_t1'
    ]),
                         name='outputnode')

    gen_ref = pe.Node(GenerateSamplingReference(), name='gen_ref',
                      mem_gb=0.3)  # 256x256x256 * 64 / 8 ~ 150MB

    mask_t1w_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel'),
                           name='mask_t1w_tfm',
                           mem_gb=0.1)

    workflow.connect([
        (inputnode, gen_ref, [('ref_asl_brain', 'moving_image'),
                              ('t1w_brain', 'fixed_image'),
                              ('t1w_mask', 'fov_mask')]),
        (inputnode, mask_t1w_tfm, [('ref_asl_mask', 'input_image')]),
        (gen_ref, mask_t1w_tfm, [('out_file', 'reference_image')]),
        (inputnode, mask_t1w_tfm, [('itk_asl_to_t1', 'transforms')]),
        (mask_t1w_tfm, outputnode, [('output_image', 'asl_mask_t1')]),
    ])

    asl_to_t1w_transform = pe.Node(MultiApplyTransforms(
        interpolation="LanczosWindowedSinc", float=True, copy_dtype=True),
                                   name='asl_to_t1w_transform',
                                   mem_gb=mem_gb * 3 * omp_nthreads,
                                   n_procs=omp_nthreads)
    # merge 3D volumes into 4D timeseries
    merge = pe.Node(Merge(compress=use_compression),
                    name='merge',
                    mem_gb=mem_gb)

    # Generate a reference on the target T1w space
    gen_final_ref = init_asl_reference_wf(omp_nthreads, pre_mask=True)

    if not multiecho:
        # Merge transforms placing the head motion correction last
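        # (antsApplyTransforms composes the list as a stack: the last-listed
        # transform, here the per-volume HMC affine, is applied to the moving
        # image first, then the fieldwarp, then the ASL-to-T1w affine)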
        nforms = 2 + int(use_fieldwarp)
        merge_xforms = pe.Node(niu.Merge(nforms),
                               name='merge_xforms',
                               run_without_submitting=True,
                               mem_gb=DEFAULT_MEMORY_MIN_GB)
        if use_fieldwarp:
            workflow.connect([(inputnode, merge_xforms, [('fieldwarp', 'in2')])
                              ])

        workflow.connect([
            # merge transforms
            (inputnode, merge_xforms, [('hmc_xforms', 'in%d' % nforms),
                                       ('itk_asl_to_t1', 'in1')]),
            (merge_xforms, asl_to_t1w_transform, [('out', 'transforms')]),
            (inputnode, asl_to_t1w_transform, [('asl_split', 'input_image')]),
            (inputnode, merge, [('name_source', 'header_source')]),
            (gen_ref, asl_to_t1w_transform, [('out_file', 'reference_image')]),
            (asl_to_t1w_transform, merge, [('out_files', 'in_files')]),
            (merge, gen_final_ref, [('out_file', 'inputnode.asl_file')]),
            (mask_t1w_tfm, gen_final_ref, [('output_image',
                                            'inputnode.asl_mask')]),
            (merge, outputnode, [('out_file', 'asl_t1')]),
        ])

    else:
        from nipype.interfaces.fsl import Split as FSLSplit
        asl_split = pe.Node(FSLSplit(dimension='t'),
                            name='asl_split',
                            mem_gb=DEFAULT_MEMORY_MIN_GB)

        workflow.connect([
            (inputnode, asl_split, [('asl_split', 'in_file')]),
            (asl_split, asl_to_t1w_transform, [('out_files', 'input_image')]),
            (inputnode, asl_to_t1w_transform, [('itk_asl_to_t1', 'transforms')
                                               ]),
            (inputnode, merge, [('name_source', 'header_source')]),
            (gen_ref, asl_to_t1w_transform, [('out_file', 'reference_image')]),
            (asl_to_t1w_transform, merge, [('out_files', 'in_files')]),
            (merge, gen_final_ref, [('out_file', 'inputnode.asl_file')]),
            (mask_t1w_tfm, gen_final_ref, [('output_image',
                                            'inputnode.asl_mask')]),
            (merge, outputnode, [('out_file', 'asl_t1')]),
        ])

    if cbft1space:
        cbf_to_t1w_transform = pe.Node(ApplyTransforms(
            interpolation="LanczosWindowedSinc",
            float=True,
            input_image_type=3,
            dimension=3),
                                       name='cbf_to_t1w_transform',
                                       mem_gb=mem_gb * 3 * omp_nthreads,
                                       n_procs=omp_nthreads)
        meancbf_to_t1w_transform = pe.Node(ApplyTransforms(
            interpolation="LanczosWindowedSinc",
            float=True,
            input_image_type=3),
                                           name='meancbf_to_t1w_transform',
                                           mem_gb=mem_gb * 3 * omp_nthreads,
                                           n_procs=omp_nthreads)

        workflow.connect([
            (gen_final_ref, outputnode, [('outputnode.ref_image', 'asl_t1_ref')
                                         ]),
            (inputnode, cbf_to_t1w_transform, [('cbf', 'input_image')]),
            (cbf_to_t1w_transform, outputnode, [('output_image', 'cbf_t1')]),
            (inputnode, cbf_to_t1w_transform, [('itk_asl_to_t1', 'transforms')
                                               ]),
            (gen_ref, cbf_to_t1w_transform, [('out_file', 'reference_image')]),
            (inputnode, meancbf_to_t1w_transform, [('meancbf', 'input_image')
                                                   ]),
            (meancbf_to_t1w_transform, outputnode, [('output_image',
                                                     'meancbf_t1')]),
            (inputnode, meancbf_to_t1w_transform, [('itk_asl_to_t1',
                                                    'transforms')]),
            (gen_ref, meancbf_to_t1w_transform, [('out_file',
                                                  'reference_image')]),
        ])
    if cbft1space and scorescrub:
        score_to_t1w_transform = pe.Node(ApplyTransforms(
            interpolation="LanczosWindowedSinc",
            float=True,
            input_image_type=3,
            dimension=3),
                                         name='score_to_t1w_transform',
                                         mem_gb=mem_gb * 3 * omp_nthreads,
                                         n_procs=omp_nthreads)
        avgscore_to_t1w_transform = pe.Node(ApplyTransforms(
            interpolation="LanczosWindowedSinc",
            float=True,
            input_image_type=3),
                                            name='avgscore_to_t1w_transform',
                                            mem_gb=mem_gb * 3 * omp_nthreads,
                                            n_procs=omp_nthreads)
        scrub_to_t1w_transform = pe.Node(ApplyTransforms(
            interpolation="LanczosWindowedSinc",
            float=True,
            input_image_type=3),
                                         name='scrub_to_t1w_transform',
                                         mem_gb=mem_gb * 3 * omp_nthreads,
                                         n_procs=omp_nthreads)

        workflow.connect([
            (inputnode, score_to_t1w_transform, [('score', 'input_image')]),
            (score_to_t1w_transform, outputnode, [('output_image', 'score_t1')
                                                  ]),
            (inputnode, score_to_t1w_transform, [('itk_asl_to_t1',
                                                  'transforms')]),
            (gen_ref, score_to_t1w_transform, [('out_file', 'reference_image')
                                               ]),
            (inputnode, avgscore_to_t1w_transform, [('avgscore', 'input_image')
                                                    ]),
            (avgscore_to_t1w_transform, outputnode, [('output_image',
                                                      'avgscore_t1')]),
            (inputnode, avgscore_to_t1w_transform, [('itk_asl_to_t1',
                                                     'transforms')]),
            (gen_ref, avgscore_to_t1w_transform, [('out_file',
                                                   'reference_image')]),
            (inputnode, scrub_to_t1w_transform, [('scrub', 'input_image')]),
            (scrub_to_t1w_transform, outputnode, [('output_image', 'scrub_t1')
                                                  ]),
            (inputnode, scrub_to_t1w_transform, [('itk_asl_to_t1',
                                                  'transforms')]),
            (gen_ref, scrub_to_t1w_transform, [('out_file', 'reference_image')
                                               ]),
        ])

    if cbft1space and basil:
        basil_to_t1w_transform = pe.Node(ApplyTransforms(
            interpolation="LanczosWindowedSinc",
            float=True,
            input_image_type=3),
                                         name='basil_to_t1w_transform',
                                         mem_gb=mem_gb * 3 * omp_nthreads,
                                         n_procs=omp_nthreads)
        pv_to_t1w_transform = pe.Node(ApplyTransforms(
            interpolation="LanczosWindowedSinc",
            float=True,
            input_image_type=3),
                                      name='pv_to_t1w_transform',
                                      mem_gb=mem_gb * 3 * omp_nthreads,
                                      n_procs=omp_nthreads)
        att_to_t1w_transform = pe.Node(ApplyTransforms(
            interpolation="LanczosWindowedSinc",
            float=True,
            input_image_type=3),
                                       name='att_to_t1w_transform',
                                       mem_gb=mem_gb * 3 * omp_nthreads,
                                       n_procs=omp_nthreads)

        workflow.connect([
            (inputnode, basil_to_t1w_transform, [('basil', 'input_image')]),
            (basil_to_t1w_transform, outputnode, [('output_image', 'basil_t1')
                                                  ]),
            (inputnode, basil_to_t1w_transform, [('itk_asl_to_t1',
                                                  'transforms')]),
            (gen_ref, basil_to_t1w_transform, [('out_file', 'reference_image')
                                               ]),
            (inputnode, pv_to_t1w_transform, [('pv', 'input_image')]),
            (pv_to_t1w_transform, outputnode, [('output_image', 'pv_t1')]),
            (inputnode, pv_to_t1w_transform, [('itk_asl_to_t1', 'transforms')
                                              ]),
            (gen_ref, pv_to_t1w_transform, [('out_file', 'reference_image')]),
            (inputnode, att_to_t1w_transform, [('att', 'input_image')]),
            (att_to_t1w_transform, outputnode, [('output_image', 'att_t1')]),
            (inputnode, att_to_t1w_transform, [('itk_asl_to_t1', 'transforms')
                                               ]),
            (gen_ref, att_to_t1w_transform, [('out_file', 'reference_image')]),
        ])

    return workflow
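A minimal standalone usage sketch for the workflow above, following the import path given in its docstring (all file paths below are hypothetical placeholders; inside ASLPrep these inputs are wired in by the parent ASL preprocessing workflow):

from aslprep.workflows.asl.registration import init_asl_t1_trans_wf

wf = init_asl_t1_trans_wf(mem_gb=3, omp_nthreads=1)
wf.base_dir = '/tmp/work'  # hypothetical scratch directory
wf.inputs.inputnode.name_source = '/data/sub-01_asl.nii.gz'  # hypothetical paths
wf.inputs.inputnode.ref_asl_brain = '/data/sub-01_aslref_brain.nii.gz'
wf.inputs.inputnode.ref_asl_mask = '/data/sub-01_aslref_mask.nii.gz'
wf.inputs.inputnode.t1w_brain = '/data/sub-01_desc-brain_T1w.nii.gz'
wf.inputs.inputnode.t1w_mask = '/data/sub-01_desc-brain_mask.nii.gz'
# asl_split, hmc_xforms and itk_asl_to_t1 must also be set (or connected) before running
# wf.run()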
Example #10
def init_bold_preproc_trans_wf(
    mem_gb,
    omp_nthreads,
    name="bold_preproc_trans_wf",
    use_compression=True,
    use_fieldwarp=False,
    split_file=False,
    interpolation="LanczosWindowedSinc",
):
    """
    Resample in native (original) space.

    This workflow resamples the input fMRI in its native (original)
    space in a "single shot" from the original BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes

            from fprodents.workflows.bold.resampling import init_bold_preproc_trans_wf
            wf = init_bold_preproc_trans_wf(mem_gb=3, omp_nthreads=1)

    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``bold_preproc_trans_wf``)
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    use_fieldwarp : :obj:`bool`
        Include SDC warp in single-shot transform from BOLD to MNI
    split_file : :obj:`bool`
        Whether the input file should be split (i.e., it is a 4D file)
        or is already a list of 3D files (default ``False``, do not split)
    interpolation : :obj:`str`
        Interpolation type to be used by ANTs' ``applyTransforms``
        (default ``'LanczosWindowedSinc'``)

    Inputs
    ------
    bold_file
        Individual 3D volumes, not motion corrected
    bold_mask
        Skull-stripping mask of reference image
    bold_ref
        BOLD reference image: an average-like 3D image of the time-series
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format

    Outputs
    -------
    bold
        BOLD series, resampled in native space, including all preprocessing

    """
    from bids.utils import listify
    from nipype.interfaces.fsl import Split as FSLSplit
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.nilearn import Merge

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series (including slice-timing correction when applied)
were resampled onto their original, native space by applying
{transforms}.
These resampled BOLD time-series will be referred to as *preprocessed
BOLD in original space*, or just *preprocessed BOLD*.
""".format(transforms="""\
a single, composite transform to correct for head-motion and
susceptibility distortions""" if use_fieldwarp else """\
the transforms to correct for head-motion""")

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "name_source", "bold_file", "bold_mask", "bold_ref", "hmc_xforms",
            "fieldwarp"
        ]),
        name="inputnode",
    )

    outputnode = pe.Node(niu.IdentityInterface(fields=["bold"]),
                         name="outputnode")

    bold_transform = pe.Node(
        MultiApplyTransforms(interpolation=interpolation,
                             float=True,
                             copy_dtype=True),
        name="bold_transform",
        mem_gb=mem_gb * 3 * omp_nthreads,
        n_procs=omp_nthreads,
    )

    merge = pe.Node(Merge(compress=use_compression),
                    name="merge",
                    mem_gb=mem_gb * 3)

    # fmt:off
    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (bold_transform, merge, [('out_files', 'in_files')]),
        (inputnode, bold_transform, [(('hmc_xforms', listify), 'transforms'),
                                     ('bold_ref', 'reference_image')]),
        (merge, outputnode, [('out_file', 'bold')]),
    ])
    # fmt:on

    # Split the input file if it is 4D; otherwise it is already a list of 3D volumes
    if split_file:
        bold_split = pe.Node(FSLSplit(dimension="t"),
                             name="bold_split",
                             mem_gb=mem_gb * 3)
        # fmt:off
        workflow.connect([(inputnode, bold_split, [('bold_file', 'in_file')]),
                          (bold_split, bold_transform, [('out_files',
                                                         'input_image')])])
        # fmt:on
    else:
        # fmt:off
        workflow.connect([
            (inputnode, bold_transform, [('bold_file', 'input_image')]),
        ])
        # fmt:on

    return workflow
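A similar minimal sketch for the native-space resampling workflow above (the paths are hypothetical placeholders; in the pipeline these inputs come from the BOLD reference and head-motion correction workflows):

from fprodents.workflows.bold.resampling import init_bold_preproc_trans_wf

wf = init_bold_preproc_trans_wf(mem_gb=3, omp_nthreads=1, split_file=True)
wf.base_dir = '/tmp/work'  # hypothetical scratch directory
wf.inputs.inputnode.name_source = '/data/sub-01_task-rest_bold.nii.gz'  # hypothetical paths
wf.inputs.inputnode.bold_file = '/data/sub-01_task-rest_bold.nii.gz'    # 4D series, will be split
wf.inputs.inputnode.bold_ref = '/data/sub-01_boldref.nii.gz'
wf.inputs.inputnode.hmc_xforms = '/data/sub-01_hmc_xforms.txt'  # ITK transform(s) from HMC (hypothetical)
# wf.run()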
Example #11
def init_apply_hmc_only_wf(mem_gb,
                           omp_nthreads,
                           name='apply_hmc_only',
                           use_compression=True,
                           split_file=False,
                           interpolation='LanczosWindowedSinc'):
    """
    Resample in native (original) space, applying only head-motion correction.

    This workflow resamples the input fMRI in its native (original)
    space in a "single shot" from the original BOLD series.

    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``apply_hmc_only``)
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    split_file : :obj:`bool`
        Whether the input file should be split (i.e., it is a 4D file)
        or is already a list of 3D files (default ``False``, do not split)
    interpolation : :obj:`str`
        Interpolation type to be used by ANTs' ``applyTransforms``
        (default ``'LanczosWindowedSinc'``)

    Inputs
    ------
    bold_file
        Individual 3D volumes, not motion corrected
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format

    Outputs
    -------
    bold
        BOLD series, resampled in native space, including all preprocessing
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.func.util import init_bold_reference_wf
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.nilearn import Merge
    from nipype.interfaces.fsl import Split as FSLSplit

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series (including slice-timing correction when applied)
were resampled onto their original, native space by applying
{transforms}.
These resampled BOLD time-series will be referred to as *preprocessed
BOLD in original space*, or just *preprocessed BOLD*.
""".format(transforms="""\
the transforms to correct for head-motion""")

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['name_source', 'bold_file', 'hmc_xforms']),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=['bold']),
                         name='outputnode')

    bold_transform = pe.Node(MultiApplyTransforms(interpolation=interpolation,
                                                  float=True,
                                                  copy_dtype=True),
                             name='bold_transform',
                             mem_gb=mem_gb * 3 * omp_nthreads,
                             n_procs=omp_nthreads)

    merge = pe.Node(Merge(compress=use_compression),
                    name='merge',
                    mem_gb=mem_gb * 3)

    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (bold_transform, merge, [('out_files', 'in_files')]),
        (merge, outputnode, [('out_file', 'bold')]),
    ])

    # Split the input file if it is 4D; otherwise it is already a list of 3D volumes
    if split_file:
        bold_split = pe.Node(FSLSplit(dimension='t'),
                             name='bold_split',
                             mem_gb=mem_gb * 3)
        workflow.connect([(inputnode, bold_split, [('bold_file', 'in_file')]),
                          (bold_split, bold_transform, [
                              ('out_files', 'input_image'),
                              (('out_files', _first), 'reference_image'),
                          ])])
    else:
        workflow.connect([
            (inputnode, bold_transform, [('bold_file', 'input_image'),
                                         (('bold_file', _first),
                                          'reference_image')]),
        ])

    def _aslist(val):
        return [val]

    workflow.connect([
        (inputnode, bold_transform, [(('hmc_xforms', _aslist), 'transforms')]),
    ])
    return workflow
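Compared with ``init_bold_preproc_trans_wf`` above, this variant applies only the head-motion transforms and uses the first input volume as the reference grid instead of a precomputed BOLD reference.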
Example #12
def init_func_preproc_wf(
    workdir=None, name="func_preproc_wf", fmap_type=None, memcalc=MemoryCalculator()
):
    """
    simplified from fmriprep/workflows/bold/base.py
    """

    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=["bold_file", "fmaps", "metadata", *in_attrs_from_anat_preproc_wf]
        ),
        name="inputnode",
    )

    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                *func_preproc_wf_out_attrs,
                "aroma_confounds",
                "aroma_metadata",
                "movpar_file",
                "rmsd_file",
                "skip_vols",
            ]
        ),
        name="outputnode",
    )

    def get_repetition_time(dic):
        return dic.get("RepetitionTime")

    metadatanode = pe.Node(niu.IdentityInterface(fields=["repetition_time"]), name="metadatanode")
    workflow.connect(
        [(inputnode, metadatanode, [(("metadata", get_repetition_time), "repetition_time")],)]
    )

    # Generate a brain-masked conversion of the t1w
    t1w_brain = pe.Node(ApplyMask(), name="t1w_brain")
    workflow.connect(
        [(inputnode, t1w_brain, [("t1w_preproc", "in_file"), ("t1w_mask", "in_mask")])]
    )

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=config.nipype.omp_nthreads)
    bold_reference_wf.get_node("inputnode").inputs.dummy_scans = config.workflow.dummy_scans
    workflow.connect(inputnode, "bold_file", bold_reference_wf, "inputnode.bold_file")
    workflow.connect(bold_reference_wf, "outputnode.skip_vols", outputnode, "skip_vols")

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_estimate_wf(fmap_type=fmap_type)
    workflow.connect(
        [
            (
                inputnode,
                bold_sdc_wf,
                [("fmaps", "inputnode.fmaps"), ("metadata", "inputnode.metadata")],
            ),
            (
                bold_reference_wf,
                bold_sdc_wf,
                [
                    ("outputnode.ref_image", "inputnode.epi_file"),
                    ("outputnode.ref_image_brain", "inputnode.epi_brain"),
                    ("outputnode.bold_mask", "inputnode.epi_mask"),
                ],
            ),
        ]
    )

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension="t"), name="bold_split", mem_gb=memcalc.series_gb * 3)
    workflow.connect(inputnode, "bold_file", bold_split, "in_file")

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(
        name="bold_hmc_wf", mem_gb=memcalc.series_gb, omp_nthreads=config.nipype.omp_nthreads,
    )
    workflow.connect(
        [
            (
                bold_reference_wf,
                bold_hmc_wf,
                [
                    ("outputnode.raw_ref_image", "inputnode.raw_ref_image"),
                    ("outputnode.bold_file", "inputnode.bold_file"),
                ],
            ),
            (
                bold_hmc_wf,
                outputnode,
                [("outputnode.movpar_file", "movpar_file"), ("outputnode.rmsd_file", "rmsd_file")],
            ),
        ]
    )

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(
        freesurfer=config.workflow.run_reconall,
        use_bbr=config.workflow.use_bbr,
        bold2t1w_dof=config.workflow.bold2t1w_dof,
        bold2t1w_init=config.workflow.bold2t1w_init,
        mem_gb=memcalc.series_std_gb,
        omp_nthreads=config.nipype.omp_nthreads,
        use_compression=False,
    )
    workflow.connect(
        [
            (inputnode, bold_reg_wf, [("t1w_dseg", "inputnode.t1w_dseg")]),
            (t1w_brain, bold_reg_wf, [("out_file", "inputnode.t1w_brain")]),
            (bold_sdc_wf, bold_reg_wf, [("outputnode.epi_brain", "inputnode.ref_bold_brain")],),
        ]
    )

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(
        name="bold_t1_trans_wf",
        freesurfer=config.workflow.run_reconall,
        use_fieldwarp=fmap_type is not None,
        multiecho=False,
        mem_gb=memcalc.series_std_gb,
        omp_nthreads=config.nipype.omp_nthreads,
        use_compression=False,
    )
    workflow.connect(
        [
            (
                inputnode,
                bold_t1_trans_wf,
                [("bold_file", "inputnode.name_source"), ("t1w_mask", "inputnode.t1w_mask")],
            ),
            (
                bold_sdc_wf,
                bold_t1_trans_wf,
                [
                    ("outputnode.epi_brain", "inputnode.ref_bold_brain"),
                    ("outputnode.epi_mask", "inputnode.ref_bold_mask"),
                    ("outputnode.out_warp", "inputnode.fieldwarp"),
                ],
            ),
            (t1w_brain, bold_t1_trans_wf, [("out_file", "inputnode.t1w_brain")]),
            (bold_split, bold_t1_trans_wf, [("out_files", "inputnode.bold_split")]),
            (bold_hmc_wf, bold_t1_trans_wf, [("outputnode.xforms", "inputnode.hmc_xforms")],),
            (
                bold_reg_wf,
                bold_t1_trans_wf,
                [("outputnode.itk_bold_to_t1", "inputnode.itk_bold_to_t1")],
            ),
        ]
    )

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=memcalc.series_std_gb,
        omp_nthreads=config.nipype.omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=True,
        name="bold_bold_trans_wf",
    )
    # bold_bold_trans_wf.inputs.inputnode.name_source = ref_file
    workflow.connect(
        [
            (inputnode, bold_bold_trans_wf, [("bold_file", "inputnode.name_source")]),
            (bold_split, bold_bold_trans_wf, [("out_files", "inputnode.bold_file")]),
            (bold_hmc_wf, bold_bold_trans_wf, [("outputnode.xforms", "inputnode.hmc_xforms")],),
            (
                bold_sdc_wf,
                bold_bold_trans_wf,
                [
                    ("outputnode.out_warp", "inputnode.fieldwarp"),
                    ("outputnode.epi_mask", "inputnode.bold_mask"),
                ],
            ),
        ]
    )

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_std_trans_wf = init_bold_std_trans_wf(
        freesurfer=config.workflow.run_reconall,
        mem_gb=memcalc.series_std_gb,
        omp_nthreads=config.nipype.omp_nthreads,
        spaces=config.workflow.spaces,
        name="bold_std_trans_wf",
        use_compression=not config.execution.low_mem,
        use_fieldwarp=fmap_type is not None,
    )
    workflow.connect(
        [
            (
                inputnode,
                bold_std_trans_wf,
                [
                    ("template", "inputnode.templates"),
                    ("anat2std_xfm", "inputnode.anat2std_xfm"),
                    ("bold_file", "inputnode.name_source"),
                ],
            ),
            (bold_split, bold_std_trans_wf, [("out_files", "inputnode.bold_split")]),
            (bold_hmc_wf, bold_std_trans_wf, [("outputnode.xforms", "inputnode.hmc_xforms")],),
            (
                bold_reg_wf,
                bold_std_trans_wf,
                [("outputnode.itk_bold_to_t1", "inputnode.itk_bold_to_t1")],
            ),
            (
                bold_bold_trans_wf,
                bold_std_trans_wf,
                [("outputnode.bold_mask", "inputnode.bold_mask")],
            ),
            (bold_sdc_wf, bold_std_trans_wf, [("outputnode.out_warp", "inputnode.fieldwarp")],),
            (
                bold_std_trans_wf,
                outputnode,
                [
                    ("outputnode.bold_std", "bold_std"),
                    ("outputnode.bold_std_ref", "bold_std_ref"),
                    ("outputnode.bold_mask_std", "bold_mask_std"),
                ],
            ),
        ]
    )

    ica_aroma_wf = init_ica_aroma_wf(
        mem_gb=memcalc.series_std_gb,
        metadata={"RepetitionTime": np.nan},
        omp_nthreads=config.nipype.omp_nthreads,
        use_fieldwarp=True,
        err_on_aroma_warn=config.workflow.aroma_err_on_warn,
        aroma_melodic_dim=config.workflow.aroma_melodic_dim,
        name="ica_aroma_wf",
    )
    ica_aroma_wf.get_node("ica_aroma").inputs.denoise_type = "no"
    ica_aroma_wf.remove_nodes([ica_aroma_wf.get_node("add_nonsteady")])
    workflow.connect(
        [
            (inputnode, ica_aroma_wf, [("bold_file", "inputnode.name_source")]),
            (
                metadatanode,
                ica_aroma_wf,
                [("repetition_time", "melodic.tr_sec",), ("repetition_time", "ica_aroma.TR")],
            ),
            (bold_hmc_wf, ica_aroma_wf, [("outputnode.movpar_file", "inputnode.movpar_file")]),
            (bold_reference_wf, ica_aroma_wf, [("outputnode.skip_vols", "inputnode.skip_vols")]),
            (
                bold_std_trans_wf,
                ica_aroma_wf,
                [
                    ("outputnode.bold_std", "inputnode.bold_std"),
                    ("outputnode.bold_mask_std", "inputnode.bold_mask_std"),
                    ("outputnode.spatial_reference", "inputnode.spatial_reference"),
                ],
            ),
            (
                ica_aroma_wf,
                outputnode,
                [
                    ("outputnode.aroma_noise_ics", "aroma_noise_ics"),
                    ("outputnode.melodic_mix", "melodic_mix"),
                    ("outputnode.nonaggr_denoised_file", "nonaggr_denoised_file"),
                    ("outputnode.aroma_confounds", "aroma_confounds"),
                    ("outputnode.aroma_metadata", "aroma_metadata"),
                ],
            ),
        ]
    )

    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=memcalc.series_std_gb,
        metadata={},
        regressors_all_comps=config.workflow.regressors_all_comps,
        regressors_fd_th=config.workflow.regressors_fd_th,
        regressors_dvars_th=config.workflow.regressors_dvars_th,
        name="bold_confounds_wf",
    )
    bold_confounds_wf.get_node("inputnode").inputs.t1_transform_flags = [False]
    workflow.connect(
        [
            (
                inputnode,
                bold_confounds_wf,
                [("t1w_tpms", "inputnode.t1w_tpms"), ("t1w_mask", "inputnode.t1w_mask")],
            ),
            (
                bold_hmc_wf,
                bold_confounds_wf,
                [
                    ("outputnode.movpar_file", "inputnode.movpar_file"),
                    ("outputnode.rmsd_file", "inputnode.rmsd_file"),
                ],
            ),
            (
                bold_reg_wf,
                bold_confounds_wf,
                [("outputnode.itk_t1_to_bold", "inputnode.t1_bold_xform")],
            ),
            (
                bold_reference_wf,
                bold_confounds_wf,
                [("outputnode.skip_vols", "inputnode.skip_vols")],
            ),
            (bold_confounds_wf, outputnode, [("outputnode.confounds_file", "confounds")]),
            (
                bold_confounds_wf,
                outputnode,
                [("outputnode.confounds_metadata", "confounds_metadata")],
            ),
            (
                metadatanode,
                bold_confounds_wf,
                [
                    ("repetition_time", "acompcor.repetition_time"),
                    ("repetition_time", "tcompcor.repetition_time"),
                ],
            ),
            (
                bold_bold_trans_wf,
                bold_confounds_wf,
                [
                    ("outputnode.bold", "inputnode.bold"),
                    ("outputnode.bold_mask", "inputnode.bold_mask"),
                ],
            ),
        ]
    )

    carpetplot_wf = init_carpetplot_wf(
        mem_gb=memcalc.series_std_gb,
        metadata={"RepetitionTime": np.nan},
        cifti_output=config.workflow.cifti_output,
        name="carpetplot_wf",
    )
    carpetplot_select_std = pe.Node(
        KeySelect(fields=["std2anat_xfm"], key="MNI152NLin2009cAsym"),
        name="carpetplot_select_std",
        run_without_submitting=True,
    )

    workflow.connect(
        [
            (
                inputnode,
                carpetplot_select_std,
                [("std2anat_xfm", "std2anat_xfm"), ("template", "keys")],
            ),
            (carpetplot_select_std, carpetplot_wf, [("std2anat_xfm", "inputnode.std2anat_xfm")],),
            (
                bold_bold_trans_wf,
                carpetplot_wf,
                [
                    ("outputnode.bold", "inputnode.bold"),
                    ("outputnode.bold_mask", "inputnode.bold_mask"),
                ],
            ),
            (
                bold_reg_wf,
                carpetplot_wf,
                [("outputnode.itk_t1_to_bold", "inputnode.t1_bold_xform")],
            ),
            (
                bold_confounds_wf,
                carpetplot_wf,
                [("outputnode.confounds_file", "inputnode.confounds_file")],
            ),
            (metadatanode, carpetplot_wf, [("repetition_time", "conf_plot.tr")]),
        ]
    )

    # Custom

    make_reportnode(workflow)
    assert workdir is not None
    make_reportnode_datasink(workflow, workdir)

    return workflow
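A minimal invocation sketch for the simplified preprocessing builder above (the working directory and BOLD path are hypothetical placeholders; the anatomical fields listed in ``in_attrs_from_anat_preproc_wf`` must still be connected from the anatomical workflow):

wf = init_func_preproc_wf(workdir='/tmp/work', fmap_type=None)
wf.inputs.inputnode.bold_file = '/data/sub-01_task-rest_bold.nii.gz'  # hypothetical path
wf.inputs.inputnode.metadata = {'RepetitionTime': 2.0}
# t1w_preproc, t1w_mask, t1w_dseg, t1w_tpms, template, anat2std_xfm, std2anat_xfm, ...
# are expected to be supplied by the anatomical preprocessing workflow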
Example #13
def init_func_preproc_wf(bold_file):
    """
    This workflow controls the functional preprocessing stages of *fMRIPrep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fprodents.workflows.tests import mock_config
            from fprodents import config
            from fprodents.workflows.bold.base import init_func_preproc_wf
            with mock_config():
                bold_file = config.execution.bids_dir / 'sub-01' / 'func' \
                    / 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz'
                wf = init_func_preproc_wf(str(bold_file))

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    anat_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_aseg
        Segmentation of structural image, done with FreeSurfer.
    t1w_aparc
        Parcellation of structural image, done with FreeSurfer.
    anat_tpms
        List of tissue probability maps in T1w space
    template
        List of templates to target
    anat2std_xfm
        List of transform files, collated with templates
    std2anat_xfm
        List of inverse transform files, collated with templates
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w

    Outputs
    -------
    bold_t1
        BOLD series, resampled to T1w space
    bold_mask_t1
        BOLD series mask in T1w space
    bold_std
        BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    confounds
        TSV of confounds
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    aroma_noise_ics
        Noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix

    See Also
    --------

    * :py:func:`~fprodents.workflows.bold.stc.init_bold_stc_wf`
    * :py:func:`~fprodents.workflows.bold.hmc.init_bold_hmc_wf`
    * :py:func:`~fprodents.workflows.bold.t2s.init_bold_t2s_wf`
    * :py:func:`~fprodents.workflows.bold.registration.init_bold_t1_trans_wf`
    * :py:func:`~fprodents.workflows.bold.registration.init_bold_reg_wf`
    * :py:func:`~fprodents.workflows.bold.confounds.init_bold_confounds_wf`
    * :py:func:`~fprodents.workflows.bold.confounds.init_ica_aroma_wf`
    * :py:func:`~fprodents.workflows.bold.resampling.init_bold_std_trans_wf`
    * :py:func:`~fprodents.workflows.bold.resampling.init_bold_preproc_trans_wf`
    * :py:func:`~fprodents.workflows.bold.resampling.init_bold_surf_wf`

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from niworkflows.interfaces.nibabel import ApplyMask
    from niworkflows.interfaces.utility import KeySelect, DictMerge
    from nipype.interfaces.freesurfer.utils import LTAConvert

    from ...patch.utils import extract_entities

    mem_gb = {"filesize": 1, "resampled": 1, "largemem": 1}
    bold_tlen = 10

    # Have some options handy
    omp_nthreads = config.nipype.omp_nthreads
    spaces = config.workflow.spaces
    output_dir = str(config.execution.output_dir)

    # Extract BIDS entities and metadata from BOLD file(s)
    entities = extract_entities(bold_file)
    layout = config.execution.layout

    # Take first file as reference
    ref_file = pop_file(bold_file)
    metadata = layout.get_metadata(ref_file)

    echo_idxs = listify(entities.get("echo", []))
    multiecho = len(echo_idxs) > 2
    if len(echo_idxs) == 1:
        config.loggers.workflow.warning(
            f"Running a single echo <{ref_file}> from a seemingly multi-echo dataset."
        )
        bold_file = ref_file  # Just in case - drop the list

    if len(echo_idxs) == 2:
        raise RuntimeError(
            "Multi-echo processing requires at least three different echos (found two)."
        )

    if multiecho:
        # Drop echo entity for future queries, have a boolean shorthand
        entities.pop("echo", None)
        # reorder echoes from shortest to largest
        tes, bold_file = zip(*sorted([(layout.get_metadata(bf)["EchoTime"], bf)
                                      for bf in bold_file]))
        ref_file = bold_file[0]  # Reset reference to be the shortest TE

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    config.loggers.workflow.debug(
        "Creating bold processing workflow for <%s> (%.2f GB / %d TRs). "
        "Memory resampled/largemem=%.2f/%.2f GB.",
        ref_file,
        mem_gb["filesize"],
        bold_tlen,
        mem_gb["resampled"],
        mem_gb["largemem"],
    )

    # Find associated sbref, if possible
    entities["suffix"] = "sbref"
    entities["extension"] = ["nii", "nii.gz"]  # Overwrite extensions
    sbref_files = layout.get(return_type="file", **entities)

    sbref_msg = f"No single-band-reference found for {os.path.basename(ref_file)}."
    if sbref_files and "sbref" in config.workflow.ignore:
        sbref_msg = "Single-band reference file(s) found and ignored."
    elif sbref_files:
        sbref_msg = "Using single-band reference file(s) {}.".format(",".join(
            [os.path.basename(sbf) for sbf in sbref_files]))
    config.loggers.workflow.info(sbref_msg)

    # Check whether STC must/can be run
    run_stc = (bool(metadata.get("SliceTiming"))
               and 'slicetiming' not in config.workflow.ignore)

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "bold_file",
            "ref_file",
            "bold_ref_xfm",
            "n_dummy_scans",
            "validation_report",
            "subjects_dir",
            "subject_id",
            "anat_preproc",
            "anat_mask",
            "anat_dseg",
            "anat_tpms",
            "anat2std_xfm",
            "std2anat_xfm",
            "template",
            "anat2fsnative_xfm",
            "fsnative2anat_xfm",
        ]),
        name="inputnode",
    )
    inputnode.inputs.bold_file = bold_file

    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "bold_mask",
            "bold_t1",
            "bold_t1_ref",
            "bold_mask_t1",
            "bold_std",
            "bold_std_ref",
            "bold_mask_std",
            "bold_native",
            "surfaces",
            "confounds",
            "aroma_noise_ics",
            "melodic_mix",
            "nonaggr_denoised_file",
            "confounds_metadata",
        ]),
        name="outputnode",
    )

    # Generate a brain-masked conversion of the t1w and bold reference images
    t1w_brain = pe.Node(ApplyMask(), name="t1w_brain")

    lta_convert = pe.Node(LTAConvert(out_fsl=True, out_itk=True),
                          name="lta_convert")

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=["bold_file"]),
                         name="boldbuffer")

    summary = pe.Node(
        FunctionalSummary(
            slice_timing=run_stc,
            registration="FSL",
            registration_dof=config.workflow.bold2t1w_dof,
            registration_init=config.workflow.bold2t1w_init,
            pe_direction=metadata.get("PhaseEncodingDirection"),
            echo_idx=echo_idxs,
            tr=metadata.get("RepetitionTime"),
            distortion_correction="<not implemented>",
        ),
        name="summary",
        mem_gb=config.DEFAULT_MEMORY_MIN_GB,
        run_without_submitting=True,
    )
    summary.inputs.dummy_scans = config.workflow.dummy_scans

    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        metadata=metadata,
        output_dir=output_dir,
        spaces=spaces,
        use_aroma=config.workflow.use_aroma,
    )

    # fmt:off
    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_t1', 'inputnode.bold_t1'),
            ('bold_t1_ref', 'inputnode.bold_t1_ref'),
            ('bold_mask_t1', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surf_files'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
        ]),
    ])
    # fmt:on

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension="t"),
                         name="bold_split",
                         mem_gb=mem_gb["filesize"] * 3)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(
        bold2t1w_dof=config.workflow.bold2t1w_dof,
        bold2t1w_init=config.workflow.bold2t1w_init,
        mem_gb=mem_gb["resampled"],
        name="bold_reg_wf",
        omp_nthreads=omp_nthreads,
        use_compression=False,
    )

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(
        name="bold_t1_trans_wf",
        use_fieldwarp=False,
        multiecho=multiecho,
        mem_gb=mem_gb["resampled"],
        omp_nthreads=omp_nthreads,
        use_compression=False,
    )

    t1w_mask_bold_tfm = pe.Node(ApplyTransforms(interpolation="MultiLabel"),
                                name="t1w_mask_bold_tfm",
                                mem_gb=0.1)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb["largemem"],
        metadata=metadata,
        regressors_all_comps=config.workflow.regressors_all_comps,
        regressors_fd_th=config.workflow.regressors_fd_th,
        regressors_dvars_th=config.workflow.regressors_dvars_th,
        name="bold_confounds_wf",
    )
    bold_confounds_wf.get_node("inputnode").inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb["resampled"],
        omp_nthreads=omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=False,
        name="bold_bold_trans_wf",
    )
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc:
        bold_stc_wf = init_bold_stc_wf(name="bold_stc_wf", metadata=metadata)
        # fmt:off
        workflow.connect([
            (inputnode, bold_stc_wf, [("n_dummy_scans", "inputnode.skip_vols")
                                      ]),
            (bold_stc_wf, boldbuffer, [("outputnode.stc_file", "bold_file")]),
        ])
        # fmt:on
        if not multiecho:
            # fmt:off
            workflow.connect([(inputnode, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
            # fmt:on
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name="meepi_echos")
            meepi_echos.iterables = ("bold_file", bold_file)
            # fmt:off
            workflow.connect([(meepi_echos, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
            # fmt:on
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        # fmt:off
        workflow.connect([(inputnode, boldbuffer, [('bold_file', 'bold_file')])
                          ])
        # fmt:on
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ("bold_file", bold_file)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from niworkflows.func.util import init_skullstrip_bold_wf

        skullstrip_bold_wf = init_skullstrip_bold_wf(name="skullstrip_bold_wf")

        inputnode.inputs.bold_file = ref_file  # Replace reference w first echo

        join_echos = pe.JoinNode(
            niu.IdentityInterface(fields=["bold_files"]),
            joinsource=("meepi_echos" if run_stc is True else "boldbuffer"),
            joinfield=["bold_files"],
            name="join_echos",
        )

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(
            echo_times=tes,
            mem_gb=mem_gb["resampled"],
            omp_nthreads=omp_nthreads,
            name="bold_t2smap_wf",
        )

        # fmt:off
        workflow.connect([
            (skullstrip_bold_wf, join_echos,
             [('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [('bold_files', 'inputnode.bold_file')]),
        ])
        # fmt:on

    # MAIN WORKFLOW STRUCTURE #######################################################
    # fmt:off
    workflow.connect([
        (inputnode, bold_reg_wf, [('anat_preproc', 'inputnode.t1w_brain'),
                                  ('ref_file', 'inputnode.ref_bold_brain')]),
        (inputnode, t1w_brain, [('anat_preproc', 'in_file'),
                                ('anat_mask', 'in_mask')]),
        # convert bold reference LTA transform to other formats
        (inputnode, lta_convert, [('bold_ref_xfm', 'in_lta')]),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        (inputnode, summary, [('n_dummy_scans', 'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('anat_mask', 'inputnode.t1w_mask'),
                                       ('ref_file', 'inputnode.ref_bold_brain')
                                       ]),
        (t1w_brain, bold_t1_trans_wf, [('out_file', 'inputnode.t1w_brain')]),
        (lta_convert, bold_t1_trans_wf, [('out_itk', 'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.bold2anat',
                                          'inputnode.bold2anat')]),
        (bold_t1_trans_wf, outputnode, [('outputnode.bold_t1', 'bold_t1'),
                                        ('outputnode.bold_t1_ref',
                                         'bold_t1_ref')]),
        # transform T1 mask to BOLD
        (inputnode, t1w_mask_bold_tfm, [('anat_mask', 'input_image'),
                                        ('ref_file', 'reference_image')]),
        (bold_reg_wf, t1w_mask_bold_tfm, [('outputnode.anat2bold',
                                           'transforms')]),
        (t1w_mask_bold_tfm, outputnode, [('output_image', 'bold_mask')]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('anat_tpms', 'inputnode.anat_tpms'),
                                        ('anat_mask', 'inputnode.t1w_mask')]),
        (lta_convert, bold_confounds_wf, [('out_fsl', 'inputnode.movpar_file')
                                          ]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.anat2bold',
                                           'inputnode.anat2bold')]),
        (inputnode, bold_confounds_wf, [('n_dummy_scans',
                                         'inputnode.skip_vols')]),
        (t1w_mask_bold_tfm, bold_confounds_wf, [('output_image',
                                                 'inputnode.bold_mask')]),
        (bold_confounds_wf, outputnode, [('outputnode.confounds_file',
                                          'confounds')]),
        (bold_confounds_wf, outputnode, [('outputnode.confounds_metadata',
                                          'confounds_metadata')]),
        # Connect bold_bold_trans_wf
        (inputnode, bold_bold_trans_wf, [('ref_file', 'inputnode.bold_ref')]),
        (t1w_mask_bold_tfm, bold_bold_trans_wf, [('output_image',
                                                  'inputnode.bold_mask')]),
        (bold_split, bold_bold_trans_wf, [('out_files', 'inputnode.bold_file')
                                          ]),
        (lta_convert, bold_bold_trans_wf, [('out_itk', 'inputnode.hmc_xforms')
                                           ]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])
    # fmt:on

    # for standard EPI data, pass along correct file
    if not multiecho:
        # fmt:off
        workflow.connect([
            (inputnode, func_derivatives_wf, [('bold_file',
                                               'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf, [('outputnode.bold',
                                                      'inputnode.bold')]),
            (bold_split, bold_t1_trans_wf, [('out_files',
                                             'inputnode.bold_split')]),
        ])
        # fmt:on
    else:  # for meepi, create and use optimal combination
        # fmt:off
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf,
             [(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [('outputnode.bold',
                                                       'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf, [('outputnode.bold',
                                               'inputnode.bold')]),
            (bold_t2s_wf, bold_t1_trans_wf, [('outputnode.bold',
                                              'inputnode.bold_split')]),
        ])
        # fmt:on

    # Map final BOLD mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(("T1w", "anat")):
        from niworkflows.interfaces.fixes import (
            FixHeaderApplyTransforms as ApplyTransforms, )

        boldmask_to_t1w = pe.Node(
            ApplyTransforms(interpolation="MultiLabel"),
            name="boldmask_to_t1w",
            mem_gb=0.1,
        )
        # fmt:off
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.bold2anat',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (t1w_mask_bold_tfm, boldmask_to_t1w, [('output_image',
                                                   'input_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_t1')]),
        ])
        # fmt:on

    if nonstd_spaces.intersection(("func", "run", "bold", "boldref", "sbref")):
        # fmt:off
        workflow.connect([
            (bold_bold_trans_wf, outputnode, [('outputnode.bold',
                                               'bold_native')]),
            (bold_bold_trans_wf, func_derivatives_wf,
             [('outputnode.bold_ref', 'inputnode.bold_native_ref'),
              ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        ])
        # fmt:on

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            mem_gb=mem_gb["resampled"],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            name="bold_std_trans_wf",
            use_compression=not config.execution.low_mem,
            use_fieldwarp=False,
        )
        # fmt:off
        workflow.connect([
            (inputnode, bold_std_trans_wf,
             [('template', 'inputnode.templates'),
              ('anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('bold_file', 'inputnode.name_source')]),
            (t1w_mask_bold_tfm, bold_std_trans_wf, [('output_image',
                                                     'inputnode.bold_mask')]),
            (lta_convert, bold_std_trans_wf, [('out_itk',
                                               'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_std_trans_wf, [('outputnode.bold2anat',
                                               'inputnode.bold2anat')]),
            (bold_std_trans_wf, outputnode,
             [('outputnode.bold_std', 'bold_std'),
              ('outputnode.bold_std_ref', 'bold_std_ref'),
              ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])
        # fmt:on

        if not multiecho:
            # fmt:off
            workflow.connect([
                (bold_split, bold_std_trans_wf, [("out_files",
                                                  "inputnode.bold_split")]),
            ])
            # fmt:on
        else:
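            # Clone the splitter so the optimally combined multi-echo series is
            # also broken into 3D volumes before standard-space resampling.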
            split_opt_comb = bold_split.clone(name="split_opt_comb")
            # fmt:off
            workflow.connect([(bold_t2s_wf, split_opt_comb,
                               [('outputnode.bold', 'in_file')]),
                              (split_opt_comb, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])
            # fmt:on

        # func_derivatives_wf internally parametrizes over snapshotted spaces.
        # fmt:off
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.spatial_reference',
                 'inputnode.spatial_reference'),
                ('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('outputnode.bold_std', 'inputnode.bold_std'),
                ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])
        # fmt:on

        if config.workflow.use_aroma:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf

            ica_aroma_wf = init_ica_aroma_wf(
                mem_gb=mem_gb["resampled"],
                metadata=metadata,
                omp_nthreads=omp_nthreads,
                use_fieldwarp=False,
                err_on_aroma_warn=config.workflow.aroma_err_on_warn,
                aroma_melodic_dim=config.workflow.aroma_melodic_dim,
                name="ica_aroma_wf",
            )

            join = pe.Node(
                niu.Function(output_names=["out_file"], function=_to_join),
                name="aroma_confounds",
            )

            mrg_conf_metadata = pe.Node(
                niu.Merge(2),
                name="merge_confound_metadata",
                run_without_submitting=True,
            )
            mrg_conf_metadata2 = pe.Node(
                DictMerge(),
                name="merge_confound_metadata2",
                run_without_submitting=True,
            )
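            # `aroma_confounds` appends the AROMA regressors to the confounds
            # TSV, while the two merge nodes collate the matching JSON metadata
            # into a single dictionary for the outputs.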

            # fmt:off
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf,
                 [('bold_file', 'inputnode.name_source'),
                  ('n_dummy_scans', 'inputnode.skip_vols')]),
                (lta_convert, ica_aroma_wf, [('out_fsl',
                                              'inputnode.movpar_file')]),
                (bold_confounds_wf, join, [('outputnode.confounds_file',
                                            'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                 [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join, [('outputnode.aroma_confounds',
                                       'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                 [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                 [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                  ('outputnode.melodic_mix', 'melodic_mix'),
                  ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')
                  ]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict',
                                                   'confounds_metadata')]),
                (bold_std_trans_wf, ica_aroma_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
            ])
            # fmt:on

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        carpetplot_wf = init_carpetplot_wf(
            mem_gb=mem_gb["resampled"],
            metadata=metadata,
            name="carpetplot_wf",
        )
        # Xform to 'Fischer344' is always computed.
        carpetplot_select_std = pe.Node(
            KeySelect(fields=["std2anat_xfm"], key="Fischer344"),
            name="carpetplot_select_std",
            run_without_submitting=True,
        )
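        # KeySelect picks the std2anat_xfm entry whose key matches the template
        # named above from the parametric (per-template) input lists.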

        # fmt:off
        workflow.connect([
            (inputnode, carpetplot_select_std, [('std2anat_xfm',
                                                 'std2anat_xfm'),
                                                ('template', 'keys')]),
            (carpetplot_select_std, carpetplot_wf,
             [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             carpetplot_wf, [('outputnode.bold', 'inputnode.bold')]),
            (t1w_mask_bold_tfm, carpetplot_wf, [('output_image',
                                                 'inputnode.bold_mask')]),
            (bold_reg_wf, carpetplot_wf, [('outputnode.anat2bold',
                                           'inputnode.anat2bold')]),
            (bold_confounds_wf, carpetplot_wf, [('outputnode.confounds_file',
                                                 'inputnode.confounds_file')]),
        ])
        # fmt:on

    # REPORTING ############################################################
    ds_report_summary = pe.Node(
        DerivativesDataSink(desc="summary",
                            datatype="figures",
                            dismiss_entities=("echo", )),
        name="ds_report_summary",
        run_without_submitting=True,
        mem_gb=config.DEFAULT_MEMORY_MIN_GB,
    )

    ds_report_validation = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            desc="validation",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_validation",
        run_without_submitting=True,
        mem_gb=config.DEFAULT_MEMORY_MIN_GB,
    )

    # fmt:off
    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (inputnode, ds_report_validation, [('validation_report', 'in_file')]),
    ])
    # fmt:on

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_report"):
            workflow.get_node(node).inputs.base_directory = output_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
Example #14
def init_func_preproc_wf(
    #aroma_melodic_dim,
    bold2t1w_dof,
    bold_file,
    cifti_output,
    debug,
    dummy_scans,
    #err_on_aroma_warn,
    fmap_bspline,
    fmap_demean,
    force_syn,
    freesurfer,
    ignore,
    low_mem,
    medial_surface_nan,
    omp_nthreads,
    output_dir,
    #regressors_all_comps,
    #regressors_dvars_th,
    #regressors_fd_th,
    reportlets_dir,
    spaces,
    t2s_coreg,
    #use_aroma,
    use_bbr,
    use_syn,
    pcasl,
    #aslcontext,
    layout=None,
    num_bold=1,
):
    """
    This workflow controls the functional preprocessing stages of *ASLPrep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from collections import namedtuple
            from ...niworkflows.utils.spaces import SpatialReferences
            from aslprep.workflows.bold import init_func_preproc_wf
            BIDSLayout = namedtuple('BIDSLayout', ['root'])
            wf = init_func_preproc_wf(
                aroma_melodic_dim=-200,
                bold2t1w_dof=9,
                bold_file='/completely/made/up/path/sub-01_task-nback_bold.nii.gz',
                cifti_output=False,
                debug=False,
                dummy_scans=None,
                err_on_aroma_warn=False,
                fmap_bspline=True,
                fmap_demean=True,
                force_syn=True,
                freesurfer=True,
                ignore=[],
                low_mem=False,
                medial_surface_nan=False,
                omp_nthreads=1,
                output_dir='.',
                regressors_all_comps=False,
                regressors_dvars_th=1.5,
                regressors_fd_th=0.5,
                reportlets_dir='.',
                spaces=SpatialReferences(
                    spaces=['MNI152Lin',
                            ('fsaverage', {'density': '10k'}),
                            'T1w',
                            'fsnative'],
                    checkpoint=True),
                t2s_coreg=False,
                use_aroma=False,
                use_bbr=True,
                use_syn=True,
                layout=BIDSLayout('.'),
                num_bold=1,
            )

    Parameters
    ----------
    aroma_melodic_dim : :obj:`int`
        Maximum number of components identified by MELODIC within ICA-AROMA
        (default is -200, i.e., no limitation).
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    bold_file : :obj:`str`
        BOLD series NIfTI file
    cifti_output : :obj:`bool`
        Generate bold CIFTI file in output spaces
    debug : :obj:`bool`
        Enable debugging outputs
    dummy_scans : :obj:`int` or None
        Number of volumes to consider as non steady state
    err_on_aroma_warn : :obj:`bool`
        Do not crash on ICA-AROMA errors
    fmap_bspline : :obj:`bool`
        **Experimental**: Fit B-Spline field using least-squares
    fmap_demean : :obj:`bool`
        Demean voxel-shift map during unwarp
    force_syn : :obj:`bool`
        **Temporary**: Always run SyN-based SDC
    freesurfer : :obj:`bool`
        Enable FreeSurfer functional registration (bbregister) and resampling
        BOLD series to FreeSurfer surface meshes.
    ignore : :obj:`list`
        Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
    low_mem : :obj:`bool`
        Write uncompressed .nii files in some cases to reduce memory usage
    medial_surface_nan : :obj:`bool`
        Replace medial wall values with NaNs on functional GIFTI files
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    output_dir : :obj:`str`
        Directory in which to save derivatives
    regressors_all_comps : :obj:`bool`
        Return all CompCor component time series instead of the top fraction
    regressors_dvars_th : :obj:`float`
        Criterion for flagging DVARS outliers
    regressors_fd_th : :obj:`float`
        Criterion for flagging framewise displacement outliers
    reportlets_dir : :obj:`str`
        Absolute path of a directory in which reportlets will be temporarily stored
    spaces : :py:class:`~...niworkflows.utils.spaces.SpatialReferences`
        A container for storing, organizing, and parsing spatial normalizations. Composed of
        :py:class:`~...niworkflows.utils.spaces.Reference` objects representing spatial references.
        Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
        (e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
        (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
        the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
        dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
        would lead to resampling on a 2mm resolution of the space).
    t2s_coreg : :obj:`bool`
        For multiecho EPI, use the calculated T2*-map for T2*-driven coregistration
    use_aroma : :obj:`bool`
        Perform ICA-AROMA on MNI-resampled functional series
    use_bbr : :obj:`bool` or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
        When using ``t2s_coreg``, BBR will be enabled by default unless
        explicitly specified otherwise.
    use_syn : :obj:`bool`
        **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
        If fieldmaps are present and enabled, this is not run, by default.
    layout : :py:class:`~bids.layout.BIDSLayout`
        BIDSLayout structure to enable metadata retrieval
    num_bold : :obj:`int`
        Total number of BOLD files that have been set for preprocessing
        (default is 1)

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_brain
        Skull-stripped ``t1w_preproc``
    t1w_mask
        Mask of the skull-stripped template image
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_aseg
        Segmentation of structural image, done with FreeSurfer.
    t1w_aparc
        Parcellation of structural image, done with FreeSurfer.
    t1w_tpms
        List of tissue probability maps in T1w space
    template
        Name of the template (parametric)
    anat2std_xfm
        ANTs-compatible affine-and-warp transform file (parametric)
    std2anat_xfm
        ANTs-compatible affine-and-warp transform file (inverse) (parametric)
    joint_template
        List of templates to target
    joint_anat2std_xfm
        List of transform files, collated with templates
    joint_std2anat_xfm
        List of inverse transform files, collated with templates
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w

    Outputs
    -------
    bold_t1
        BOLD series, resampled to T1w space
    bold_mask_t1
        BOLD series mask in T1w space
    bold_std
        BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    confounds
        TSV of confounds
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    aroma_noise_ics
        Noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix
    bold_cifti
        BOLD CIFTI image
    cifti_variant
        Combination of target spaces for ``bold_cifti``

    See Also
    --------

    * :py:func:`~...niworkflows.func.util.init_bold_reference_wf`
    * :py:func:`~aslprep.workflows.bold.stc.init_bold_stc_wf`
    * :py:func:`~aslprep.workflows.bold.hmc.init_bold_hmc_wf`
    * :py:func:`~aslprep.workflows.bold.t2s.init_bold_t2s_wf`
    * :py:func:`~aslprep.workflows.bold.registration.init_bold_t1_trans_wf`
    * :py:func:`~aslprep.workflows.bold.registration.init_bold_reg_wf`
    * :py:func:`~aslprep.workflows.bold.confounds.init_bold_confounds_wf`
    * :py:func:`~aslprep.workflows.bold.confounds.init_ica_aroma_wf`
    * :py:func:`~aslprep.workflows.bold.resampling.init_bold_std_trans_wf`
    * :py:func:`~aslprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
    * :py:func:`~aslprep.workflows.bold.resampling.init_bold_surf_wf`
    * :py:func:`~aslprep.workflows.bold.cbf.init_cbf_compt_wf`
    * :py:func:`~aslprep.workflows.bold.cbf.init_cbfqc_compt_wf`
    * :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
    * :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
    * :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
    * :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
    * :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`

    """
    from sdcflows.workflows.base import init_sdc_estimate_wf, fieldmap_wrangler

    ref_file = bold_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10
    multiecho = isinstance(bold_file, list)

    if multiecho:
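        # Pair each echo with its EchoTime metadata and use the shortest-TE
        # echo as the reference image for this run.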
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
        ref_file = dict(zip(tes, bold_file))[min(tes)]

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    LOGGER.log(
        25, ('Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
             'Memory resampled/largemem=%.2f/%.2f GB.'), ref_file,
        mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # For doc building purposes
    if not hasattr(layout, 'parse_file_entities'):
        LOGGER.log(25, 'No valid layout: building empty workflow.')
        metadata = {
            'RepetitionTime': 2.0,
            'SliceTiming': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
            'PhaseEncodingDirection': 'j',
        }
        fmaps = {
            'phasediff': [{
                'phases':
                [('sub-03/ses-2/fmap/sub-03_ses-2_run-1_phasediff.nii.gz', {
                    'EchoTime1': 0.0,
                    'EchoTime2': 0.00246
                })],
                'magnitude':
                [('sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude1.nii.gz',
                  {}),
                 ('sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude2.nii.gz', {})
                 ]
            }]
        }
        # Placeholder ASL context path so the CBF workflow can still be wired
        # up when no valid layout is available (doc-building only).
        aslcontext = bold_file.replace('.nii.gz', '_ASLContext.tsv')
        run_stc = True
        multiecho = False
    else:
        # Find associated sbref, if possible
        entities = layout.parse_file_entities(ref_file)
        file1 = os.path.abspath(bold_file)
        aslcontext = file1.replace('.nii.gz', '_ASLContext.tsv')
        entities['suffix'] = 'sbref'
        entities['extension'] = ['nii', 'nii.gz']  # Overwrite extensions
        files = layout.get(return_type='file', **entities)
        refbase = os.path.basename(ref_file)
        if 'sbref' in ignore:
            LOGGER.info("Single-band reference files ignored.")
        elif files and multiecho:
            LOGGER.warning("Single-band reference found, but not supported in "
                           "multi-echo workflows at this time. Ignoring.")
        elif files:
            sbref_file = files[0]
            sbbase = os.path.basename(sbref_file)
            if len(files) > 1:
                LOGGER.warning(
                    "Multiple single-band reference files found for {}; using "
                    "{}".format(refbase, sbbase))
            else:
                LOGGER.log(
                    25, "Using single-band reference file {}".format(sbbase))
        else:
            LOGGER.log(25,
                       "No single-band-reference found for {}".format(refbase))

        metadata = layout.get_metadata(ref_file)

        # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
        fmaps = None
        if 'fieldmaps' not in ignore:
            fmaps = fieldmap_wrangler(layout,
                                      ref_file,
                                      use_syn=use_syn,
                                      force_syn=force_syn)
        elif use_syn or force_syn:
            # If fieldmaps are not enabled, activate SyN-SDC in unforced (False) mode
            fmaps = {'syn': False}

        # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
        run_stc = (bool(metadata.get("SliceTiming"))
                   and 'slicetiming' not in ignore
                   and (_get_series_len(ref_file) > 4 or "TooShort"))

    # Check if MEEPI for T2* coregistration target
    if t2s_coreg and not multiecho:
        LOGGER.warning(
            "No multiecho BOLD images found for T2* coregistration. "
            "Using standard EPI-T1 coregistration.")
        t2s_coreg = False

    # By default, force-bbr for t2s_coreg unless user specifies otherwise
    if t2s_coreg and use_bbr is None:
        use_bbr = True

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__desc__ = """

Functional data preprocessing

: For each of the {num_bold} ASL runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=num_bold)

    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_file', 'subjects_dir', 'subject_id', 't1w_preproc', 't1w_brain',
        't1w_mask', 't1w_dseg', 't1w_tpms', 't1w_aseg', 't1w_aparc',
        'anat2std_xfm', 'std2anat_xfm', 'template', 'joint_anat2std_xfm',
        'joint_std2anat_xfm', 'joint_template', 't1w2fsnative_xfm',
        'fsnative2t1w_xfm'
    ]),
                        name='inputnode')
    inputnode.inputs.bold_file = bold_file
    if sbref_file is not None:
        from ...niworkflows.interfaces.images import ValidateImage
        val_sbref = pe.Node(ValidateImage(in_file=sbref_file),
                            name='val_sbref')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1',
        'bold_aparc_t1', 'bold_std', 'bold_std_ref', 'bold_mask_std',
        'bold_aseg_std', 'bold_aparc_std', 'bold_native', 'bold_cifti',
        'cbf_t1', 'cbf_std', 'meancbf_t1', 'meancbf_std', 'score_t1',
        'score_std', 'avgscore_t1', 'avgscore_std', 'avgscore_cifti',
        'scrub_t1', 'scrub_std', 'basil_t1', 'basil_std', 'pv_t1', 'pv_std',
        'pv_native', 'cifti_variant', 'cifti_metadata', 'cifti_density',
        'surfaces', 'confounds', 'aroma_noise_ics', 'melodic_mix',
        'nonaggr_denoised_file', 'confounds_metadata', 'qc_file'
    ]),
                         name='outputnode')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                         name='boldbuffer')

    summary = pe.Node(FunctionalSummary(
        slice_timing=run_stc,
        registration=('FSL', 'FreeSurfer')[freesurfer],
        registration_dof=bold2t1w_dof,
        pe_direction=metadata.get("PhaseEncodingDirection"),
        tr=metadata.get("RepetitionTime")),
                      name='summary',
                      mem_gb=DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)
    summary.inputs.dummy_scans = dummy_scans

    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=output_dir,
        spaces=spaces,
        #use_aroma=use_aroma,
    )

    workflow.connect([
        (
            outputnode,
            func_derivatives_wf,
            [
                ('bold_t1', 'inputnode.bold_t1'),
                ('bold_t1_ref', 'inputnode.bold_t1_ref'),
                ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
                ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
                ('bold_mask_t1', 'inputnode.bold_mask_t1'),
                ('bold_native', 'inputnode.bold_native'),
                ('confounds', 'inputnode.confounds'),
                ('surfaces', 'inputnode.surf_files'),
                #('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
                #('melodic_mix', 'inputnode.melodic_mix'),
                #('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
                ('bold_cifti', 'inputnode.bold_cifti'),
                ('cifti_variant', 'inputnode.cifti_variant'),
                ('cifti_metadata', 'inputnode.cifti_metadata'),
                ('cifti_density', 'inputnode.cifti_density'),
                ('confounds_metadata', 'inputnode.confounds_metadata'),
                ('cbf_t1', 'inputnode.cbf_t1'),
                ('meancbf_t1', 'inputnode.meancbf_t1'),
                ('score_t1', 'inputnode.score_t1'),
                ('avgscore_t1', 'inputnode.avgscore_t1'),
                ('scrub_t1', 'inputnode.scrub_t1'),
                ('basil_t1', 'inputnode.basil_t1'),
                ('pv_t1', 'inputnode.pv_t1'),
                #('qc_file', 'inputnode.qc_file'),
            ]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
    bold_reference_wf.inputs.inputnode.dummy_scans = dummy_scans
    if sbref_file is not None:
        workflow.connect([
            (val_sbref, bold_reference_wf, [('out_file',
                                             'inputnode.sbref_file')]),
        ])

    # Top-level BOLD splitter
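    # The 4D series is split into per-volume 3D files so that head-motion,
    # fieldmap, and registration transforms can later be applied in a single
    # interpolation step.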
    bold_split = pe.Node(FSLSplit(dimension='t'),
                         name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(name='bold_reg_wf',
                                   freesurfer=freesurfer,
                                   use_bbr=use_bbr,
                                   bold2t1w_dof=bold2t1w_dof,
                                   mem_gb=mem_gb['resampled'],
                                   omp_nthreads=omp_nthreads,
                                   use_compression=False)

    # compute the CBF here
    compt_cbf_wf = init_cbf_compt_wf(name='compt_cbf_wf',
                                     mem_gb=mem_gb['filesize'],
                                     omp_nthreads=omp_nthreads,
                                     pcasl=pcasl,
                                     metadata=metadata,
                                     aslcontext=aslcontext)

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=bool(fmaps),
                                             multiecho=multiecho,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        #regressors_all_comps=regressors_all_comps,
        #regressors_fd_th=regressors_fd_th,
        #regressors_dvars_th=regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not low_mem,
        use_fieldwarp=bool(fmaps),
        name='bold_bold_trans_wf')
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf, [('outputnode.skip_vols',
                                               'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([(bold_reference_wf, bold_stc_wf, [
                ('outputnode.bold_file', 'inputnode.bold_file')
            ])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([(meepi_echos, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([(bold_reference_wf, boldbuffer,
                           [('outputnode.bold_file', 'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_estimate_wf(fmaps,
                                       metadata,
                                       omp_nthreads=omp_nthreads,
                                       debug=debug)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from ...niworkflows.func.util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        inputnode.inputs.bold_file = ref_file  # Replace reference w first echo

        join_echos = pe.JoinNode(
            niu.IdentityInterface(fields=['bold_files']),
            joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
            joinfield=['bold_files'],
            name='join_echos')
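        # join_echos gathers the per-echo (iterable) streams back into one list
        # so the T2* workflow can compute the optimal combination.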

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       t2s_coreg=t2s_coreg,
                                       name='bold_t2smap_wf')

        workflow.connect([
            (skullstrip_bold_wf, join_echos,
             [('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [('bold_files', 'inputnode.bold_file')]),
        ])

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        # Generate early reference
        (inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file')]),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf,
         [('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
          ('outputnode.bold_file', 'inputnode.bold_file')]),
        (bold_reference_wf, summary, [('outputnode.algo_dummy_scans',
                                       'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (
            inputnode,
            bold_reg_wf,
            [
                ('t1w_brain', 'inputnode.t1w_brain'),
                ('t1w_dseg', 'inputnode.t1w_dseg'),
                # Undefined if --fs-no-reconall, but this is safe
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm')
            ]),
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('t1w_brain', 'inputnode.t1w_brain'),
                                       ('t1w_mask', 'inputnode.t1w_mask'),
                                       ('t1w_aseg', 'inputnode.t1w_aseg'),
                                       ('t1w_aparc', 'inputnode.t1w_aparc')]),
        # unused if multiecho, but this is safe
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms',
                                          'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.itk_bold_to_t1',
                                          'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode,
         [('outputnode.bold_t1', 'bold_t1'),
          ('outputnode.bold_t1_ref', 'bold_t1_ref'),
          ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
          ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (inputnode, bold_sdc_wf, [('t1w_brain', 'inputnode.t1w_brain')]),
        (bold_reference_wf, bold_sdc_wf,
         [('outputnode.ref_image', 'inputnode.epi_file'),
          ('outputnode.ref_image_brain', 'inputnode.epi_brain'),
          ('outputnode.bold_mask', 'inputnode.epi_mask')]),
        (bold_sdc_wf, bold_t1_trans_wf, [('outputnode.out_warp',
                                          'inputnode.fieldwarp')]),
        (bold_sdc_wf, bold_bold_trans_wf,
         [('outputnode.out_warp', 'inputnode.fieldwarp'),
          ('outputnode.epi_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')
                                ]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('t1w_tpms', 'inputnode.t1w_tpms'),
                                        ('t1w_mask', 'inputnode.t1w_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [('outputnode.movpar_file',
                                           'inputnode.movpar_file')]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [('outputnode.skip_vols',
                                                 'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_metadata', 'confounds_metadata'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [('out_files', 'inputnode.bold_file')]
         ),
        (bold_hmc_wf, bold_bold_trans_wf, [('outputnode.xforms',
                                            'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # cbf computation workflow
    workflow.connect([
        (bold_bold_trans_wf, compt_cbf_wf,
         [('outputnode.bold', 'inputnode.bold'),
          ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        (inputnode, compt_cbf_wf, [('t1w_tpms', 'inputnode.t1w_tpms')]),
        (bold_reg_wf, compt_cbf_wf, [('outputnode.itk_t1_to_bold',
                                      'inputnode.t1_bold_xform')]),
        (bold_reg_wf, compt_cbf_wf, [('outputnode.itk_bold_to_t1',
                                      'inputnode.itk_bold_to_t1')]),
        (inputnode, compt_cbf_wf, [('t1w_mask', 'inputnode.t1w_mask')]),
        #(compt_cbf_wf,func_derivatives_wf,[('outputnode.qc_file','inputnode.qc_file')]),
    ])

    # register BOLD to T1w

    if not t2s_coreg:
        workflow.connect([
            (bold_sdc_wf, bold_reg_wf, [('outputnode.epi_brain',
                                         'inputnode.ref_bold_brain')]),
            (bold_sdc_wf, bold_t1_trans_wf,
             [('outputnode.epi_brain', 'inputnode.ref_bold_brain'),
              ('outputnode.epi_mask', 'inputnode.ref_bold_mask')]),
        ])
    else:
        workflow.connect([
            # For t2s_coreg, replace EPI-to-T1w registration inputs
            (bold_t2s_wf, bold_reg_wf, [('outputnode.bold_ref_brain',
                                         'inputnode.ref_bold_brain')]),
            (bold_t2s_wf, bold_t1_trans_wf,
             [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
              ('outputnode.bold_mask', 'inputnode.ref_bold_mask')]),
        ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, func_derivatives_wf, [('bold_file',
                                               'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_split, bold_t1_trans_wf, [('out_files',
                                             'inputnode.bold_split')]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf,
             [(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [('outputnode.bold',
                                                       'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_t2s_wf, bold_t1_trans_wf, [('outputnode.bold',
                                              'inputnode.bold_split')]),
        ])

    if fmaps:
        from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
        # Report on BOLD correction
        fmap_unwarp_report_wf = init_sdc_unwarp_report_wf()
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [('t1w_dseg',
                                                 'inputnode.in_seg')]),
            (bold_reference_wf, fmap_unwarp_report_wf,
             [('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [('outputnode.itk_t1_to_bold',
                                                   'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [('outputnode.epi_corrected',
                                                   'inputnode.in_post')]),
        ])

        # Overwrite ``out_path_base`` of unwarping DataSinks
        for node in fmap_unwarp_report_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                fmap_unwarp_report_wf.get_node(
                    node).interface.out_path_base = 'aslprep'

        for node in bold_sdc_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                bold_sdc_wf.get_node(node).interface.out_path_base = 'aslprep'

        if 'syn' in fmaps:
            sdc_select_std = pe.Node(KeySelect(fields=['std2anat_xfm']),
                                     name='sdc_select_std',
                                     run_without_submitting=True)
            sdc_select_std.inputs.key = 'MNI152NLin2009cAsym'
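            # SyN-based SDC uses an atlas prior defined in MNI152NLin2009cAsym
            # space, so the matching std->anat transform is always selected here.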
            workflow.connect([
                (inputnode, sdc_select_std, [('joint_std2anat_xfm',
                                              'std2anat_xfm'),
                                             ('joint_template', 'keys')]),
                (sdc_select_std, bold_sdc_wf, [('std2anat_xfm',
                                                'inputnode.std2anat_xfm')]),
            ])

        if fmaps.get('syn') is True:  # SyN forced
            syn_unwarp_report_wf = init_sdc_unwarp_report_wf(
                name='syn_unwarp_report_wf', forcedsyn=True)
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [('t1w_dseg',
                                                    'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf,
                 [('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf,
                 [('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf, [('outputnode.syn_ref',
                                                      'inputnode.in_post')]),
            ])

            # Overwrite ``out_path_base`` of unwarping DataSinks
            for node in syn_unwarp_report_wf.list_node_names():
                if node.split('.')[-1].startswith('ds_'):
                    syn_unwarp_report_wf.get_node(
                        node).interface.out_path_base = 'aslprep'

    # Map final BOLD mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(('T1w', 'anat')):
        from ...niworkflows.interfaces.fixes import (FixHeaderApplyTransforms
                                                     as ApplyTransforms)

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                  float=True),
                                  name='boldmask_to_t1w',
                                  mem_gb=0.1)
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.itk_bold_to_t1',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             boldmask_to_t1w, [('outputnode.bold_mask', 'input_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_t1')]),
        ])
        workflow.connect([
            (compt_cbf_wf, bold_t1_trans_wf, [
                ('outputnode.out_cbf', 'inputnode.cbf'),
                ('outputnode.out_mean', 'inputnode.meancbf'),
                ('outputnode.out_score', 'inputnode.score'),
                ('outputnode.out_avgscore', 'inputnode.avgscore'),
                ('outputnode.out_scrub', 'inputnode.scrub'),
                ('outputnode.out_cbfb', 'inputnode.basil'),
                ('outputnode.out_cbfpv', 'inputnode.pv'),
            ]),
            (bold_t1_trans_wf, outputnode, [
                ('outputnode.cbf_t1', 'cbf_t1'),
                ('outputnode.meancbf_t1', 'meancbf_t1'),
                ('outputnode.score_t1', 'score_t1'),
                ('outputnode.avgscore_t1', 'avgscore_t1'),
                ('outputnode.scrub_t1', 'scrub_t1'),
                ('outputnode.basil_t1', 'basil_t1'),
                ('outputnode.pv_t1', 'pv_t1'),
            ]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):

        workflow.connect([
            (bold_bold_trans_wf, outputnode, [('outputnode.bold',
                                               'bold_native')]),
            (bold_bold_trans_wf, func_derivatives_wf,
             [('outputnode.bold_ref', 'inputnode.bold_native_ref'),
              ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
            (compt_cbf_wf, func_derivatives_wf, [
                ('outputnode.out_cbf', 'inputnode.cbf'),
                ('outputnode.out_mean', 'inputnode.meancbf'),
                ('outputnode.out_score', 'inputnode.score'),
                ('outputnode.out_avgscore', 'inputnode.avgscore'),
                ('outputnode.out_scrub', 'inputnode.scrub'),
                ('outputnode.out_cbfb', 'inputnode.basil'),
                ('outputnode.out_cbfpv', 'inputnode.pv'),
            ]),
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            name='bold_std_trans_wf',
            use_compression=not low_mem,
            use_fieldwarp=bool(fmaps),
        )
        workflow.connect([
            (inputnode, bold_std_trans_wf,
             [('joint_template', 'inputnode.templates'),
              ('joint_anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('bold_file', 'inputnode.name_source'),
              ('t1w_aseg', 'inputnode.bold_aseg'),
              ('t1w_aparc', 'inputnode.bold_aparc')]),
            (bold_hmc_wf, bold_std_trans_wf, [('outputnode.xforms',
                                               'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_std_trans_wf, [('outputnode.itk_bold_to_t1',
                                               'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             bold_std_trans_wf, [('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_std_trans_wf, [('outputnode.out_warp',
                                               'inputnode.fieldwarp')]),
            (bold_std_trans_wf, outputnode,
             [('outputnode.bold_std', 'bold_std'),
              ('outputnode.bold_std_ref', 'bold_std_ref'),
              ('outputnode.bold_mask_std', 'bold_mask_std')]),
            (compt_cbf_wf, bold_std_trans_wf, [
                ('outputnode.out_cbf', 'inputnode.cbf'),
                ('outputnode.out_mean', 'inputnode.meancbf'),
                ('outputnode.out_score', 'inputnode.score'),
                ('outputnode.out_avgscore', 'inputnode.avgscore'),
                ('outputnode.out_scrub', 'inputnode.scrub'),
                ('outputnode.out_cbfb', 'inputnode.basil'),
                ('outputnode.out_cbfpv', 'inputnode.pv'),
            ]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('outputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode,
                 [('outputnode.bold_aseg_std', 'bold_aseg_std'),
                  ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        # Xform to 'MNI152NLin2009cAsym' is always computed.
        carpetplot_select_std = pe.Node(KeySelect(fields=['std2anat_xfm'],
                                                  key='MNI152NLin2009cAsym'),
                                        name='carpetplot_select_std',
                                        run_without_submitting=True)

        carpetplot_wf = init_carpetplot_wf(mem_gb=mem_gb['resampled'],
                                           metadata=metadata,
                                           name='carpetplot_wf')

        workflow.connect([
            (inputnode, carpetplot_select_std, [('joint_std2anat_xfm',
                                                 'std2anat_xfm'),
                                                ('joint_template', 'keys')]),
            (carpetplot_select_std, carpetplot_wf,
             [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             carpetplot_wf, [('outputnode.bold', 'inputnode.bold'),
                             ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_reg_wf, carpetplot_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
            (bold_confounds_wf, carpetplot_wf, [('outputnode.confounds_file',
                                                 'inputnode.confounds_file')]),
        ])

        if not multiecho:
            workflow.connect([(bold_split, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])
        else:
            split_opt_comb = bold_split.clone(name='split_opt_comb')
            workflow.connect([(bold_t2s_wf, split_opt_comb,
                               [('outputnode.bold', 'in_file')]),
                              (split_opt_comb, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])

        # func_derivatives_wf internally parametrizes over snapshotted spaces.
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.spatial_reference',
                 'inputnode.spatial_reference'),
                ('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('outputnode.bold_std', 'inputnode.bold_std'),
                ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                ('outputnode.cbf_std', 'inputnode.cbf_std'),
                ('outputnode.meancbf_std', 'inputnode.meancbf_std'),
                ('outputnode.score_std', 'inputnode.score_std'),
                ('outputnode.avgscore_std', 'inputnode.avgscore_std'),
                ('outputnode.scrub_std', 'inputnode.scrub_std'),
                ('outputnode.basil_std', 'inputnode.basil_std'),
                ('outputnode.pv_std', 'inputnode.pv_std'),
            ]),
        ])

    # SURFACES ##################################################################################
    # Freesurfer
    freesurfer_spaces = spaces.get_fs_spaces()
    if freesurfer and freesurfer_spaces:
        LOGGER.log(25, 'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(mem_gb=mem_gb['resampled'],
                                         surface_spaces=freesurfer_spaces,
                                         medial_surface_nan=medial_surface_nan,
                                         name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf,
             [('t1w_preproc', 'inputnode.t1w_preproc'),
              ('subjects_dir', 'inputnode.subjects_dir'),
              ('subject_id', 'inputnode.subject_id'),
              ('t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1',
                                               'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
            (bold_surf_wf, func_derivatives_wf, [('outputnode.target',
                                                  'inputnode.surf_refs')]),
        ])

        # CIFTI output
        if cifti_output:
            from .resampling import init_bold_grayords_wf
            bold_grayords_wf = init_bold_grayords_wf(
                grayord_density=cifti_output,
                mem_gb=mem_gb['resampled'],
                repetition_time=metadata['RepetitionTime'])
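            # Grayordinates resampling combines the standard-space volumetric
            # series with the surface-sampled series into one CIFTI-2 file at
            # the requested density.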

            workflow.connect([
                (inputnode, bold_grayords_wf, [('subjects_dir',
                                                'inputnode.subjects_dir')]),
                (bold_std_trans_wf, bold_grayords_wf, [
                    ('outputnode.bold_std', 'inputnode.bold_std'),
                    ('outputnode.spatial_reference',
                     'inputnode.spatial_reference'),
                ]),
                (bold_surf_wf, bold_grayords_wf, [
                    ('outputnode.surfaces', 'inputnode.surf_files'),
                    ('outputnode.target', 'inputnode.surf_refs'),
                ]),
                (bold_grayords_wf, outputnode,
                 [('outputnode.cifti_bold', 'bold_cifti'),
                  ('outputnode.cifti_variant', 'cifti_variant'),
                  ('outputnode.cifti_metadata', 'cifti_metadata'),
                  ('outputnode.cifti_density', 'cifti_density')]),
            ])

    compt_qccbf_wf = init_cbfqc_compt_wf(name='compt_qccbf_wf',
                                         mem_gb=mem_gb['filesize'],
                                         omp_nthreads=omp_nthreads,
                                         bold_file=bold_file,
                                         metadata=metadata)
    workflow.connect([
        (bold_bold_trans_wf, compt_qccbf_wf, [('outputnode.bold_mask',
                                               'inputnode.bold_mask')]),
        (inputnode, compt_qccbf_wf, [('t1w_tpms', 'inputnode.t1w_tpms')]),
        (bold_reg_wf, compt_qccbf_wf, [('outputnode.itk_t1_to_bold',
                                        'inputnode.t1_bold_xform')]),
        (inputnode, compt_qccbf_wf, [('t1w_mask', 'inputnode.t1w_mask')]),
        (compt_cbf_wf, compt_qccbf_wf,
         [('outputnode.out_mean', 'inputnode.meancbf'),
          ('outputnode.out_avgscore', 'inputnode.avgscore'),
          ('outputnode.out_scrub', 'inputnode.scrub'),
          ('outputnode.out_cbfb', 'inputnode.basil'),
          ('outputnode.out_cbfpv', 'inputnode.pv')]),
        (bold_confounds_wf, compt_qccbf_wf, [('outputnode.confounds_file',
                                              'inputnode.confmat')]),
        (compt_qccbf_wf, outputnode, [('outputnode.qc_file', 'qc_file')]),
        (compt_qccbf_wf, func_derivatives_wf, [('outputnode.qc_file',
                                                'inputnode.qc_file')]),
    ])
    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        workflow.connect([
            (bold_std_trans_wf, compt_qccbf_wf, [('outputnode.bold_mask_std',
                                                  'inputnode.bold_mask_std')]),
        ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(DerivativesDataSink(desc='summary',
                                                    keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='validation', keep_dtype=True),
                                   name='ds_report_validation',
                                   run_without_submitting=True,
                                   mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation,
         [('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = reportlets_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
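A minimal call sketch for the ASLPrep variant above, restricted to the parameters in its current signature (this is an illustrative addition, not part of the original listing; the ``pcasl`` value and the ``SpatialReferences`` import path are assumptions):

    from collections import namedtuple
    from niworkflows.utils.spaces import SpatialReferences  # ASLPrep vendors this module
    from aslprep.workflows.bold import init_func_preproc_wf

    BIDSLayout = namedtuple('BIDSLayout', ['root'])  # stand-in layout, as in the docstring example

    wf = init_func_preproc_wf(
        bold2t1w_dof=9,
        bold_file='/completely/made/up/path/sub-01_task-nback_bold.nii.gz',
        cifti_output=False,
        debug=False,
        dummy_scans=None,
        fmap_bspline=True,
        fmap_demean=True,
        force_syn=True,
        freesurfer=True,
        ignore=[],
        low_mem=False,
        medial_surface_nan=False,
        omp_nthreads=1,
        output_dir='.',
        reportlets_dir='.',
        spaces=SpatialReferences(spaces=['MNI152Lin', 'T1w'], checkpoint=True),
        t2s_coreg=False,
        use_bbr=True,
        use_syn=True,
        pcasl=True,  # assumption: pseudo-continuous ASL labeling
        layout=BIDSLayout('.'),
        num_bold=1,
    )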
Example #15
def init_bold_t1_trans_wf(
    mem_gb,
    omp_nthreads,
    multiecho=False,
    use_fieldwarp=False,
    use_compression=True,
    name="bold_t1_trans_wf",
):
    """
    Co-register the reference BOLD image to T1w-space.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fprodents.workflows.bold.registration import init_bold_t1_trans_wf
            wf = init_bold_t1_trans_wf(mem_gb=3,
                                       omp_nthreads=1)

    Parameters
    ----------
    use_fieldwarp : :obj:`bool`
        Include SDC warp in single-shot transform from BOLD to T1
    multiecho : :obj:`bool`
        If multiecho data was supplied, HMC already performed
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    name : :obj:`str`
        Name of workflow (default: ``bold_t1_trans_wf``)

    Inputs
    ------
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    ref_bold_brain
        Reference image to which BOLD series is aligned
        If ``use_fieldwarp == True``, ``ref_bold_brain`` should be unwarped
    t1w_brain
        Skull-stripped bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    bold_split
        Individual 3D BOLD volumes, not motion corrected
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    bold2anat
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format

    Outputs
    -------
    bold_t1
        Motion-corrected BOLD series in T1 space
    bold_t1_ref
        Reference, contrast-enhanced summary of the motion-corrected BOLD series in T1w space


    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.nibabel import GenerateSamplingReference
    from niworkflows.interfaces.nilearn import Merge

    workflow = Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "name_source",
            "ref_bold_brain",
            "t1w_brain",
            "t1w_mask",
            "bold_split",
            "fieldwarp",
            "hmc_xforms",
            "bold2anat",
        ]),
        name="inputnode",
    )

    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=["bold_t1", "bold_t1_ref", "bold_mask_t1"]),
        name="outputnode",
    )

    gen_ref = pe.Node(GenerateSamplingReference(), name="gen_ref",
                      mem_gb=0.3)  # 256x256x256 * 64 / 8 ~ 150MB

    bold_ref_t1w_tfm = pe.Node(
        ApplyTransforms(interpolation="LanczosWindowedSinc"),
        name="bold_ref_t1w_tfm",
        mem_gb=0.1)

    # fmt:off
    workflow.connect([
        (inputnode, gen_ref, [('ref_bold_brain', 'moving_image'),
                              ('t1w_brain', 'fixed_image'),
                              ('t1w_mask', 'fov_mask')]),
        (inputnode, bold_ref_t1w_tfm, [('ref_bold_brain', 'input_image')]),
        (gen_ref, bold_ref_t1w_tfm, [('out_file', 'reference_image')]),
        (inputnode, bold_ref_t1w_tfm, [('bold2anat', 'transforms')]),
        (bold_ref_t1w_tfm, outputnode, [('output_image', 'bold_t1_ref')]),
    ])
    # fmt:on

    bold_to_t1w_transform = pe.Node(
        MultiApplyTransforms(interpolation="LanczosWindowedSinc",
                             float=True,
                             copy_dtype=True),
        name="bold_to_t1w_transform",
        mem_gb=mem_gb * 3 * omp_nthreads,
        n_procs=omp_nthreads,
    )

    # merge 3D volumes into 4D timeseries
    merge = pe.Node(Merge(compress=use_compression),
                    name="merge",
                    mem_gb=mem_gb)

    if not multiecho:
        # Merge transforms placing the head motion correction last
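        # ANTs composes the stacked transforms so that the last one listed acts
        # on the BOLD volumes first: HMC affines, then the optional SDC
        # fieldwarp, then the BOLD-to-anat affine, resolved in one resampling.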
        nforms = 2 + int(use_fieldwarp)
        merge_xforms = pe.Node(
            niu.Merge(nforms),
            name="merge_xforms",
            run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB,
        )
        if use_fieldwarp:
            # fmt:off
            workflow.connect([
                (inputnode, merge_xforms, [('fieldwarp', 'in2')]),
            ])
            # fmt:on

        # fmt:off
        workflow.connect([
            # merge transforms
            (inputnode, merge_xforms, [('hmc_xforms', 'in%d' % nforms),
                                       ('bold2anat', 'in1')]),
            (merge_xforms, bold_to_t1w_transform, [('out', 'transforms')]),
            (inputnode, bold_to_t1w_transform, [('bold_split', 'input_image')]),
        ])
        # fmt:on
    else:
        from nipype.interfaces.fsl import Split as FSLSplit

        bold_split = pe.Node(FSLSplit(dimension="t"),
                             name="bold_split",
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

        # fmt:off
        workflow.connect([
            (inputnode, bold_split, [('bold_split', 'in_file')]),
            (bold_split, bold_to_t1w_transform, [('out_files', 'input_image')]),
            (inputnode, bold_to_t1w_transform, [('bold2anat', 'transforms')]),
        ])
        # fmt:on

    # fmt:off
    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (gen_ref, bold_to_t1w_transform, [('out_file', 'reference_image')]),
        (bold_to_t1w_transform, merge, [('out_files', 'in_files')]),
        (merge, outputnode, [('out_file', 'bold_t1')]),
    ])
    # fmt:on

    return workflow
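
A hedged usage sketch of init_bold_t1_trans_wf: the parent workflow and the IdentityInterface feeding it are placeholders standing in for whatever upstream nodes actually produce the documented inputs (reference BOLD, skull-stripped T1w and mask, split 3D volumes, HMC affines, and the BOLD-to-anat transform); only the inputnode/outputnode field names are taken from the definition above.

from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe

# Hypothetical parent workflow; 'sources' stands in for real upstream nodes.
parent_wf = pe.Workflow(name='example_bold_to_t1w')
sources = pe.Node(
    niu.IdentityInterface(fields=[
        'name_source', 'ref_bold_brain', 't1w_brain', 't1w_mask',
        'bold_split', 'hmc_xforms', 'bold2anat']),
    name='sources')

bold_t1_trans_wf = init_bold_t1_trans_wf(mem_gb=3, omp_nthreads=1)

parent_wf.connect([
    (sources, bold_t1_trans_wf, [
        ('name_source', 'inputnode.name_source'),
        ('ref_bold_brain', 'inputnode.ref_bold_brain'),
        ('t1w_brain', 'inputnode.t1w_brain'),
        ('t1w_mask', 'inputnode.t1w_mask'),
        ('bold_split', 'inputnode.bold_split'),
        ('hmc_xforms', 'inputnode.hmc_xforms'),
        ('bold2anat', 'inputnode.bold2anat'),
    ]),
])
# Downstream consumers would connect to outputnode.bold_t1 and
# outputnode.bold_t1_ref.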