Example 1
def spectrum_ts_table():
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    import numpy as np
    import os

    LUT = np.genfromtxt(os.path.join(os.environ["FREESURFER_HOME"],
                                     'FreeSurferColorLUT.txt'),
                        dtype=str)
    roinum = LUT[:, 0]
    roiname = LUT[:, 1]

    wf = pe.Workflow(name="spectra_and_timeseries")
    inputspec = pe.Node(util.IdentityInterface(fields=['stats_file', 'tr']),
                        name='inputspec')
    spectra = pe.MapNode(util.Function(input_names=['Timeseries', 'tr'],
                                       output_names=['figure'],
                                       function=plot_spectrum),
                         name='spectra',
                         iterfield=['Timeseries'])
    timeseries = pe.MapNode(util.Function(input_names=['Timeseries'],
                                          output_names=['title'],
                                          function=plot_simple_timeseries),
                            name='timeseries',
                            iterfield=['Timeseries'])

    def stats(stats_file):
        import numpy as np
        Stats = []
        for stat in stats_file:
            Stats.append(np.recfromcsv(stat).tolist())
        return Stats

    def make_table(roiname, roinum, spectra, timeseries, stats):
        import numpy as np
        imagetable = [['ROI', 'Timeseries', 'Spectra']]

        for i, R in enumerate(stats):
            title = roiname[roinum == str(np.int_(R[0]))][0]
            imagetable.append([title, timeseries[i], spectra[i]])
        return imagetable

    table = pe.MapNode(util.Function(
        input_names=['roiname', 'roinum', 'spectra', 'timeseries', 'stats'],
        output_names=['imagetable'],
        function=make_table),
                       name='maketable',
                       iterfield=['spectra', 'stats', 'timeseries'])

    wf.connect(inputspec, ('stats_file', stats), spectra, 'Timeseries')
    wf.connect(inputspec, ('stats_file', stats), timeseries, 'Timeseries')
    wf.connect(inputspec, ('stats_file', stats), table, 'stats')
    wf.connect(inputspec, 'tr', spectra, 'tr')
    wf.connect(spectra, 'figure', table, 'spectra')
    wf.connect(timeseries, 'title', table, 'timeseries')
    table.inputs.roiname = roiname
    table.inputs.roinum = roinum

    outputspec = pe.Node(util.IdentityInterface(fields=['imagetable']),
                         name='outputspec')
    wf.connect(table, 'imagetable', outputspec, 'imagetable')

    return wf
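
The plot_spectrum and plot_simple_timeseries helpers wrapped by the Function nodes are defined elsewhere in the source module. Below is a minimal sketch under two assumptions: each Timeseries element is one stats file's list of ROI records (first field the ROI number, remaining fields the timepoints), and each helper returns one figure path per ROI, so that make_table can index into the result.

def plot_spectrum(Timeseries, tr):
    # Hypothetical sketch: plot the power spectrum of each ROI record
    # and return one figure path per ROI. The record layout is an
    # assumption.
    import os
    import numpy as np
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    figures = []
    for row in np.atleast_2d(np.asarray(Timeseries, dtype=float)):
        roi, data = row[0], row[1:]
        freqs = np.fft.rfftfreq(data.size, d=float(tr))
        power = np.abs(np.fft.rfft(data)) ** 2
        plt.figure()
        plt.plot(freqs, power)
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Power')
        figure = os.path.abspath('spectrum_roi%d.png' % int(roi))
        plt.savefig(figure)
        plt.close()
        figures.append(figure)
    return figures


def plot_simple_timeseries(Timeseries):
    # Hypothetical sketch: plot each ROI's raw timeseries and return one
    # figure path per ROI, mirroring plot_spectrum above.
    import os
    import numpy as np
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    titles = []
    for row in np.atleast_2d(np.asarray(Timeseries, dtype=float)):
        roi, data = row[0], row[1:]
        plt.figure()
        plt.plot(data)
        plt.xlabel('Volume')
        title = os.path.abspath('timeseries_roi%d.png' % int(roi))
        plt.savefig(title)
        plt.close()
        titles.append(title)
    return titles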
Example 2
Setup preprocessing workflow
----------------------------

This is a generic FSL FEAT preprocessing workflow encompassing skull stripping,
motion correction, and smoothing operations.

"""

preproc = pe.Workflow(name='preproc')
"""
Set up a node to define all inputs required for the preprocessing workflow
"""

inputnode = pe.Node(interface=util.IdentityInterface(
    fields=['func', 'fssubject_id', 'surf_dir']),
                    name='inputspec')
"""
Convert functional images to float representation. Since there can be more than
one functional run, we use a MapNode to convert each run.
"""

img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
                                                op_string='',
                                                suffix='_dtype'),
                       iterfield=['in_file'],
                       name='img2float')
preproc.connect(inputnode, 'func', img2float, 'in_file')
"""
Extract the middle volume of the first run as the reference
"""
Example 3
def output_to_standard(workflow, output_name, strat, num_strat, pipeline_config_obj,
                       map_node=False, input_image_type=0):

    nodes = strat.get_nodes_names()

    if 'apply_ants_warp_functional_to_standard' in nodes:

        # ANTS WARP APPLICATION

        # convert the func-to-anat linear warp from FSL FLIRT to
        # ITK (ANTS) format
        fsl_to_itk_convert = create_wf_c3d_fsl_to_itk(input_image_type,
                                                      map_node,
                                                      name='{0}_fsl_to_itk_{1}'.format(output_name, num_strat))

        # collect the list of warps into a single stack to feed into the
        # ANTS warp apply tool
        collect_transforms = create_wf_collect_transforms(map_node,
                                                          name='{0}_collect_transforms_{1}'.format(output_name, num_strat))

        # ANTS apply warp
        apply_ants_warp = create_wf_apply_ants_warp(map_node,
                                                    name='{0}_to_standard_{1}'.format(
                                                        output_name, num_strat),
                                                    ants_threads=int(pipeline_config_obj.num_ants_threads))

        apply_ants_warp.inputs.inputspec.dimension = 3
        apply_ants_warp.inputs.inputspec.interpolation = 'Linear'
        apply_ants_warp.inputs.inputspec.reference_image = \
            pipeline_config_obj.template_brain_only_for_func

        apply_ants_warp.inputs.inputspec.input_image_type = \
            input_image_type

        # affine from FLIRT func->anat linear registration
        node, out_file = strat['functional_to_anat_linear_xfm']
        workflow.connect(node, out_file, fsl_to_itk_convert,
                            'inputspec.affine_file')

        # reference used in FLIRT func->anat linear registration
        node, out_file = strat['anatomical_brain']
        workflow.connect(node, out_file, fsl_to_itk_convert,
                            'inputspec.reference_file')

        # output file to be converted
        node, out_file = strat[output_name]
        workflow.connect(node, out_file, fsl_to_itk_convert,
                            'inputspec.source_file')

        # nonlinear warp from anatomical->template ANTS registration
        node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
        workflow.connect(node, out_file, collect_transforms,
                            'inputspec.warp_file')

        # linear initial from anatomical->template ANTS registration
        node, out_file = strat['ants_initial_xfm']
        workflow.connect(node, out_file, collect_transforms,
                            'inputspec.linear_initial')

        # linear affine from anatomical->template ANTS registration
        node, out_file = strat['ants_affine_xfm']
        workflow.connect(node, out_file, collect_transforms,
                            'inputspec.linear_affine')

        # rigid affine from anatomical->template ANTS registration
        node, out_file = strat['ants_rigid_xfm']
        workflow.connect(node, out_file, collect_transforms,
                            'inputspec.linear_rigid')

        # converted FLIRT func->anat affine, now in ITK (ANTS) format
        workflow.connect(fsl_to_itk_convert,
                            'outputspec.itk_transform',
                            collect_transforms,
                            'inputspec.fsl_to_itk_affine')

        # output file to be converted
        node, out_file = strat[output_name]
        workflow.connect(node, out_file, apply_ants_warp,
                            'inputspec.input_image')

        # collection of warps to be applied to the output file
        workflow.connect(collect_transforms,
                            'outputspec.transformation_series',
                            apply_ants_warp,
                            'inputspec.transforms')

        strat.update_resource_pool({
            '{0}_to_standard'.format(output_name): (apply_ants_warp, 'outputspec.output_image')
        })

        strat.append_name(apply_ants_warp.name)

        num_strat += 1

    else:
        # FSL WARP APPLICATION
        if map_node:
            apply_fsl_warp = pe.MapNode(interface=fsl.ApplyWarp(),
                                        name='{0}_to_standard_{1}'.format(output_name, num_strat),
                                        iterfield=['in_file'])
        else:
            apply_fsl_warp = pe.Node(interface=fsl.ApplyWarp(),
                                     name='{0}_to_standard_{1}'.format(output_name,
                                                                       num_strat))

        apply_fsl_warp.inputs.ref_file = \
            pipeline_config_obj.template_skull_for_func

        # output file to be warped
        node, out_file = strat[output_name]
        workflow.connect(node, out_file, apply_fsl_warp, 'in_file')

        # linear affine from func->anat linear FLIRT registration
        node, out_file = strat['functional_to_anat_linear_xfm']
        workflow.connect(node, out_file, apply_fsl_warp, 'premat')

        # nonlinear warp from anatomical->template FNIRT registration
        node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
        workflow.connect(node, out_file, apply_fsl_warp, 'field_file')

        strat.update_resource_pool({'{0}_to_standard'.format(output_name): (apply_fsl_warp, 'out_file')})
        strat.append_name(apply_fsl_warp.name)

    return strat
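
A hypothetical invocation of this builder, assuming a C-PAC workflow, strategy object, and pipeline configuration are already in hand from the surrounding pipeline; the resource name here is illustrative only.

# Hypothetical usage sketch: warp one resource pool entry to standard
# space for strategy 0. `workflow`, `strat`, and `pipeline_config_obj`
# are assumed to come from the surrounding C-PAC pipeline builder.
strat = output_to_standard(workflow, 'functional_brain_mask', strat,
                           num_strat=0,
                           pipeline_config_obj=pipeline_config_obj,
                           map_node=False, input_image_type=0)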
Example 4
def init_dualregression_wf(
    workdir=None, feature=None, map_files=None, map_spaces=None, memcalc=MemoryCalculator()
):
    """
    create a workflow to calculate dual regression for ICA seeds
    """
    if feature is not None:
        name = f"{formatlikebids(feature.name)}_wf"
    else:
        name = "dualregression_wf"
    workflow = pe.Workflow(name=name)

    # input
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "tags",
                "vals",
                "metadata",
                "bold",
                "mask",
                "confounds_selected",
                "map_names",
                "map_files",
                "map_spaces",
            ]
        ),
        name="inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["resultdicts"]), name="outputnode")

    if feature is not None:
        inputnode.inputs.map_names = feature.maps

    if map_files is not None:
        inputnode.inputs.map_files = map_files

    if map_spaces is not None:
        inputnode.inputs.map_spaces = map_spaces

    #
    statmaps = ["effect", "variance", "z", "dof", "mask"]
    make_resultdicts_a = pe.Node(
        MakeResultdicts(tagkeys=["feature", "map"], imagekeys=["design_matrix", "contrast_matrix"]),
        name="make_resultdicts_a",
        run_without_submitting=True
    )
    if feature is not None:
        make_resultdicts_a.inputs.feature = feature.name
    workflow.connect(inputnode, "tags", make_resultdicts_a, "tags")
    workflow.connect(inputnode, "vals", make_resultdicts_a, "vals")
    workflow.connect(inputnode, "metadata", make_resultdicts_a, "metadata")
    workflow.connect(inputnode, "map_names", make_resultdicts_a, "map")
    make_resultdicts_b = pe.Node(
        MakeResultdicts(
            tagkeys=["feature", "map", "component"],
            imagekeys=statmaps,
            metadatakeys=["sources", "mean_t_s_n_r"],
        ),
        name="make_resultdicts_b",
        run_without_submitting=True
    )
    if feature is not None:
        make_resultdicts_b.inputs.feature = feature.name
    workflow.connect(inputnode, "tags", make_resultdicts_b, "tags")
    workflow.connect(inputnode, "vals", make_resultdicts_b, "vals")
    workflow.connect(inputnode, "metadata", make_resultdicts_b, "metadata")
    workflow.connect(inputnode, "map_names", make_resultdicts_b, "map")
    workflow.connect(inputnode, "mask", make_resultdicts_b, "mask")

    workflow.connect(make_resultdicts_b, "resultdicts", outputnode, "resultdicts")

    #
    merge_resultdicts = pe.Node(niu.Merge(2), name="merge_resultdicts", run_without_submitting=True)
    workflow.connect(make_resultdicts_a, "resultdicts", merge_resultdicts, "in1")
    workflow.connect(make_resultdicts_b, "resultdicts", merge_resultdicts, "in2")
    resultdict_datasink = pe.Node(
        ResultdictDatasink(base_directory=workdir), name="resultdict_datasink"
    )
    workflow.connect(merge_resultdicts, "out", resultdict_datasink, "indicts")

    #
    reference_dict = dict(reference_space=constants.reference_space, reference_res=constants.reference_res)
    resample = pe.MapNode(
        Resample(interpolation="LanczosWindowedSinc", **reference_dict),
        name="resample",
        iterfield=["input_image", "input_space"],
        n_procs=config.nipype.omp_nthreads,
        mem_gb=memcalc.series_std_gb,
    )
    workflow.connect(inputnode, "map_files", resample, "input_image")
    workflow.connect(inputnode, "map_spaces", resample, "input_space")

    # Delete zero voxels for the maps
    applymask = pe.MapNode(
        fsl.ApplyMask(), name="applymask", iterfield="in_file", mem_gb=memcalc.volume_std_gb,
    )
    workflow.connect(inputnode, "mask", applymask, "mask_file")
    workflow.connect(resample, "output_image", applymask, "in_file")

    # First step: calculate the spatial regression of the ICA maps onto
    # the BOLD file
    spatialglm = pe.MapNode(
        fsl.GLM(out_file="beta", demean=True),
        name="spatialglm",
        iterfield="design",
        mem_gb=memcalc.series_std_gb * 5,
    )
    workflow.connect(applymask, "out_file", spatialglm, "design")
    workflow.connect(inputnode, "bold", spatialglm, "in_file")
    workflow.connect(inputnode, "mask", spatialglm, "mask")

    # Second step: calculate the temporal regression of the timeseries
    # from the first step onto the BOLD file
    contrasts = pe.MapNode(
        niu.Function(
            input_names=["map_timeseries_file", "confounds_file"],
            output_names=["out_with_header", "out_no_header", "map_component_names"],
            function=_contrasts,
        ),
        iterfield="map_timeseries_file",
        name="contrasts",
        run_without_submitting=True
    )
    workflow.connect(spatialglm, "out_file", contrasts, "map_timeseries_file")
    workflow.connect(inputnode, "confounds_selected", contrasts, "confounds_file")

    workflow.connect(contrasts, "out_with_header", make_resultdicts_a, "contrast_matrix")
    workflow.connect(contrasts, "map_component_names", make_resultdicts_b, "component")

    design = pe.MapNode(MergeColumns(2), iterfield=["in1", "column_names1"], name="design")
    workflow.connect(spatialglm, "out_file", design, "in1")
    workflow.connect(contrasts, "map_component_names", design, "column_names1")
    workflow.connect(inputnode, "confounds_selected", design, "in2")

    workflow.connect(design, "out_with_header", make_resultdicts_a, "design_matrix")

    fillna = pe.MapNode(FillNA(), iterfield="in_tsv", name="fillna")
    workflow.connect(design, "out_no_header", fillna, "in_tsv")

    temporalglm = pe.MapNode(
        fsl.GLM(
            out_file="beta.nii.gz",
            out_cope="cope.nii.gz",
            out_varcb_name="varcope.nii.gz",
            out_z_name="zstat.nii.gz",
            demean=True,
        ),
        name="temporalglm",
        iterfield=["design", "contrasts"],
        mem_gb=memcalc.series_std_gb * 5,
    )
    workflow.connect(inputnode, "bold", temporalglm, "in_file")
    workflow.connect(inputnode, "mask", temporalglm, "mask")
    workflow.connect(fillna, "out_no_header", temporalglm, "design")
    workflow.connect(contrasts, "out_no_header", temporalglm, "contrasts")

    # make dof volume
    makedofvolume = pe.MapNode(MakeDofVolume(), iterfield=["design"], name="makedofvolume")
    workflow.connect(inputnode, "bold", makedofvolume, "bold_file")
    workflow.connect(fillna, "out_no_header", makedofvolume, "design")

    for glmattr, resultattr in (("cope", "effect"), ("varcb", "variance"), ("z", "z")):
        split = pe.MapNode(
            fsl.Split(dimension="t"), iterfield="in_file", name=f"split{resultattr}images"
        )
        workflow.connect(temporalglm, f"out_{glmattr}", split, "in_file")
        workflow.connect(split, "out_files", make_resultdicts_b, resultattr)
    workflow.connect(makedofvolume, "out_file", make_resultdicts_b, "dof")

    #
    tsnr = pe.Node(nac.TSNR(), name="tsnr", mem_gb=memcalc.series_std_gb)
    workflow.connect(inputnode, "bold", tsnr, "in_file")

    maxintensity = pe.MapNode(
        MaxIntensity(), iterfield="in_file", name="maxintensity", mem_gb=memcalc.series_std_gb
    )
    workflow.connect(resample, "output_image", maxintensity, "in_file")

    calcmean = pe.MapNode(
        CalcMean(), iterfield="parcellation", name="calcmean", mem_gb=memcalc.series_std_gb
    )
    workflow.connect(maxintensity, "out_file", calcmean, "parcellation")
    workflow.connect(tsnr, "tsnr_file", calcmean, "in_file")

    workflow.connect(calcmean, "mean", make_resultdicts_b, "mean_t_s_n_r")

    return workflow
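
A hypothetical direct invocation, bypassing the feature object and supplying ICA maps explicitly; the working directory, file path, and space label below are illustrative values, not taken from the source.

# Hypothetical usage sketch: build the dual-regression workflow from a
# list of ICA map files (paths and space labels are illustrative).
wf = init_dualregression_wf(
    workdir="/scratch/halfpipe",
    map_files=["/data/ica/melodic_IC.nii.gz"],
    map_spaces=["standard"],
)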
Example 5
def create_temporal_reg(wflow_name='temporal_reg', which='SR'):

    """
    Temporal multiple regression workflow.
    Provides a spatial map of parameter estimates corresponding to each
    timeseries provided as a regressor in a timeseries.txt file.
    
    Parameters
    ----------

    wflow_name : a string
        Name of the temporal regression workflow

    which : a string
        SR: Spatial Regression, RT: ROI Timeseries

        NOTE: If you set which = 'RT', the outputs of this workflow will be
        renamed based on the header information provided in the
        timeseries.txt file.
        If you run the temporal regression workflow manually, don't set
        which = 'RT' unless you provide a timeseries.txt file with a header
        containing the names of the timeseries.
        
    Returns
    -------

    wflow : workflow

        Temporal multiple regression workflow

    Notes
    -----

    `Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/sca/sca.py>`_

    Workflow Inputs::

        inputspec.subject_rest : string (existing nifti file)
            Band-passed image with global signal, white matter, CSF, and
            motion regression. Recommended bandpass filter: (0.001, 0.1)

        inputspec.subject_timeseries : string (existing txt file)
            Text file containing the timeseries to be regressed on the
            subject's functional file; timeseries are organized by columns,
            timepoints by rows
        
        inputspec.subject_mask : string (existing nifti file)
            path to subject functional mask
            
        inputspec.demean : Boolean
            control whether to demean model and data
            
        inputspec.normalize : Boolean
            control whether to normalize the input timeseries to unit standard deviation



    Workflow Outputs::

        outputspec.temp_reg_map : string (nifti file)
            GLM parameter estimate image for each timeseries in the input file

        outputspec.temp_reg_map_zstat : string (nifti file)
            Normalized version of the GLM parameter estimates


    Temporal Regression Workflow Procedure:

    Enter all timeseries into a general linear model and regress them
    against the subject's functional file to get spatial maps of voxels
    showing activation patterns related to those in the timeseries.
    

    Workflow:

    .. image:: ../images/create_temporal_regression.png
        :width: 500

    Detailed Workflow:

    .. image:: ../images/detailed_graph_create_temporal_regression.png
        :width: 500    

    References
    ----------
    `http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/DualRegression/UserGuide <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/DualRegression/UserGuide>`_

    Examples
    --------

    >>> tr_wf = create_temporal_reg('temporal_reg')
    >>> tr_wf.inputs.inputspec.subject_rest = '/home/data/subject/func/rest_bandpassed.nii.gz'
    >>> tr_wf.inputs.inputspec.subject_timeseries = '/home/data/subject/func/timeseries.txt'
    >>> tr_wf.inputs.inputspec.subject_mask = '/home/data/spatialmaps/spatial_map.nii.gz'
    >>> tr_wf.inputs.inputspec.demean = True
    >>> tr_wf.inputs.inputspec.normalize = True
    >>> tr_wf.run() # doctest: +SKIP

    """
    
    wflow = pe.Workflow(name=wflow_name)

    inputNode = pe.Node(util.IdentityInterface(fields=['subject_rest',
                                                       'subject_timeseries',
                                                       'subject_mask',
                                                       'demean',
                                                       'normalize']),
                        name='inputspec')

    outputNode = pe.Node(util.IdentityInterface(fields=['temp_reg_map',
                                                        'temp_reg_map_files',
                                                        'temp_reg_map_z',
                                                        'temp_reg_map_z_files']),
                         name='outputspec')

    check_timeseries = pe.Node(util.Function(input_names=['in_file'],
                                             output_names=['out_file'],
                                             function=check_ts),
                               name='check_timeseries')

    wflow.connect(inputNode, 'subject_timeseries',
                  check_timeseries, 'in_file')

    temporalReg = pe.Node(interface=fsl.GLM(),
                          name='temporal_regression')

    temporalReg.inputs.out_file = 'temp_reg_map.nii.gz'
    temporalReg.inputs.out_z_name = 'temp_reg_map_z.nii.gz'

    wflow.connect(inputNode, 'subject_rest',
                  temporalReg, 'in_file')
    wflow.connect(check_timeseries, 'out_file',
                  temporalReg, 'design')
    wflow.connect(inputNode, 'demean',
                  temporalReg, 'demean')
    wflow.connect(inputNode, 'normalize',
                  temporalReg, 'des_norm')
    wflow.connect(inputNode, 'subject_mask',
                  temporalReg, 'mask')


    wflow.connect(temporalReg, 'out_file',
                  outputNode, 'temp_reg_map')
    wflow.connect(temporalReg, 'out_z',
                  outputNode, 'temp_reg_map_z')


    split = pe.Node(interface=fsl.Split(),
                    name='split_raw_volumes')
    split.inputs.dimension = 't'
    split.inputs.out_base_name = 'temp_reg_map_'

    wflow.connect(temporalReg, 'out_file',
                  split, 'in_file')


    split_zstat = pe.Node(interface=fsl.Split(),
                          name='split_zstat_volumes')
    split_zstat.inputs.dimension = 't'
    split_zstat.inputs.out_base_name = 'temp_reg_map_z_'

    wflow.connect(temporalReg, 'out_z',
                  split_zstat, 'in_file')

    if which == 'SR':
        wflow.connect(split, 'out_files',
                      outputNode, 'temp_reg_map_files')
        wflow.connect(split_zstat, 'out_files',
                      outputNode, 'temp_reg_map_z_files')

    elif which == 'RT':

        # get roi order and send to output node for raw outputs
        get_roi_order = pe.Node(util.Function(input_names=['maps',
                                                           'timeseries'],
                                              output_names=['labels',
                                                            'maps'],
                                              function=map_to_roi),
                                name='get_roi_order')

        wflow.connect(split, 'out_files',
                      get_roi_order, 'maps')

        wflow.connect(inputNode, 'subject_timeseries',
                      get_roi_order, 'timeseries')

        rename_maps = pe.MapNode(interface=util.Rename(),
                                 name='rename_maps',
                                 iterfield=['in_file',
                                            'format_string'])
        rename_maps.inputs.keep_ext = True

        wflow.connect(get_roi_order, 'labels',
                      rename_maps, 'format_string')
        wflow.connect(get_roi_order, 'maps',
                      rename_maps, 'in_file')

        wflow.connect(rename_maps, 'out_file',
                      outputNode, 'temp_reg_map_files')


        # get roi order and send to output node for z-stat outputs
        get_roi_order_zstat = pe.Node(util.Function(input_names=['maps',
                                                                 'timeseries'],
                                                    output_names=['labels',
                                                                  'maps'],
                                                    function=map_to_roi),
                                      name='get_roi_order_zstat')

        wflow.connect(split_zstat, 'out_files',
                      get_roi_order_zstat, 'maps')

        wflow.connect(inputNode, 'subject_timeseries',
                      get_roi_order_zstat, 'timeseries')

        rename_maps_zstat = pe.MapNode(interface=util.Rename(),
                                       name='rename_maps_zstat',
                                       iterfield=['in_file',
                                                  'format_string'])
        rename_maps_zstat.inputs.keep_ext = True

        wflow.connect(get_roi_order_zstat, 'labels',
                      rename_maps_zstat, 'format_string')
        wflow.connect(get_roi_order_zstat, 'maps',
                      rename_maps_zstat, 'in_file')

        wflow.connect(rename_maps_zstat, 'out_file',
                      outputNode, 'temp_reg_map_z_files')


    return wflow
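
The check_ts function wired into check_timeseries is defined elsewhere in C-PAC. A minimal sketch, assuming its job is to ensure the design matrix has timepoints as rows; the transpose heuristic and output filename are assumptions, and a real implementation would also need to handle the optional header row mentioned in the docstring.

def check_ts(in_file):
    # Hypothetical sketch: if the timeseries file looks transposed
    # (more columns than rows, i.e. more regressors than timepoints),
    # transpose it and write out a corrected copy.
    import os
    import numpy as np
    data = np.loadtxt(in_file)
    if data.ndim > 1 and data.shape[1] > data.shape[0]:
        data = data.T
    out_file = os.path.join(os.getcwd(), 'timeseries_checked.txt')
    np.savetxt(out_file, data)
    return out_file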
Example 6
def init_single_subject_wf(
    debug,
    freesurfer,
    hires,
    layout,
    longitudinal,
    low_mem,
    name,
    omp_nthreads,
    output_dir,
    output_spaces,
    reportlets_dir,
    skull_strip_fixed_seed,
    skull_strip_template,
    subject_id,
):
    """
    Create a single subject workflow.

    This workflow organizes the preprocessing pipeline for a single subject.
    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.

    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from collections import OrderedDict, namedtuple
            from smriprep.workflows.base import init_single_subject_wf
            BIDSLayout = namedtuple('BIDSLayout', ['root'])
            wf = init_single_subject_wf(
                debug=False,
                freesurfer=True,
                hires=True,
                layout=BIDSLayout('.'),
                longitudinal=False,
                low_mem=False,
                name='single_subject_wf',
                omp_nthreads=1,
                output_dir='.',
                output_spaces=OrderedDict([('MNI152NLin2009cAsym', {}),
                                           ('fsaverage5', {})]),
                reportlets_dir='.',
                skull_strip_fixed_seed=False,
                skull_strip_template=('OASIS30ANTs', {}),
                subject_id='test',
            )

    Parameters
    ----------
    debug : bool
        Enable debugging outputs
    freesurfer : bool
        Enable FreeSurfer surface reconstruction (may increase runtime)
    hires : bool
        Enable sub-millimeter preprocessing in FreeSurfer
    layout : BIDSLayout object
        BIDS dataset layout
    longitudinal : bool
        Treat multiple sessions as longitudinal (may increase runtime)
        See sub-workflows for specific differences
    low_mem : bool
        Write uncompressed .nii files in some cases to reduce memory usage
    name : str
        Name of workflow
    omp_nthreads : int
        Maximum number of threads an individual process may use
    output_dir : str
        Directory in which to save derivatives
    output_spaces : OrderedDict
        List of spatial normalization targets. Some parts of the pipeline
        will only be instantiated for certain output spaces. Valid spaces:

        - Any template identifier from TemplateFlow
        - Path to a template folder organized following TemplateFlow's
          conventions
    reportlets_dir : str
        Directory in which to save reportlets
    skull_strip_fixed_seed : bool
        Do not use a random seed for skull-stripping; this ensures
        run-to-run replicability when used with --omp-nthreads 1
    skull_strip_template : tuple
        Name of ANTs skull-stripping template (e.g., 'OASIS30ANTs') and
        dictionary of template specifications.
    subject_id : str
        Subject label

    Inputs
    ------
    subjects_dir
        FreeSurfer SUBJECTS_DIR

    """
    from ..interfaces.reports import AboutSummary, SubjectSummary
    if name in ('single_subject_wf', 'single_subject_smripreptest_wf'):
        # for documentation purposes
        subject_data = {
            't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
        }
    else:
        subject_data = collect_data(layout, subject_id)[0]

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *sMRIPrep* {smriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(smriprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

For more details of the pipeline, see [the section corresponding
to workflows in *sMRIPrep*'s documentation]\
(https://smriprep.readthedocs.io/en/latest/workflows.html \
"sMRIPrep's documentation").


### References

"""

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=True),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(bids_dir=layout.root),
                        name='bids_info',
                        run_without_submitting=True)

    summary = pe.Node(SubjectSummary(output_spaces=list(output_spaces.keys())),
                      name='summary',
                      run_without_submitting=True)

    about = pe.Node(AboutSummary(version=__version__,
                                 command=' '.join(sys.argv)),
                    name='about',
                    run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='summary', keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='about', keep_dtype=True),
                              name='ds_report_about',
                              run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=layout.root,
        debug=debug,
        freesurfer=freesurfer,
        hires=hires,
        longitudinal=longitudinal,
        name="anat_preproc_wf",
        num_t1w=len(subject_data['t1w']),
        omp_nthreads=omp_nthreads,
        output_dir=output_dir,
        output_spaces=output_spaces,
        reportlets_dir=reportlets_dir,
        skull_strip_fixed_seed=skull_strip_fixed_seed,
        skull_strip_template=skull_strip_template,
    )

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir',
                                       'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bids_info, anat_preproc_wf, [(('subject', _prefix),
                                       'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    return workflow
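
The _prefix helper used in the bids_info connection is defined elsewhere in smriprep; its required behavior, inferred from usage, is simply to ensure the BIDS 'sub-' prefix on the subject label.

def _prefix(subid):
    # Prepend the BIDS 'sub-' prefix unless it is already present.
    return subid if subid.startswith('sub-') else 'sub-' + subid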
Example 7
def test_create_bedpostx_pipeline():
    fsl_course_dir = os.path.abspath('fsl_course_data')

    mask_file = os.path.join(fsl_course_dir,
                             "fdt/subj1.bedpostX/nodif_brain_mask.nii.gz")
    bvecs_file = os.path.join(fsl_course_dir, "fdt/subj1/bvecs")
    bvals_file = os.path.join(fsl_course_dir, "fdt/subj1/bvals")
    dwi_file = os.path.join(fsl_course_dir, "fdt/subj1/data.nii.gz")

    nipype_bedpostx = create_bedpostx_pipeline("nipype_bedpostx")
    nipype_bedpostx.inputs.inputnode.dwi = dwi_file
    nipype_bedpostx.inputs.inputnode.mask = mask_file
    nipype_bedpostx.inputs.inputnode.bvecs = bvecs_file
    nipype_bedpostx.inputs.inputnode.bvals = bvals_file
    nipype_bedpostx.inputs.xfibres.n_fibres = 2
    nipype_bedpostx.inputs.xfibres.fudge = 1
    nipype_bedpostx.inputs.xfibres.burn_in = 1000
    nipype_bedpostx.inputs.xfibres.n_jumps = 1250
    nipype_bedpostx.inputs.xfibres.sample_every = 25

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        original_bedpostx = pe.Node(interface=fsl.BEDPOSTX(),
                                    name="original_bedpostx")
    original_bedpostx.inputs.dwi = dwi_file
    original_bedpostx.inputs.mask = mask_file
    original_bedpostx.inputs.bvecs = bvecs_file
    original_bedpostx.inputs.bvals = bvals_file
    original_bedpostx.inputs.environ['FSLPARALLEL'] = ""
    original_bedpostx.inputs.fibres = 2
    original_bedpostx.inputs.weight = 1
    original_bedpostx.inputs.burn_period = 1000
    original_bedpostx.inputs.jumps = 1250
    original_bedpostx.inputs.sampling = 25

    test_f1 = pe.Node(util.AssertEqual(), name="mean_f1_test")
    test_f2 = pe.Node(util.AssertEqual(), name="mean_f2_test")
    test_th1 = pe.Node(util.AssertEqual(), name="mean_th1_test")
    test_th2 = pe.Node(util.AssertEqual(), name="mean_th2_test")
    test_ph1 = pe.Node(util.AssertEqual(), name="mean_ph1_test")
    test_ph2 = pe.Node(util.AssertEqual(), name="mean_ph2_test")

    pipeline = pe.Workflow(name="test_bedpostx")
    pipeline.base_dir = tempfile.mkdtemp(prefix="nipype_test_bedpostx_")

    def pickFirst(l):
        return l[0]

    def pickSecond(l):
        return l[1]

    pipeline.connect([
        (nipype_bedpostx, test_f1, [(("outputnode.mean_fsamples", pickFirst),
                                     "volume1")]),
        (nipype_bedpostx, test_f2, [(("outputnode.mean_fsamples", pickSecond),
                                     "volume1")]),
        (nipype_bedpostx, test_th1, [(("outputnode.mean_thsamples", pickFirst),
                                      "volume1")]),
        (nipype_bedpostx, test_th2, [(("outputnode.mean_thsamples",
                                       pickSecond), "volume1")]),
        (nipype_bedpostx, test_ph1, [(("outputnode.mean_phsamples", pickFirst),
                                      "volume1")]),
        (nipype_bedpostx, test_ph2, [(("outputnode.mean_phsamples",
                                       pickSecond), "volume1")]),
        (original_bedpostx, test_f1, [(("mean_fsamples", pickFirst), "volume2")
                                      ]),
        (original_bedpostx, test_f2, [(("mean_fsamples", pickSecond),
                                       "volume2")]),
        (original_bedpostx, test_th1, [(("mean_thsamples", pickFirst),
                                        "volume2")]),
        (original_bedpostx, test_th2, [(("mean_thsamples", pickSecond),
                                        "volume2")]),
        (original_bedpostx, test_ph1, [(("mean_phsamples", pickFirst),
                                        "volume2")]),
        (original_bedpostx, test_ph2, [(("mean_phsamples", pickSecond),
                                        "volume2")])
    ])

    pipeline.run(plugin='Linear')
    shutil.rmtree(pipeline.base_dir)
Example 8
def create_wf_edit_func(wf_name="edit_func"):
    """Workflow to edit the scan to the proscribed TRs.
    
    Workflow Inputs::

        inputspec.func : func file or a list of func/rest nifti files
            User input functional (T2*) image

        inputspec.start_idx : string
            Starting volume/slice of the functional image (optional)

        inputspec.stop_idx : string
            Last volume/slice of the functional image (optional)

    Workflow Outputs::

        outputspec.edited_func : string (nifti file)
            Path to Output image with the initial few slices dropped


    Order of commands:

    - Get the start and the end volume index of the functional run. If not defined by the user, return the first and last volume.

        get_idx(in_files, stop_idx, start_idx)

    - Dropping the initial TRs. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::

        3dcalc -a rest.nii.gz[4..299]
               -expr 'a'
               -prefix rest_3dc.nii.gz

    """

    # allocate a workflow object
    preproc = pe.Workflow(name=wf_name)

    # configure the workflow's input spec
    inputNode = pe.Node(
        util.IdentityInterface(fields=['func', 'start_idx', 'stop_idx']),
        name='inputspec')

    # configure the workflow's output spec
    outputNode = pe.Node(util.IdentityInterface(fields=['edited_func']),
                         name='outputspec')

    # allocate a node to check that the requested edits are
    # reasonable given the data
    func_get_idx = pe.Node(util.Function(
        input_names=['in_files', 'stop_idx', 'start_idx'],
        output_names=['stopidx', 'startidx'],
        function=get_idx),
                           name='func_get_idx')

    # wire in the func_get_idx node
    preproc.connect(inputNode, 'func', func_get_idx, 'in_files')
    preproc.connect(inputNode, 'start_idx', func_get_idx, 'start_idx')
    preproc.connect(inputNode, 'stop_idx', func_get_idx, 'stop_idx')

    # allocate a node to edit the functional file
    func_drop_trs = pe.Node(interface=afni_utils.Calc(), name='func_drop_trs')

    func_drop_trs.inputs.expr = 'a'
    func_drop_trs.inputs.outputtype = 'NIFTI_GZ'

    # wire in the inputs
    preproc.connect(inputNode, 'func', func_drop_trs, 'in_file_a')

    preproc.connect(func_get_idx, 'startidx', func_drop_trs, 'start_idx')

    preproc.connect(func_get_idx, 'stopidx', func_drop_trs, 'stop_idx')

    # wire the output
    preproc.connect(func_drop_trs, 'out_file', outputNode, 'edited_func')

    return preproc
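
The get_idx function referenced by the Function node is not shown in this excerpt. A hedged sketch follows, assuming it clamps the user-supplied indices to the volumes actually present in the scan; note the return order matches output_names=['stopidx', 'startidx'] above.

def get_idx(in_files, stop_idx=None, start_idx=None):
    # Hypothetical sketch: validate the requested start/stop volume
    # indices against the functional run, falling back to the first
    # and last volume when they are unset or out of range.
    from nibabel import load

    nvols = load(in_files).shape[3]

    startidx = 0
    if start_idx is not None and 0 <= int(start_idx) <= nvols - 1:
        startidx = int(start_idx)

    stopidx = nvols - 1
    if stop_idx is not None and startidx <= int(stop_idx) <= nvols - 1:
        stopidx = int(stop_idx)

    return stopidx, startidx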
Example 9
def create_func_preproc(tool, wf_name='func_preproc'):
    """

    The main purpose of this workflow is to process functional data. The raw
    rest file is deobliqued and reoriented into RPI. The mean intensity over
    all time points is then computed for each voxel, and this image is used
    as the base to calculate motion parameters. The image is then
    skull-stripped and normalized, and a mask is obtained from the processed
    image for further use in analysis.

    Parameters
    ----------

    tool : string
        Skull-stripping tool to hand to skullstrip_functional
        (e.g. 'afni' or 'fsl')

    wf_name : string
        Workflow name

    Returns
    -------
    func_preproc : workflow object
        Functional Preprocessing workflow object

    Notes
    -----

    `Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/func_preproc/func_preproc.py>`_

    Workflow Inputs::

        inputspec.func : func nifti file
            User input functional(T2) Image, in any of the 8 orientations

        inputspec.twopass : boolean
            Perform two-pass on volume registration

    Workflow Outputs::

        outputspec.refit : string (nifti file)
            Path to deobliqued anatomical data

        outputspec.reorient : string (nifti file)
            Path to RPI oriented anatomical data

        outputspec.motion_correct_ref : string (nifti file)
             Path to Mean intensity Motion corrected image
             (base reference image for the second motion correction run)

        outputspec.motion_correct : string (nifti file)
            Path to motion corrected output file

        outputspec.max_displacement : string (Mat file)
            Path to maximum displacement (in mm) for brain voxels in each volume

        outputspec.movement_parameters : string (Mat file)
            Path to 1D file containing six movement/motion parameters(3 Translation, 3 Rotations)
            in different columns (roll pitch yaw dS  dL  dP)

        outputspec.skullstrip : string (nifti file)
            Path to skull stripped Motion Corrected Image

        outputspec.mask : string (nifti file)
            Path to brain-only mask

        outputspec.func_mean : string (nifti file)
            Mean, Skull Stripped, Motion Corrected output T2 Image path
            (Image with mean intensity values across voxels)

        outputspec.preprocessed : string (nifti file)
            output skull stripped, motion corrected T2 image
            with normalized intensity values

        outputspec.preprocessed_mask : string (nifti file)
           Mask obtained from normalized preprocessed image

    Order of commands:

    - Deobliquing the scans.  For details see `3drefit <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3drefit.html>`_::

        3drefit -deoblique rest_3dc.nii.gz

    - Re-orienting the Image into Right-to-Left Posterior-to-Anterior Inferior-to-Superior (RPI) orientation. For details see `3dresample <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dresample.html>`_::

        3dresample -orient RPI
                   -prefix rest_3dc_RPI.nii.gz
                   -inset rest_3dc.nii.gz

    - Calculate voxel wise statistics. Get the RPI Image with mean intensity values over all timepoints for each voxel. For details see `3dTstat <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTstat.html>`_::

        3dTstat -mean
                -prefix rest_3dc_RPI_3dT.nii.gz
                rest_3dc_RPI.nii.gz

    - Motion Correction. For details see `3dvolreg <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dvolreg.html>`_::

        3dvolreg -Fourier
                 -twopass
                 -base rest_3dc_RPI_3dT.nii.gz
                 -zpad 4
                 -maxdisp1D rest_3dc_RPI_3dvmd1D.1D
                 -1Dfile rest_3dc_RPI_3dv1D.1D
                 -prefix rest_3dc_RPI_3dv.nii.gz
                 rest_3dc_RPI.nii.gz

      The base image, or reference image, is the mean-intensity RPI image obtained in the step above. For each volume
      in the RPI-oriented T2 image, the command aligns the image with the base mean image and calculates the motion,
      displacement, and movement parameters. It also outputs the aligned 4D volume and the movement and displacement
      parameters for each volume.

    - Calculate voxel wise statistics. Get the motion corrected output Image from the above step, with mean intensity values over all timepoints for each voxel.
      For details see `3dTstat <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTstat.html>`_::

        3dTstat -mean
                -prefix rest_3dc_RPI_3dv_3dT.nii.gz
                rest_3dc_RPI_3dv.nii.gz

    - Motion Correction and get motion, movement and displacement parameters. For details see `3dvolreg <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dvolreg.html>`_::

        3dvolreg -Fourier
                 -twopass
                 -base rest_3dc_RPI_3dv_3dT.nii.gz
                 -zpad 4
                 -maxdisp1D rest_3dc_RPI_3dvmd1D.1D
                 -1Dfile rest_3dc_RPI_3dv1D.1D
                 -prefix rest_3dc_RPI_3dv.nii.gz
                 rest_3dc_RPI.nii.gz

      The base image, or reference image, is the mean-intensity motion-corrected image obtained from the step above
      (the first 3dvolreg run). For each volume in the RPI-oriented T2 image, the command aligns the image with the
      base mean image and calculates the motion, displacement, and movement parameters. It also outputs the aligned
      4D volume and the movement and displacement parameters for each volume.

    - Create a brain-only mask. For details see `3dAutomask <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAutomask.html>`_::

        3dAutomask
                   -prefix rest_3dc_RPI_3dv_automask.nii.gz
                   rest_3dc_RPI_3dv.nii.gz

    - Edge detect (remove skull) and get the brain only. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::

        3dcalc -a rest_3dc_RPI_3dv.nii.gz
               -b rest_3dc_RPI_3dv_automask.nii.gz
               -expr 'a*b'
               -prefix rest_3dc_RPI_3dv_3dc.nii.gz

    - Normalizing the image intensity values. For details see `fslmaths <http://www.fmrib.ox.ac.uk/fsl/avwutils/index.html>`_::

        fslmaths rest_3dc_RPI_3dv_3dc.nii.gz
                 -ing 10000 rest_3dc_RPI_3dv_3dc_maths.nii.gz
                 -odt float

      Normalized intensity = (TrueValue*10000)/global4Dmean

    - Calculate mean of skull stripped image. For details see `3dTstat <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTstat.html>`_::

        3dTstat -mean -prefix rest_3dc_RPI_3dv_3dc_3dT.nii.gz rest_3dc_RPI_3dv_3dc.nii.gz

    - Create Mask (Generate mask from Normalized data). For details see `fslmaths <http://www.fmrib.ox.ac.uk/fsl/avwutils/index.html>`_::

        fslmaths rest_3dc_RPI_3dv_3dc_maths.nii.gz
               -Tmin -bin rest_3dc_RPI_3dv_3dc_maths_maths.nii.gz
               -odt char

    .. exec::
        from CPAC.func_preproc import create_func_preproc
        wf = create_func_preproc()
        wf.write_graph(
            graph2use='orig',
            dotfilename='./images/generated/func_preproc.dot'
        )

    High Level Workflow Graph:

    .. image:: ../images/generated/func_preproc.png
       :width: 1000

    Detailed Workflow Graph:

    .. image:: ../images/generated/func_preproc_detailed.png
       :width: 1000

    Examples
    --------

    >>> from func_preproc import create_func_preproc
    >>> preproc = create_func_preproc(tool='fsl')
    >>> preproc.inputs.inputspec.func = 'sub1/func/rest.nii.gz'
    >>> preproc.run()  # doctest: +SKIP


    >>> from func_preproc import create_func_preproc
    >>> preproc = create_func_preproc(tool='afni')
    >>> preproc.inputs.inputspec.func = 'sub1/func/rest.nii.gz'
    >>> preproc.run()  # doctest: +SKIP

    """

    preproc = pe.Workflow(name=wf_name)
    input_node = pe.Node(util.IdentityInterface(fields=['func', 'twopass']),
                         name='inputspec')

    output_node = pe.Node(util.IdentityInterface(fields=[
        'refit', 'reorient', 'reorient_mean', 'motion_correct',
        'motion_correct_ref', 'movement_parameters', 'max_displacement',
        'mask', 'skullstrip', 'func_mean', 'preprocessed', 'preprocessed_mask',
        'slice_time_corrected', 'oned_matrix_save'
    ]),
                          name='outputspec')

    func_deoblique = pe.Node(interface=afni_utils.Refit(),
                             name='func_deoblique')
    func_deoblique.inputs.deoblique = True

    preproc.connect(input_node, 'func', func_deoblique, 'in_file')

    func_reorient = pe.Node(interface=afni_utils.Resample(),
                            name='func_reorient')

    func_reorient.inputs.orientation = 'RPI'
    func_reorient.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect(func_deoblique, 'out_file', func_reorient, 'in_file')

    preproc.connect(func_reorient, 'out_file', output_node, 'reorient')

    func_get_mean_RPI = pe.Node(interface=afni_utils.TStat(),
                                name='func_get_mean_RPI')

    func_get_mean_RPI.inputs.options = '-mean'
    func_get_mean_RPI.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect(func_reorient, 'out_file', func_get_mean_RPI, 'in_file')

    # calculate motion parameters
    func_motion_correct = pe.Node(interface=preprocess.Volreg(),
                                  name='func_motion_correct')
    func_motion_correct.inputs.zpad = 4
    func_motion_correct.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect([
        (input_node, func_motion_correct,
         [(('twopass', collect_arguments, '-twopass', '-Fourier'), 'args')]),
    ])

    preproc.connect(func_reorient, 'out_file', func_motion_correct, 'in_file')
    preproc.connect(func_get_mean_RPI, 'out_file', func_motion_correct,
                    'basefile')

    func_get_mean_motion = func_get_mean_RPI.clone('func_get_mean_motion')
    preproc.connect(func_motion_correct, 'out_file', func_get_mean_motion,
                    'in_file')

    preproc.connect(func_get_mean_motion, 'out_file', output_node,
                    'motion_correct_ref')

    func_motion_correct_A = func_motion_correct.clone('func_motion_correct_A')
    func_motion_correct_A.inputs.md1d_file = 'max_displacement.1D'

    preproc.connect([
        (input_node, func_motion_correct_A,
         [(('twopass', collect_arguments, '-twopass', '-Fourier'), 'args')]),
    ])

    preproc.connect(func_reorient, 'out_file', func_motion_correct_A,
                    'in_file')
    preproc.connect(func_get_mean_motion, 'out_file', func_motion_correct_A,
                    'basefile')

    preproc.connect(func_motion_correct_A, 'out_file', output_node,
                    'motion_correct')
    preproc.connect(func_motion_correct_A, 'md1d_file', output_node,
                    'max_displacement')
    preproc.connect(func_motion_correct_A, 'oned_file', output_node,
                    'movement_parameters')
    preproc.connect(func_motion_correct_A, 'oned_matrix_save', output_node,
                    'oned_matrix_save')

    skullstrip_func = skullstrip_functional(tool,
                                            "{0}_skullstrip".format(wf_name))

    preproc.connect(func_motion_correct_A, 'out_file', skullstrip_func,
                    'inputspec.func')
    preproc.connect(skullstrip_func, 'outputspec.func_brain', output_node,
                    'skullstrip')
    preproc.connect(skullstrip_func, 'outputspec.func_brain_mask', output_node,
                    'mask')

    func_mean = pe.Node(interface=afni_utils.TStat(), name='func_mean')

    func_mean.inputs.options = '-mean'
    func_mean.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect(skullstrip_func, 'outputspec.func_brain', func_mean,
                    'in_file')

    preproc.connect(func_mean, 'out_file', output_node, 'func_mean')

    func_normalize = pe.Node(interface=fsl.ImageMaths(), name='func_normalize')
    func_normalize.inputs.op_string = '-ing 10000'
    func_normalize.inputs.out_data_type = 'float'

    preproc.connect(skullstrip_func, 'outputspec.func_brain', func_normalize,
                    'in_file')

    preproc.connect(func_normalize, 'out_file', output_node, 'preprocessed')

    func_mask_normalize = pe.Node(interface=fsl.ImageMaths(),
                                  name='func_mask_normalize')
    func_mask_normalize.inputs.op_string = '-Tmin -bin'
    func_mask_normalize.inputs.out_data_type = 'char'

    preproc.connect(func_normalize, 'out_file', func_mask_normalize, 'in_file')

    preproc.connect(func_mask_normalize, 'out_file', output_node,
                    'preprocessed_mask')

    return preproc
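
The collect_arguments function used in the ('twopass', collect_arguments, '-twopass', '-Fourier') connections is defined elsewhere in C-PAC. A sketch of the behavior its usage implies (an assumption): the first argument gates the inclusion of the second, and the rest are always appended, yielding either '-twopass -Fourier' or just '-Fourier' for 3dvolreg's args input.

def collect_arguments(*args):
    # First argument is a boolean gate: include the following flag only
    # when it is True; the remaining arguments are always appended.
    command_args = []
    if args[0]:
        command_args.append(args[1])
    command_args.extend(args[2:])
    return ' '.join(command_args)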
Example 10
def BAWantsRegistrationTemplateBuildSingleIterationWF(iterationPhasePrefix=''):
    """

    Inputs::

           inputspec.ListOfImagesDictionaries :
           inputspec.registrationImageTypes :
           inputspec.interpolationMapping :
           inputspec.fixed_image :

    Outputs::

           outputspec.template :
           outputspec.transforms_list :
           outputspec.passive_deformed_templates :
    """
    TemplateBuildSingleIterationWF = pe.Workflow(name='antsRegistrationTemplateBuildSingleIterationWF_' + str(iterationPhasePrefix))

    inputSpec = pe.Node(interface=util.IdentityInterface(fields=[
        'ListOfImagesDictionaries', 'registrationImageTypes',
        #'maskRegistrationImageType',
        'interpolationMapping', 'fixed_image']),
        run_without_submitting=True,
        name='inputspec')
    ## HACK: TODO: We need to have the AVG_AIR.nii.gz be warped with a default voxel value of 1.0
    ## HACK: TODO: Need to move all local functions to a common utility file, or to the top of the file, so that
    ##             they do not change due to re-indenting.  Otherwise re-indenting for flow control will trigger
    ##             their hash to change.
    ## HACK: TODO: REMOVE 'transforms_list'; it is not used.  That will change all the hashes
    ## HACK: TODO: Need to run all python files through the code beautifiers.  It has gotten pretty ugly.
    outputSpec = pe.Node(interface=util.IdentityInterface(fields=['template', 'transforms_list',
                                                                  'passive_deformed_templates']),
                         run_without_submitting=True,
                         name='outputspec')

    ### NOTE: MapNode! Warp each of the original images to the provided fixed_image as the template
    BeginANTS = pe.MapNode(interface=Registration(), name='BeginANTS', iterfield=['moving_image'])
    BeginANTS.inputs.dimension = 3
    """ This is the recommended set of parameters from the ANTS developers """
    BeginANTS.inputs.output_transform_prefix = str(iterationPhasePrefix) + '_tfm'
    BeginANTS.inputs.transforms = ["Rigid", "Similarity", "Affine", "SyN"]
    BeginANTS.inputs.transform_parameters = [[0.1], [0.1], [0.1], [0.15, 3.0, 0.0]]
    BeginANTS.inputs.metric = ['Mattes', 'Mattes', 'Mattes', 'CC']
    BeginANTS.inputs.sampling_strategy = ['Regular', 'Regular', 'Regular', None]
    BeginANTS.inputs.sampling_percentage = [1.0, 1.0, 1.0, 1.0]
    BeginANTS.inputs.metric_weight = [1.0, 1.0, 1.0, 1.0]
    BeginANTS.inputs.radius_or_number_of_bins = [32, 32, 32, 4]
    BeginANTS.inputs.convergence_threshold = [5e-7, 5e-7, 5e-7, 5e-7]
    BeginANTS.inputs.convergence_window_size = [25, 25, 25, 25]
    BeginANTS.inputs.use_histogram_matching = [True, True, True, True]
    BeginANTS.inputs.number_of_iterations = [[2000, 2000], [2000, 2000], [1000, 1000, 100], [10000, 500, 500, 200]]
    BeginANTS.inputs.smoothing_sigmas = [[4, 2], [5, 2], [4, 2, 1], [5, 4, 2, 0]]
    BeginANTS.inputs.shrink_factors = [[4, 2], [5, 2], [4, 2, 1], [5, 4, 2, 1]]
    BeginANTS.inputs.use_estimate_learning_rate_once = [False, False, False, False]
    BeginANTS.inputs.write_composite_transform = True
    BeginANTS.inputs.collapse_output_transforms = True
    BeginANTS.inputs.winsorize_lower_quantile = 0.025
    BeginANTS.inputs.winsorize_upper_quantile = 0.975
    BeginANTS.inputs.output_warped_image = 'atlas2subject.nii.gz'
    BeginANTS.inputs.output_inverse_warped_image = 'subject2atlas.nii.gz'

    GetMovingImagesNode = pe.Node(interface=util.Function(function=GetMovingImages,
                                                          input_names=['ListOfImagesDictionaries', 'registrationImageTypes', 'interpolationMapping'],
                                                          output_names=['moving_images', 'moving_interpolation_type']),
                                  run_without_submitting=True,
                                  name='99_GetMovingImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'ListOfImagesDictionaries', GetMovingImagesNode, 'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes', GetMovingImagesNode, 'registrationImageTypes')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', GetMovingImagesNode, 'interpolationMapping')

    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode, 'moving_images', BeginANTS, 'moving_image')
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode, 'moving_interpolation_type', BeginANTS, 'interpolation')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS, 'fixed_image')

    ## Now warp all the input_images images
    wimtdeformed = pe.MapNode(interface=ApplyTransforms(),
                              iterfield=['transforms', 'invert_transform_flags', 'input_image'],
                              name='wimtdeformed')
    wimtdeformed.inputs.interpolation = 'Linear'
    wimtdeformed.inputs.default_value = 0
    # HACK: Should try using forward_composite_transform
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', wimtdeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', wimtdeformed, 'invert_transform_flags')
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode, 'moving_images', wimtdeformed, 'input_image')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', wimtdeformed, 'reference_image')

    ## ===== Shape update =====
    ## Average all of the deformed images together to create an updated template average
    AvgDeformedImages = pe.Node(interface=AverageImages(), name='AvgDeformedImages')
    AvgDeformedImages.inputs.dimension = 3
    AvgDeformedImages.inputs.output_average_image = str(iterationPhasePrefix) + '.nii.gz'
    AvgDeformedImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image", AvgDeformedImages, 'images')

    ## Now average all affine transforms together
    AvgAffineTransform = pe.Node(interface=AverageAffineTransform(), name='AvgAffineTransform')
    AvgAffineTransform.inputs.dimension = 3
    AvgAffineTransform.inputs.output_affine_transform = 'Average_' + str(iterationPhasePrefix) + '_Affine.h5'

    SplitAffineAndWarpsNode = pe.Node(interface=util.Function(function=SplitAffineAndWarpComponents,
                                      input_names=['list_of_transforms_lists'],
                                      output_names=['affine_component_list', 'warp_component_list']),
                                      run_without_submitting=True,
                                      name='99_SplitAffineAndWarpsNode')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', SplitAffineAndWarpsNode, 'list_of_transforms_lists')
    TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode, 'affine_component_list', AvgAffineTransform, 'transforms')

    ## Now average the warp fields together
    AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages')
    AvgWarpImages.inputs.dimension = 3
    AvgWarpImages.inputs.output_average_image = str(iterationPhasePrefix) + 'warp.nii.gz'
    AvgWarpImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode, 'warp_component_list', AvgWarpImages, 'images')

    ## Scale the average warp field by the gradient step
    ## TODO: For now GradientStep is hard-coded to a default value of 0.25.
    GradientStep = 0.25
    GradientStepWarpImage = pe.Node(interface=MultiplyImages(), name='GradientStepWarpImage')
    GradientStepWarpImage.inputs.dimension = 3
    GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep
    GradientStepWarpImage.inputs.output_product_image = 'GradientStep{0}_'.format(GradientStep) + str(iterationPhasePrefix) + '_warp.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgWarpImages, 'output_average_image', GradientStepWarpImage, 'first_input')

    ## Now create the new template shape based on the average of all deformed images
    UpdateTemplateShape = pe.Node(interface=ApplyTransforms(), name='UpdateTemplateShape')
    UpdateTemplateShape.inputs.invert_transform_flags = [True]
    UpdateTemplateShape.inputs.interpolation = 'Linear'
    UpdateTemplateShape.inputs.default_value = 0

    TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', UpdateTemplateShape, 'reference_image')
    TemplateBuildSingleIterationWF.connect([(AvgAffineTransform, UpdateTemplateShape, [(('affine_transform', makeListOfOneElement), 'transforms')]), ])
    TemplateBuildSingleIterationWF.connect(GradientStepWarpImage, 'output_product_image', UpdateTemplateShape, 'input_image')

    ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node(interface=util.Function(function=MakeTransformListWithGradientWarps,
                                                                                       input_names=['averageAffineTranform', 'gradientStepWarp'],
                                                                                       output_names=['TransformListWithGradientWarps']),
                                                               run_without_submitting=True,
                                                               name='99_MakeTransformListWithGradientWarps')
    ApplyInvAverageAndFourTimesGradientStepWarpImage.inputs.ignore_exception = True

    TemplateBuildSingleIterationWF.connect(AvgAffineTransform, 'affine_transform', ApplyInvAverageAndFourTimesGradientStepWarpImage, 'averageAffineTranform')
    TemplateBuildSingleIterationWF.connect(UpdateTemplateShape, 'output_image', ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp')

    ReshapeAverageImageWithShapeUpdate = pe.Node(interface=ApplyTransforms(), name='ReshapeAverageImageWithShapeUpdate')
    ReshapeAverageImageWithShapeUpdate.inputs.invert_transform_flags = [True, False, False, False, False]
    ReshapeAverageImageWithShapeUpdate.inputs.interpolation = 'Linear'
    ReshapeAverageImageWithShapeUpdate.inputs.default_value = 0
    ReshapeAverageImageWithShapeUpdate.inputs.output_image = 'ReshapeAverageImageWithShapeUpdate.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', ReshapeAverageImageWithShapeUpdate, 'input_image')
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', ReshapeAverageImageWithShapeUpdate, 'reference_image')
    TemplateBuildSingleIterationWF.connect(ApplyInvAverageAndFourTimesGradientStepWarpImage, 'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate, 'transforms')
    TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate, 'output_image', outputSpec, 'template')

    ##############################################
    ## Process all of the passive deformed images in a way similar
    ## to the main image used for registration
    ##############################################
    ## Now warp all of the passive images (ListOfPassiveImagesDictionaries)
    FlattenTransformAndImagesListNode = pe.Node(Function(function=FlattenTransformAndImagesList,
                                                         input_names=['ListOfPassiveImagesDictionaries', 'transforms',
                                                                      'invert_transform_flags', 'interpolationMapping'],
                                                         output_names=['flattened_images', 'flattened_transforms', 'flattened_invert_transform_flags',
                                                                       'flattened_image_nametypes', 'flattened_interpolation_type']),
                                                run_without_submitting=True, name="99_FlattenTransformAndImagesList")

    GetPassiveImagesNode = pe.Node(interface=util.Function(function=GetPassiveImages,
                                                           input_names=['ListOfImagesDictionaries', 'registrationImageTypes'],
                                                           output_names=['ListOfPassiveImagesDictionaries']),
                                   run_without_submitting=True,
                                   name='99_GetPassiveImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'ListOfImagesDictionaries', GetPassiveImagesNode, 'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes', GetPassiveImagesNode, 'registrationImageTypes')

    TemplateBuildSingleIterationWF.connect(GetPassiveImagesNode, 'ListOfPassiveImagesDictionaries', FlattenTransformAndImagesListNode, 'ListOfPassiveImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', FlattenTransformAndImagesListNode, 'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', FlattenTransformAndImagesListNode, 'transforms')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', FlattenTransformAndImagesListNode, 'invert_transform_flags')
    wimtPassivedeformed = pe.MapNode(interface=ApplyTransforms(),
                                     iterfield=['transforms', 'invert_transform_flags', 'input_image', 'interpolation'],
                                     name='wimtPassivedeformed')
    wimtPassivedeformed.inputs.default_value = 0
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages, 'output_average_image', wimtPassivedeformed, 'reference_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_interpolation_type', wimtPassivedeformed, 'interpolation')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_images', wimtPassivedeformed, 'input_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_transforms', wimtPassivedeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_invert_transform_flags', wimtPassivedeformed, 'invert_transform_flags')

    RenestDeformedPassiveImagesNode = pe.Node(Function(function=RenestDeformedPassiveImages,
                                                       input_names=['deformedPassiveImages', 'flattened_image_nametypes', 'interpolationMapping'],
                                                       output_names=['nested_imagetype_list', 'outputAverageImageName_list',
                                                                     'image_type_list', 'nested_interpolation_type']),
                                              run_without_submitting=True, name="99_RenestDeformedPassiveImages")
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', RenestDeformedPassiveImagesNode, 'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image', RenestDeformedPassiveImagesNode, 'deformedPassiveImages')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, 'flattened_image_nametypes', RenestDeformedPassiveImagesNode, 'flattened_image_nametypes')
    ## Average all of the deformed passive images together to create updated per-type template averages
    AvgDeformedPassiveImages = pe.MapNode(interface=AverageImages(),
                                          iterfield=['images', 'output_average_image'],
                                          name='AvgDeformedPassiveImages')
    AvgDeformedPassiveImages.inputs.dimension = 3
    AvgDeformedPassiveImages.inputs.normalize = False
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, "nested_imagetype_list", AvgDeformedPassiveImages, 'images')
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, "outputAverageImageName_list", AvgDeformedPassiveImages, 'output_average_image')

    ## TODO: Now need to reshape all of the passive images as well
    ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode(interface=ApplyTransforms(),
                                                           iterfield=['input_image', 'reference_image', 'output_image', 'interpolation'],
                                                           name='ReshapeAveragePassiveImageWithShapeUpdate')
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_transform_flags = [True, False, False, False, False]
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.default_value = 0
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, 'nested_interpolation_type', ReshapeAveragePassiveImageWithShapeUpdate, 'interpolation')
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, 'outputAverageImageName_list', ReshapeAveragePassiveImageWithShapeUpdate, 'output_image')
    TemplateBuildSingleIterationWF.connect(AvgDeformedPassiveImages, 'output_average_image', ReshapeAveragePassiveImageWithShapeUpdate, 'input_image')
    TemplateBuildSingleIterationWF.connect(AvgDeformedPassiveImages, 'output_average_image', ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image')
    TemplateBuildSingleIterationWF.connect(ApplyInvAverageAndFourTimesGradientStepWarpImage, 'TransformListWithGradientWarps', ReshapeAveragePassiveImageWithShapeUpdate, 'transforms')
    TemplateBuildSingleIterationWF.connect(ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec, 'passive_deformed_templates')

    return TemplateBuildSingleIterationWF
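
# Illustrative sketch, not part of the workflow above: the shape update
# scales the mean warp field by -GradientStep before applying it, nudging
# the running template toward the group mean shape. A toy 1-D analogue of
# that arithmetic in plain numpy (all values hypothetical):
def _shape_update_toy_example():
    import numpy as np
    warps = np.array([[1.0, 2.0], [3.0, -1.0], [-2.0, 0.5]])  # per-subject displacements
    gradient_step = 0.25
    mean_warp = warps.mean(axis=0)       # what AvgWarpImages computes
    update = -gradient_step * mean_warp  # what GradientStepWarpImage computes (second_input = -0.25)
    return update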
Esempio n. 11
0
def create_extract_pipe(params_template, params={}, name="extract_pipe"):
    """
    Description: Extract T1 brain using AtlasBrex

    Inputs:

        inputnode:
            restore_T1: preprocessed (debiased/denoised) T1 file name

            restore_T2: preprocessed (debiased/denoised) T2 file name

        arguments:
            params_template: dictionary of info about template

            params: dictionary of node sub-parameters (from a json file)

            name: pipeline name (default = "extract_pipe")

    Outputs:

        smooth_mask.out_file:
            Computed mask (after some smoothing)

    """

    # creating pipeline
    extract_pipe = pe.Workflow(name=name)

    # creating inputnode
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['restore_T1', 'restore_T2', "indiv_params"]),
                        name='inputnode')

    # atlas_brex
    atlas_brex = NodeParams(AtlasBREX(),
                            params=parse_key(params, "atlas_brex"),
                            name='atlas_brex')

    extract_pipe.connect(inputnode, "restore_T1", atlas_brex,
                         't1_restored_file')

    atlas_brex.inputs.NMT_file = params_template["template_head"]
    atlas_brex.inputs.NMT_SS_file = params_template["template_brain"]

    extract_pipe.connect(inputnode, ("indiv_params", parse_key, "atlas_brex"),
                         atlas_brex, 'indiv_params')

    # mask_brex
    mask_brex = pe.Node(fsl.UnaryMaths(), name='mask_brex')
    mask_brex.inputs.operation = 'bin'

    extract_pipe.connect(atlas_brex, 'brain_file', mask_brex, 'in_file')

    # smooth_mask
    smooth_mask = pe.Node(fsl.UnaryMaths(), name='smooth_mask')
    smooth_mask.inputs.operation = "bin"
    smooth_mask.inputs.args = "-s 1 -thr 0.5 -bin"

    extract_pipe.connect(mask_brex, 'out_file', smooth_mask, 'in_file')

    # mult_T1
    mult_T1 = pe.Node(afni.Calc(), name='mult_T1')
    mult_T1.inputs.expr = "a*b"
    mult_T1.inputs.outputtype = 'NIFTI_GZ'

    extract_pipe.connect(inputnode, "restore_T1", mult_T1, 'in_file_a')
    extract_pipe.connect(smooth_mask, 'out_file', mult_T1, 'in_file_b')

    # mult_T2
    mult_T2 = pe.Node(afni.Calc(), name='mult_T2')
    mult_T2.inputs.expr = "a*b"
    mult_T2.inputs.outputtype = 'NIFTI_GZ'

    extract_pipe.connect(inputnode, 'restore_T2', mult_T2, 'in_file_a')
    extract_pipe.connect(smooth_mask, 'out_file', mult_T2, 'in_file_b')
    return extract_pipe
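
# A minimal usage sketch, assuming hypothetical template and subject paths;
# the "template_head"/"template_brain" keys are the ones read by atlas_brex
# above, and the remaining inputs are set through the workflow's inputnode.
params_template = {
    "template_head": "/path/to/template_head.nii.gz",    # hypothetical path
    "template_brain": "/path/to/template_brain.nii.gz",  # hypothetical path
}
pipe = create_extract_pipe(params_template)
pipe.inputs.inputnode.restore_T1 = "/path/to/restore_T1.nii.gz"  # hypothetical path
pipe.inputs.inputnode.restore_T2 = "/path/to/restore_T2.nii.gz"  # hypothetical path
pipe.inputs.inputnode.indiv_params = {}
# pipe.run()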
Esempio n. 12
0
def tsnr_roi(roi=[1021], name='roi_flow', plot=False, onsets=False):
    """ Return a workflow that outputs either a graph of the average \
        
    timseries of each roi specified OR a table of average value across \
    all timeseries for each voxel in each ROI.
    
    Parameters
    ----------
    roi : List of Integers or ['all']
          Specify a list of ROI number corresponding to the Freesurfer LUT.
          Default = 1021 (lh-pericalcarine)
    name : String
           Name of workflow. 
           Default = 'roi_flow'
    plot : Boolean
           True if workflow should output timeseries plots/ROI
           False if workflow should output a table of avg.value/ROI
           Default = False
           
    Inputs
    ------
    inputspec.reg_file :
    inputspec.tsnr_file :
    inputspec.TR :
    inputspec.subject :
    inputspec.sd :
    
    Outputs
    -------
    outputspec.out_file :
    
    
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype.interfaces.freesurfer import ApplyVolTransform
    from nipype.workflows.smri.freesurfer.utils import create_get_stats_flow

    preproc = pe.Workflow(name=name)

    inputspec = pe.Node(interface=util.IdentityInterface(fields=[
        'reg_file', 'tsnr_file', 'TR', 'aparc_aseg', 'subject', 'onsets',
        'input_units', 'sd'
    ]),
                        name='inputspec')

    voltransform = pe.MapNode(interface=ApplyVolTransform(inverse=True,
                                                          interp='nearest'),
                              name='applyreg',
                              iterfield=['source_file'])

    preproc.connect(inputspec, 'tsnr_file', voltransform, 'source_file')

    preproc.connect(inputspec, 'reg_file', voltransform, 'reg_file')

    preproc.connect(inputspec, 'aparc_aseg', voltransform, 'target_file')

    statsflow = create_get_stats_flow()
    preproc.connect(voltransform, 'transformed_file', statsflow,
                    'inputspec.label_file')
    preproc.connect(inputspec, 'tsnr_file', statsflow, 'inputspec.source_file')

    statsflow.inputs.segstats.avgwf_txt_file = True

    def strip_ids(subject_id, summary_file, roi_file):
        import numpy as np
        import os
        roi_idx = np.genfromtxt(summary_file)[:, 1].astype(int)
        roi_vals = np.genfromtxt(roi_file)
        roi_vals = np.atleast_2d(roi_vals)
        rois2skip = [
            0, 2, 4, 5, 7, 14, 15, 24, 30, 31, 41, 43, 44, 46, 62, 63, 77, 80,
            85, 1000, 2000
        ]
        ids2remove = []
        for roi in rois2skip:
            idx, = np.nonzero(roi_idx == roi)
            ids2remove.extend(idx)
        ids2keep = np.setdiff1d(range(roi_idx.shape[0]), ids2remove)
        filename = os.path.join(os.getcwd(), subject_id + '.csv')
        newvals = np.vstack(
            (roi_idx[ids2keep], roi_vals[:, np.array(ids2keep)])).T
        np.savetxt(filename, newvals, '%.4f', delimiter=',')
        return filename

    roistripper = pe.MapNode(util.Function(
        input_names=['subject_id', 'summary_file', 'roi_file'],
        output_names=['roi_file'],
        function=strip_ids),
                             name='roistripper',
                             iterfield=['summary_file', 'roi_file'])

    preproc.connect(inputspec, 'subject', roistripper, 'subject_id')

    preproc.connect(statsflow, 'segstats.avgwf_txt_file', roistripper,
                    'roi_file')
    preproc.connect(statsflow, 'segstats.summary_file', roistripper,
                    'summary_file')

    if onsets:
        roiplotter = pe.MapNode(util.Function(
            input_names=['statsfile', 'roi', 'TR', 'plot', 'onsets', 'units'],
            output_names=['Fname', 'AvgRoi'],
            function=plot_timeseries),
                                name='roiplotter',
                                iterfield=['statsfile', 'onsets'])
        preproc.connect(inputspec, 'onsets', roiplotter, 'onsets')
        preproc.connect(inputspec, 'input_units', roiplotter, 'units')
    else:
        roiplotter = pe.MapNode(util.Function(
            input_names=['statsfile', 'roi', 'TR', 'plot', 'onsets', 'units'],
            output_names=['Fname', 'AvgRoi'],
            function=plot_timeseries),
                                name='roiplotter',
                                iterfield=['statsfile'])
        roiplotter.inputs.onsets = None
        roiplotter.inputs.units = None

    roiplotter.inputs.roi = roi
    preproc.connect(inputspec, 'TR', roiplotter, 'TR')
    roiplotter.inputs.plot = plot
    preproc.connect(roistripper, 'roi_file', roiplotter, 'statsfile')

    outputspec = pe.Node(interface=util.IdentityInterface(
        fields=['out_file', 'roi_table', 'roi_file']),
                         name='outputspec')
    preproc.connect(roiplotter, 'Fname', outputspec, 'out_file')
    preproc.connect(roiplotter, 'AvgRoi', outputspec, 'roi_table')
    preproc.connect(roistripper, 'roi_file', outputspec, 'roi_file')

    return preproc
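
# Hedged usage sketch: the field names mirror this workflow's inputspec and
# every path is a placeholder. tsnr_file feeds MapNodes, so it is a list.
wf = tsnr_roi(roi=[1021, 1022], name='roi_flow', plot=False)
wf.inputs.inputspec.tsnr_file = ['tsnr.nii.gz']    # placeholder
wf.inputs.inputspec.reg_file = 'register.dat'      # placeholder
wf.inputs.inputspec.aparc_aseg = 'aparc+aseg.mgz'  # placeholder
wf.inputs.inputspec.TR = 2.0
wf.inputs.inputspec.subject = 'sub-01'
wf.inputs.inputspec.sd = '/path/to/subjects_dir'   # placeholder
# wf.run()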
Esempio n. 13
0
def cluster_image(name="threshold_cluster_makeimages"):
    from nipype.interfaces import fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util

    workflow = pe.Workflow(name=name)
    inputspec = pe.Node(util.IdentityInterface(fields=[
        "zstat", "mask", "zthreshold", "pthreshold", "connectivity",
        'anatomical'
    ]),
                        name="inputspec")
    smoothest = pe.MapNode(fsl.SmoothEstimate(),
                           name='smooth_estimate',
                           iterfield=['zstat_file'])
    workflow.connect(inputspec, 'zstat', smoothest, 'zstat_file')
    workflow.connect(inputspec, 'mask', smoothest, 'mask_file')

    cluster = pe.MapNode(fsl.Cluster(out_localmax_txt_file=True,
                                     out_index_file=True,
                                     out_localmax_vol_file=True),
                         name='cluster',
                         iterfield=['in_file', 'dlh', 'volume'])
    workflow.connect(smoothest, 'dlh', cluster, 'dlh')
    workflow.connect(smoothest, 'volume', cluster, 'volume')
    workflow.connect(inputspec, "zthreshold", cluster, "threshold")
    workflow.connect(inputspec, "pthreshold", cluster, "pthreshold")
    workflow.connect(inputspec, "connectivity", cluster, "connectivity")
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_pval_file = True
    workflow.connect(inputspec, 'zstat', cluster, 'in_file')
    """
    labels = pe.MapNode(util.Function(input_names=['in_file','thr','csize'],
                                   output_names=['labels'],function=get_labels),
        name='labels',iterfield=["in_file"])

    workflow.connect(inputspec,"zthreshold",labels,"thr")
    workflow.connect(inputspec,"connectivity",labels,"csize")
    workflow.connect(cluster,"threshold_file",labels,"in_file")
    showslice=pe.MapNode(util.Function(input_names=['image_in','anat_file','coordinates','thr'],
                                    output_names=["outfiles"],function=show_slices),
              name='showslice',iterfield=["image_in","coordinates"])

    coords = pe.MapNode(util.Function(input_names=["in_file","img"],
                                   output_names=["coords"],
                                   function=get_coords2),
        name='getcoords', iterfield=["in_file","img"])

    workflow.connect(cluster,'threshold_file',showslice,'image_in')
    workflow.connect(inputspec,'anatomical',showslice,"anat_file")
    workflow.connect(inputspec,'zthreshold',showslice,'thr')
    workflow.connect(labels,'labels',coords,"img")
    workflow.connect(cluster,"threshold_file",coords,"in_file")
    workflow.connect(coords,"coords",showslice,"coordinates")

    overlay = pe.MapNode(util.Function(input_names=["stat_image",
                                                 "background_image",
                                                 "threshold"],
                                       output_names=["fnames"],function=overlay_new),
                         name='overlay', iterfield=["stat_image"])
    workflow.connect(inputspec,"anatomical", overlay,"background_image")
    workflow.connect(cluster,"threshold_file",overlay,"stat_image")
    workflow.connect(inputspec,"zthreshold",overlay,"threshold")
    """
    outputspec = pe.Node(util.IdentityInterface(fields=[
        "corrected_z", "localmax_txt", "index_file", "localmax_vol", "slices",
        "cuts", "corrected_p"
    ]),
                         name='outputspec')
    workflow.connect(cluster, 'threshold_file', outputspec, 'corrected_z')
    workflow.connect(cluster, 'index_file', outputspec, 'index_file')
    workflow.connect(cluster, 'localmax_vol_file', outputspec, 'localmax_vol')
    #workflow.connect(showslice,"outfiles",outputspec,"slices")
    #workflow.connect(overlay,"fnames",outputspec,"cuts")
    workflow.connect(cluster, 'localmax_txt_file', outputspec, 'localmax_txt')
    return workflow
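
# Illustrative invocation sketch: the z- and p-thresholds follow common FSL
# defaults but are assumptions here, as is every path. zstat is a list
# because smooth_estimate and cluster are MapNodes.
wf = cluster_image()
wf.inputs.inputspec.zstat = ['zstat1.nii.gz']  # placeholder
wf.inputs.inputspec.mask = 'mask.nii.gz'       # placeholder
wf.inputs.inputspec.zthreshold = 2.3
wf.inputs.inputspec.pthreshold = 0.05
wf.inputs.inputspec.connectivity = 26
# wf.run()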
Esempio n. 14
0
def cluster_image2(name="threshold_cluster_makeimages"):
    from nipype.interfaces import fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util

    workflow = pe.Workflow(name=name)
    inputspec = pe.Node(util.IdentityInterface(fields=[
        "pstat", "mask", "threshold", "min_cluster_size", 'anatomical'
    ]),
                        name="inputspec")

    do_fdr = pe.MapNode(util.Function(
        input_names=['in_file', 'mask_file', 'pthresh'],
        output_names=['qstat', 'qthresh', 'qrate'],
        function=fdr),
                        name='do_fdr',
                        iterfield=['in_file'])

    cluster = pe.MapNode(fsl.Cluster(out_localmax_txt_file=True,
                                     out_index_file=True,
                                     out_localmax_vol_file=True),
                         name='cluster',
                         iterfield=['in_file', 'threshold'])

    workflow.connect(inputspec, 'pstat', do_fdr, 'in_file')
    workflow.connect(inputspec, 'mask', do_fdr, 'mask_file')
    workflow.connect(inputspec, 'threshold', do_fdr, 'pthresh')

    workflow.connect(do_fdr, "qthresh", cluster, "threshold")
    #workflow.connect(inputspec,"connectivity",cluster,"connectivity")
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_pval_file = True
    workflow.connect(do_fdr, 'qstat', cluster, 'in_file')

    labels = pe.MapNode(util.Function(input_names=['in_file', 'thr', 'csize'],
                                      output_names=['labels'],
                                      function=get_labels),
                        name='labels',
                        iterfield=["in_file", "thr"])

    workflow.connect(do_fdr, "qthresh", labels, "thr")
    workflow.connect(inputspec, "min_cluster_size", labels, "csize")
    workflow.connect(cluster, "threshold_file", labels, "in_file")

    showslice = pe.MapNode(util.Function(
        input_names=['image_in', 'anat_file', 'coordinates', 'thr'],
        output_names=["outfiles"],
        function=show_slices),
                           name='showslice',
                           iterfield=["image_in", "coordinates", 'thr'])

    coords = pe.MapNode(util.Function(input_names=["in_file", "img"],
                                      output_names=["coords"],
                                      function=get_coords2),
                        name='getcoords',
                        iterfield=["in_file", "img"])

    workflow.connect(cluster, 'threshold_file', showslice, 'image_in')
    workflow.connect(inputspec, 'anatomical', showslice, "anat_file")
    workflow.connect(do_fdr, 'qthresh', showslice, 'thr')
    workflow.connect(labels, 'labels', coords, "img")
    workflow.connect(cluster, "threshold_file", coords, "in_file")
    workflow.connect(coords, "coords", showslice, "coordinates")

    overlay = pe.MapNode(util.Function(
        input_names=["stat_image", "background_image", "threshold"],
        output_names=["fnames"],
        function=overlay_new),
                         name='overlay',
                         iterfield=["stat_image", 'threshold'])
    workflow.connect(inputspec, "anatomical", overlay, "background_image")
    workflow.connect(cluster, "threshold_file", overlay, "stat_image")
    workflow.connect(do_fdr, "qthresh", overlay, "threshold")
    #workflow.connect(cluster, 'threshold_file',imgflow,'inputspec.in_file')
    #workflow.connect(dataflow,'func',imgflow, 'inputspec.in_file')
    #workflow.connect(inputspec,'mask',imgflow, 'inputspec.mask_file')

    outputspec = pe.Node(util.IdentityInterface(fields=[
        "corrected_p", "localmax_txt", "index_file", "localmax_vol", "slices",
        "cuts", "qrate"
    ]),
                         name='outputspec')
    workflow.connect(cluster, 'threshold_file', outputspec, 'corrected_p')
    workflow.connect(showslice, "outfiles", outputspec, "slices")
    workflow.connect(overlay, "fnames", outputspec, "cuts")
    workflow.connect(cluster, 'localmax_txt_file', outputspec, 'localmax_txt')
    workflow.connect(do_fdr, "qrate", outputspec, 'qrate')
    #workflow.connect(logp,'out_file',outputspec,"corrected_p")
    return workflow
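
# The `fdr` helper wired into do_fdr above is defined elsewhere in the
# module. Below is one hedged sketch of what such a function could look
# like, using a plain Benjamini-Hochberg procedure over in-mask voxelwise
# p-values; this is an assumption about the original, not its actual
# implementation.
def fdr_sketch(in_file, mask_file, pthresh):
    import os
    import numpy as np
    import nibabel as nib

    img = nib.load(in_file)
    mask = nib.load(mask_file).get_fdata() > 0
    p = img.get_fdata()[mask]
    m = p.size
    order = np.argsort(p)
    adj = p[order] * m / (np.arange(m) + 1)       # Benjamini-Hochberg adjustment
    adj = np.minimum.accumulate(adj[::-1])[::-1]  # enforce monotone q-values
    qvals = np.empty(m)
    qvals[order] = adj
    qdata = np.ones(img.shape)
    qdata[mask] = qvals
    qstat = os.path.abspath('qstat.nii.gz')
    # store 1 - q so that larger values survive an upper threshold
    nib.save(nib.Nifti1Image(1.0 - qdata, img.affine, img.header), qstat)
    qthresh = 1.0 - pthresh                       # matching threshold for the 1 - q image
    qrate = float((qvals <= pthresh).mean())      # fraction of in-mask voxels surviving
    return qstat, qthresh, qrate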
Esempio n. 15
0
def create_func_datasource(rest_dict, wf_name='func_datasource'):
    """Return the functional timeseries-related file paths for each
    series/scan, from the dictionary of functional files described in the data
    configuration (sublist) YAML file.

    Scan input (from inputnode) is an iterable.
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util

    wf = pe.Workflow(name=wf_name)

    inputnode = pe.Node(util.IdentityInterface(
        fields=['subject', 'scan', 'creds_path', 'dl_dir'],
        mandatory_inputs=True),
                        name='inputnode')

    outputnode = pe.Node(util.IdentityInterface(fields=[
        'subject', 'rest', 'scan', 'scan_params', 'phase_diff', 'magnitude'
    ]),
                         name='outputspec')

    # have this here for now because of the big change in the data
    # configuration format
    check_scan = pe.Node(function.Function(
        input_names=['func_scan_dct', 'scan'],
        output_names=[],
        function=check_func_scan,
        as_module=True),
                         name='check_func_scan')

    check_scan.inputs.func_scan_dct = rest_dict
    wf.connect(inputnode, 'scan', check_scan, 'scan')

    # get the functional scan itself
    selectrest = pe.Node(function.Function(
        input_names=['scan', 'rest_dict', 'resource'],
        output_names=['file_path'],
        function=get_rest,
        as_module=True),
                         name='selectrest')
    selectrest.inputs.rest_dict = rest_dict
    selectrest.inputs.resource = "scan"
    wf.connect(inputnode, 'scan', selectrest, 'scan')

    # check to see if it's on an Amazon AWS S3 bucket, and download it, if it
    # is - otherwise, just return the local file path
    check_s3_node = pe.Node(function.Function(
        input_names=['file_path', 'creds_path', 'dl_dir', 'img_type'],
        output_names=['local_path'],
        function=check_for_s3,
        as_module=True),
                            name='check_for_s3')

    wf.connect(selectrest, 'file_path', check_s3_node, 'file_path')
    wf.connect(inputnode, 'creds_path', check_s3_node, 'creds_path')
    wf.connect(inputnode, 'dl_dir', check_s3_node, 'dl_dir')
    check_s3_node.inputs.img_type = 'func'

    wf.connect(inputnode, 'subject', outputnode, 'subject')
    wf.connect(check_s3_node, 'local_path', outputnode, 'rest')
    wf.connect(inputnode, 'scan', outputnode, 'scan')

    # scan parameters CSV
    select_scan_params = pe.Node(function.Function(
        input_names=['scan', 'rest_dict', 'resource'],
        output_names=['file_path'],
        function=get_rest,
        as_module=True),
                                 name='select_scan_params')
    select_scan_params.inputs.rest_dict = rest_dict
    select_scan_params.inputs.resource = "scan_parameters"
    wf.connect(inputnode, 'scan', select_scan_params, 'scan')

    # if the scan parameters file is on AWS S3, download it
    s3_scan_params = pe.Node(function.Function(
        input_names=['file_path', 'creds_path', 'dl_dir', 'img_type'],
        output_names=['local_path'],
        function=check_for_s3,
        as_module=True),
                             name='s3_scan_params')

    wf.connect(select_scan_params, 'file_path', s3_scan_params, 'file_path')
    wf.connect(inputnode, 'creds_path', s3_scan_params, 'creds_path')
    wf.connect(inputnode, 'dl_dir', s3_scan_params, 'dl_dir')
    wf.connect(s3_scan_params, 'local_path', outputnode, 'scan_params')

    # field map phase file, for field map distortion correction
    select_fmap_phase = pe.Node(function.Function(
        input_names=['scan', 'rest_dict', 'resource'],
        output_names=['file_path'],
        function=get_rest,
        as_module=True),
                                name='select_fmap_phase')
    select_fmap_phase.inputs.rest_dict = rest_dict
    select_fmap_phase.inputs.resource = "fmap_phase"
    wf.connect(inputnode, 'scan', select_fmap_phase, 'scan')

    s3_fmap_phase = pe.Node(function.Function(
        input_names=['file_path', 'creds_path', 'dl_dir', 'img_type'],
        output_names=['local_path'],
        function=check_for_s3,
        as_module=True),
                            name='s3_fmap_phase')
    s3_fmap_phase.inputs.img_type = "other"
    wf.connect(select_fmap_phase, 'file_path', s3_fmap_phase, 'file_path')
    wf.connect(inputnode, 'creds_path', s3_fmap_phase, 'creds_path')
    wf.connect(inputnode, 'dl_dir', s3_fmap_phase, 'dl_dir')
    wf.connect(s3_fmap_phase, 'local_path', outputnode, 'phase_diff')

    # field map magnitude file, for field map distortion correction
    select_fmap_mag = pe.Node(function.Function(
        input_names=['scan', 'rest_dict', 'resource'],
        output_names=['file_path'],
        function=get_rest,
        as_module=True),
                              name='select_fmap_mag')
    select_fmap_mag.inputs.rest_dict = rest_dict
    select_fmap_mag.inputs.resource = "fmap_mag"
    wf.connect(inputnode, 'scan', select_fmap_mag, 'scan')

    s3_fmap_mag = pe.Node(function.Function(
        input_names=['file_path', 'creds_path', 'dl_dir', 'img_type'],
        output_names=['local_path'],
        function=check_for_s3,
        as_module=True),
                          name='s3_fmap_mag')
    s3_fmap_mag.inputs.img_type = "other"
    wf.connect(select_fmap_mag, 'file_path', s3_fmap_mag, 'file_path')
    wf.connect(inputnode, 'creds_path', s3_fmap_mag, 'creds_path')
    wf.connect(inputnode, 'dl_dir', s3_fmap_mag, 'dl_dir')
    wf.connect(s3_fmap_mag, 'local_path', outputnode, 'magnitude')

    return wf
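
# Hedged usage sketch: the nested layout assumes get_rest() looks up
# rest_dict[scan][resource], matching the resource names requested above
# ("scan", "scan_parameters", "fmap_phase", "fmap_mag"); all paths are
# placeholders.
rest_dict = {
    'rest_run-1': {
        'scan': '/data/sub-01/func/rest_run-1_bold.nii.gz',
        'scan_parameters': '/data/sub-01/func/scan_params.csv',
        'fmap_phase': '/data/sub-01/fmap/phasediff.nii.gz',
        'fmap_mag': '/data/sub-01/fmap/magnitude.nii.gz',
    },
}
wf = create_func_datasource(rest_dict)
wf.inputs.inputnode.subject = 'sub-01'
wf.inputs.inputnode.scan = 'rest_run-1'
wf.inputs.inputnode.creds_path = None
wf.inputs.inputnode.dl_dir = '/tmp'
# wf.run()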
Esempio n. 16
0
def skullstrip_functional(tool='afni', wf_name='skullstrip_functional'):

    tool = tool.lower()
    if tool not in ('afni', 'fsl', 'fsl_afni'):
        raise Exception("\n\n[!] Error: The 'tool' parameter of the "
                        "'skullstrip_functional' workflow must be one of "
                        "'afni', 'fsl' or 'fsl_afni'.\n\nTool input: "
                        "{0}\n\n".format(tool))

    wf = pe.Workflow(name=wf_name)

    input_node = pe.Node(util.IdentityInterface(fields=['func']),
                         name='inputspec')

    output_node = pe.Node(
        util.IdentityInterface(fields=['func_brain', 'func_brain_mask']),
        name='outputspec')

    if tool == 'afni':
        func_get_brain_mask = pe.Node(interface=preprocess.Automask(),
                                      name='func_get_brain_mask_AFNI')
        func_get_brain_mask.inputs.outputtype = 'NIFTI_GZ'

        wf.connect(input_node, 'func', func_get_brain_mask, 'in_file')

        wf.connect(func_get_brain_mask, 'out_file', output_node,
                   'func_brain_mask')

    elif tool == 'fsl':
        func_get_brain_mask = pe.Node(interface=fsl.BET(),
                                      name='func_get_brain_mask_BET')

        func_get_brain_mask.inputs.mask = True
        func_get_brain_mask.inputs.functional = True

        erode_one_voxel = pe.Node(interface=fsl.ErodeImage(),
                                  name='erode_one_voxel')

        erode_one_voxel.inputs.kernel_shape = 'box'
        erode_one_voxel.inputs.kernel_size = 1.0

        wf.connect(input_node, 'func', func_get_brain_mask, 'in_file')

        wf.connect(func_get_brain_mask, 'mask_file', erode_one_voxel,
                   'in_file')

        wf.connect(erode_one_voxel, 'out_file', output_node, 'func_brain_mask')

    elif tool == 'fsl_afni':
        skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2,
                                                mask=True,
                                                functional=True),
                                        name='skullstrip_first_pass')
        bet_dilate = pe.Node(fsl.DilateImage(operation='max',
                                             kernel_shape='sphere',
                                             kernel_size=6.0,
                                             internal_datatype='char'),
                             name='skullstrip_first_dilate')
        bet_mask = pe.Node(fsl.ApplyMask(), name='skullstrip_first_mask')
        unifize = pe.Node(afni_utils.Unifize(
            t2=True,
            outputtype='NIFTI_GZ',
            args='-clfrac 0.2 -rbt 18.3 65.0 90.0',
            out_file="uni.nii.gz"),
                          name='unifize')
        skullstrip_second_pass = pe.Node(preprocess.Automask(
            dilate=1, outputtype='NIFTI_GZ'),
                                         name='skullstrip_second_pass')
        combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'),
                                name='combine_masks')

        wf.connect([
            (input_node, skullstrip_first_pass, [('func', 'in_file')]),
            (skullstrip_first_pass, bet_dilate, [('mask_file', 'in_file')]),
            (bet_dilate, bet_mask, [('out_file', 'mask_file')]),
            (skullstrip_first_pass, bet_mask, [('out_file', 'in_file')]),
            (bet_mask, unifize, [('out_file', 'in_file')]),
            (unifize, skullstrip_second_pass, [('out_file', 'in_file')]),
            (skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]),
            (skullstrip_second_pass, combine_masks, [('out_file',
                                                      'operand_file')]),
            (combine_masks, output_node, [('out_file', 'func_brain_mask')])
        ])

    func_edge_detect = pe.Node(interface=afni_utils.Calc(),
                               name='func_extract_brain')

    func_edge_detect.inputs.expr = 'a*b'
    func_edge_detect.inputs.outputtype = 'NIFTI_GZ'

    wf.connect(input_node, 'func', func_edge_detect, 'in_file_a')

    if tool == 'afni':
        wf.connect(func_get_brain_mask, 'out_file', func_edge_detect,
                   'in_file_b')
    elif tool == 'fsl':
        wf.connect(erode_one_voxel, 'out_file', func_edge_detect, 'in_file_b')
    elif tool == 'fsl_afni':
        wf.connect(combine_masks, 'out_file', func_edge_detect, 'in_file_b')

    wf.connect(func_edge_detect, 'out_file', output_node, 'func_brain')

    return wf
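
# Minimal usage sketch; the BOLD path is a placeholder.
wf = skullstrip_functional(tool='fsl_afni')
wf.inputs.inputspec.func = 'sub-01_task-rest_bold.nii.gz'  # placeholder
# wf.run()  # produces outputspec.func_brain and outputspec.func_brain_mask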
Esempio n. 17
0
def main(derivatives, ds):

    if ds == 'ds-01':
        subjects = ['{:02d}'.format(s) for s in range(1, 20)]
    elif ds == 'ds-02':
        subjects = ['{:02d}'.format(s) for s in range(1, 16)]
        subjects.pop(3)  # remove subject '04'

    wf_folder = '/tmp/workflow_folders'

    templates = {'preproc':op.join(derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                                   'sub-{subject}_task-randomdotmotion_run-*_space-T1w_desc-preproc_bold.nii.gz')}

    templates['individual_mask'] = op.join(derivatives, ds, 'pca_masks', 'sub-{subject}', 'anat',
                                           'sub-{subject}_desc-{mask}_space-T1w_subroi-{subroi}_roi.nii.gz')

    wf = pe.Workflow(name='extract_signal_submasks_{}'.format(ds),
                     base_dir=wf_folder)

    mask_identity = pe.Node(niu.IdentityInterface(fields=['mask', 'subroi']),
                            name='mask_identity')
    mask_identity.iterables = [('mask', ['stnl', 'stnr']), ('subroi', ['A', 'B', 'C'])]

    selector = pe.Node(nio.SelectFiles(templates),
                       name='selector')

    selector.iterables = [('subject', subjects)]
    wf.connect(mask_identity, 'mask', selector, 'mask')
    wf.connect(mask_identity, 'subroi', selector, 'subroi')

    def extract_signal(preproc, mask):
        from nilearn import image
        from nilearn import input_data
        from nipype.utils.filemanip import split_filename
        import os.path as op
        import pandas as pd

        _, fn, ext = split_filename(preproc)
        masker = input_data.NiftiMasker(mask, standardize='psc')

        data = pd.DataFrame(masker.fit_transform(preproc))

        new_fn = op.abspath('{}_signal.csv'.format(fn))
        data.to_csv(new_fn)

        return new_fn

    extract_signal_node = pe.MapNode(niu.Function(function=extract_signal,
                                                  input_names=['preproc', 'mask'],
                                                  output_names=['signal']),
                                     iterfield=['preproc'],
                                     name='extract_signal_node')

    wf.connect(selector, 'preproc', extract_signal_node, 'preproc')
    wf.connect(selector, 'individual_mask', extract_signal_node, 'mask')

    datasink_signal = pe.MapNode(DerivativesDataSink(base_directory=op.join(derivatives, ds),
                                                     out_path_base='extracted_signal'),
                                 iterfield=['source_file', 'in_file'],
                                 name='datasink_signal')

    wf.connect(selector, 'preproc', datasink_signal, 'source_file')
    wf.connect(extract_signal_node, 'signal', datasink_signal, 'in_file')
    wf.connect(mask_identity, 'mask', datasink_signal, 'desc')

    def get_subroi_suffix(subroi):
        return 'subroi-{}_roi'.format(subroi)

    wf.connect(mask_identity, ('subroi', get_subroi_suffix), datasink_signal, 'suffix')


    wf.run(plugin='MultiProc',
           plugin_args={'n_procs':4})
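
# Hedged invocation sketch; the derivatives root is a placeholder and the
# call is left commented because it launches the full MultiProc run.
# main('/path/to/derivatives', 'ds-01')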
Esempio n. 18
0
def register_dti_maps_on_atlas(
        working_directory=None,
        name="register_dti_maps_on_atlas"):
    """
    Register FA-map on a subject towards a FA atlas and apply the estimated
    deformation to MD, AD & RD.

    This pipelines performs the analysis of tracts using a white matter atlas
    and computes mean value of the scalar on each tracts of this atlas. The
    pipelines registers the FA-map of a subject onto the FA-map of the atlas
    thanks to antsRegistrationSyNQuick. Then, the estimated deformation is
    applied to the MD-map, AD-map and RD-map. Finally, the labelled atlas
    is used to compute the statistics of each scalar on each tract of the white
    matter atlas.

    Args:
        working_directory (Optional[str]): Directory where the temporary
            results are stored. If not specified, it is
            automatically generated (generally in /tmp/).
        name (Optional[str]): Name of the pipelines.

    Inputnode:
        in_fa (str): FA-map of the subject in native space.
        in_md (str): MD-map of the subject in native space.
        in_ad (str): AD-map of the subject in native space.
        in_rd (str): RD-map of the subject in native space.

    Outputnode:
        out_affine_matrix (str): Affine transformation matrix obtained by
            antsRegistrationSyNQuick after registration towards <atlas_name>.
        out_b_spline_transform (str): BSpline transformation obtained by
            antsRegistrationSyNQuick after registration towards <atlas_name>.
        out_norm_fa (str): FA-map registered on <atlas_name>.
        out_norm_md (str): MD-map registered on <atlas_name>.
        out_norm_ad (str): AD-map registered on <atlas_name>.
        out_norm_rd (str): RD-map registered on <atlas_name>.
    """
    import os
    import tempfile
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.utility as niu
    import nipype.pipeline.engine as pe
    from nipype.interfaces.ants import RegistrationSynQuick
    from clinica.utils.atlas import AtlasAbstract, JHUDTI811mm
    from clinica.utils.mri_registration import apply_ants_registration_syn_quick_transformation
    from clinica.utils.check_dependency import check_environment_variable

    atlas = JHUDTI811mm()

    if not isinstance(atlas, AtlasAbstract):
        raise Exception("Atlas element must be an AtlasAbstract type")

    if working_directory is None:
        working_directory = tempfile.mkdtemp()

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_fa', 'in_md', 'in_ad', 'in_rd', 'in_atlas_scalar_image']),
        name='inputnode')
    fsl_dir = check_environment_variable('FSLDIR', 'FSL')
    fa_map = os.path.join(fsl_dir, 'data', 'atlases', 'JHU', 'JHU-ICBM-FA-1mm.nii.gz')
    inputnode.inputs.in_atlas_scalar_image = fa_map

    register_fa = pe.Node(
        interface=RegistrationSynQuick(),
        name='register_fa')

    apply_ants_registration_for_md = pe.Node(interface=niu.Function(
        input_names=['in_image', 'in_reference_image',
                     'in_affine_transformation', 'in_bspline_transformation',
                     'name_output_image'],
        output_names=['out_deformed_image'],
        function=apply_ants_registration_syn_quick_transformation),
        name='apply_ants_registration_for_md')
    apply_ants_registration_for_md.inputs.name_output_image = \
        'space-' + atlas.get_name_atlas() + '_res-' + atlas.get_spatial_resolution() + '_MD.nii.gz'
    apply_ants_registration_for_ad = pe.Node(interface=niu.Function(
        input_names=['in_image', 'in_reference_image',
                     'in_affine_transformation', 'in_bspline_transformation',
                     'name_output_image'],
        output_names=['out_deformed_image'],
        function=apply_ants_registration_syn_quick_transformation),
        name='apply_ants_registration_for_ad')
    apply_ants_registration_for_ad.inputs.name_output_image = \
        'space-' + atlas.get_name_atlas() + '_res-' + atlas.get_spatial_resolution() + '_AD.nii.gz'
    apply_ants_registration_for_rd = pe.Node(interface=niu.Function(
        input_names=['in_image', 'in_reference_image',
                     'in_affine_transformation', 'in_bspline_transformation',
                     'name_output_image'],
        output_names=['out_deformed_image'],
        function=apply_ants_registration_syn_quick_transformation),
        name='apply_ants_registration_for_rd')
    apply_ants_registration_for_rd.inputs.name_output_image = \
        'space-' + atlas.get_name_atlas() + '_res-' + atlas.get_spatial_resolution() + '_RD.nii.gz'

    thres_map = pe.Node(fsl.Threshold(thresh=0.0),
                        name='RemoveNegative')
    thres_fa = thres_map.clone('RemoveNegative_FA')
    thres_md = thres_map.clone('RemoveNegative_MD')
    thres_ad = thres_map.clone('RemoveNegative_AD')
    thres_rd = thres_map.clone('RemoveNegative_RD')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_norm_fa', 'out_norm_md', 'out_norm_ad', 'out_norm_rd',
                'out_affine_matrix', 'out_b_spline_transform',
                'out_inverse_warp']),
        name='outputnode')

    wf = pe.Workflow(name=name, base_dir=working_directory)
    wf.connect([
        # Registration of FA-map onto the atlas:
        (inputnode, register_fa, [('in_fa',                 'moving_image'),  # noqa
                                  ('in_atlas_scalar_image', 'fixed_image')]),  # noqa
        # Apply deformation field on MD, AD & RD:
        (inputnode,   apply_ants_registration_for_md, [('in_md',                 'in_image')]),  # noqa
        (inputnode,   apply_ants_registration_for_md, [('in_atlas_scalar_image', 'in_reference_image')]),  # noqa
        (register_fa, apply_ants_registration_for_md, [('out_matrix',            'in_affine_transformation')]),  # noqa
        (register_fa, apply_ants_registration_for_md, [('forward_warp_field',    'in_bspline_transformation')]),  # noqa

        (inputnode,   apply_ants_registration_for_ad, [('in_ad',                 'in_image')]),  # noqa
        (inputnode,   apply_ants_registration_for_ad, [('in_atlas_scalar_image', 'in_reference_image')]),  # noqa
        (register_fa, apply_ants_registration_for_ad, [('out_matrix',            'in_affine_transformation')]),  # noqa
        (register_fa, apply_ants_registration_for_ad, [('forward_warp_field',    'in_bspline_transformation')]),  # noqa

        (inputnode,   apply_ants_registration_for_rd, [('in_rd',                 'in_image')]),  # noqa
        (inputnode,   apply_ants_registration_for_rd, [('in_atlas_scalar_image', 'in_reference_image')]),  # noqa
        (register_fa, apply_ants_registration_for_rd, [('out_matrix',            'in_affine_transformation')]),  # noqa
        (register_fa, apply_ants_registration_for_rd, [('forward_warp_field',    'in_bspline_transformation')]),  # noqa
        # Remove negative values from the DTI maps:
        (register_fa,                    thres_fa, [('warped_image',       'in_file')]),  # noqa
        (apply_ants_registration_for_md, thres_md, [('out_deformed_image', 'in_file')]),  # noqa
        (apply_ants_registration_for_rd, thres_rd, [('out_deformed_image', 'in_file')]),  # noqa
        (apply_ants_registration_for_ad, thres_ad, [('out_deformed_image', 'in_file')]),  # noqa
        # Outputnode:
        (thres_fa,    outputnode, [('out_file',           'out_norm_fa')]),  # noqa
        (register_fa, outputnode, [('out_matrix',         'out_affine_matrix'),  # noqa
                                   ('forward_warp_field', 'out_b_spline_transform'),  # noqa
                                   ('inverse_warp_field', 'out_inverse_warp')]),  # noqa
        (thres_md,    outputnode,  [('out_file',          'out_norm_md')]),  # noqa
        (thres_ad,    outputnode,  [('out_file',          'out_norm_ad')]),  # noqa
        (thres_rd,    outputnode,  [('out_file',          'out_norm_rd')])   # noqa
    ])

    return wf
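
# Usage sketch with placeholder native-space maps; FSLDIR must be set so
# that the JHU FA template can be located as above.
wf = register_dti_maps_on_atlas()
wf.inputs.inputnode.in_fa = 'sub-01_FA.nii.gz'  # placeholder
wf.inputs.inputnode.in_md = 'sub-01_MD.nii.gz'  # placeholder
wf.inputs.inputnode.in_ad = 'sub-01_AD.nii.gz'  # placeholder
wf.inputs.inputnode.in_rd = 'sub-01_RD.nii.gz'  # placeholder
# wf.run()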
Esempio n. 19
0
def init_smriprep_wf(
    debug,
    freesurfer,
    fs_subjects_dir,
    hires,
    layout,
    longitudinal,
    low_mem,
    omp_nthreads,
    output_dir,
    output_spaces,
    run_uuid,
    skull_strip_fixed_seed,
    skull_strip_template,
    subject_list,
    work_dir,
):
    """
    Create the execution graph of *sMRIPrep*, with a sub-workflow for each subject.

    If FreeSurfer's ``recon-all`` is to be run, a FreeSurfer derivatives folder is
    created and populated with any needed template subjects.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            import os
            from collections import OrderedDict, namedtuple
            BIDSLayout = namedtuple('BIDSLayout', ['root'])
            os.environ['FREESURFER_HOME'] = os.getcwd()
            from smriprep.workflows.base import init_smriprep_wf
            wf = init_smriprep_wf(
                debug=False,
                freesurfer=True,
                fs_subjects_dir=None,
                hires=True,
                layout=BIDSLayout('.'),
                longitudinal=False,
                low_mem=False,
                omp_nthreads=1,
                output_dir='.',
                output_spaces=OrderedDict([('MNI152NLin2009cAsym', {}),
                                           ('fsaverage5', {})]),
                run_uuid='testrun',
                skull_strip_fixed_seed=False,
                skull_strip_template=('OASIS30ANTs', {}),
                subject_list=['smripreptest'],
                work_dir='.',
            )

    Parameters
    ----------
    debug : bool
        Enable debugging outputs
    freesurfer : bool
        Enable FreeSurfer surface reconstruction (may increase runtime)
    fs_subjects_dir : os.PathLike or None
        Use existing FreeSurfer subjects directory if provided
    hires : bool
        Enable sub-millimeter preprocessing in FreeSurfer
    layout : BIDSLayout object
        BIDS dataset layout
    longitudinal : bool
        Treat multiple sessions as longitudinal (may increase runtime)
        See sub-workflows for specific differences
    low_mem : bool
        Write uncompressed .nii files in some cases to reduce memory usage
    omp_nthreads : int
        Maximum number of threads an individual process may use
    output_dir : str
        Directory in which to save derivatives
    output_spaces : OrderedDict
        List of spatial normalization targets. Some parts of pipeline will
        only be instantiated for some output spaces. Valid spaces:
        - Any template identifier from TemplateFlow
        - Path to a template folder organized following TemplateFlow's
        conventions
    run_uuid : str
        Unique identifier for execution instance
    skull_strip_fixed_seed : bool
        Do not use a random seed for skull-stripping - will ensure
        run-to-run replicability when used with --omp-nthreads 1
    skull_strip_template : tuple
        Name of ANTs skull-stripping template ('OASIS30ANTs' or 'NKI'),
        and dictionary with template specifications (e.g., {'res': '2'})
    subject_list : list
        List of subject labels
    work_dir : str
        Directory in which to store workflow execution state and
        temporary files

    """
    smriprep_wf = Workflow(name='smriprep_wf')
    smriprep_wf.base_dir = work_dir

    if freesurfer:
        fsdir = pe.Node(BIDSFreeSurferDir(
            derivatives=output_dir,
            freesurfer_home=os.getenv('FREESURFER_HOME'),
            spaces=[
                s for s in output_spaces.keys() if s.startswith('fsaverage')
            ] + ['fsnative'] * ('fsnative' in output_spaces)),
                        name='fsdir_run_%s' % run_uuid.replace('-', '_'),
                        run_without_submitting=True)
        if fs_subjects_dir is not None:
            fsdir.inputs.subjects_dir = str(fs_subjects_dir.absolute())

    reportlets_dir = os.path.join(work_dir, 'reportlets')
    for subject_id in subject_list:
        single_subject_wf = init_single_subject_wf(
            debug=debug,
            freesurfer=freesurfer,
            hires=hires,
            layout=layout,
            longitudinal=longitudinal,
            low_mem=low_mem,
            name="single_subject_%s_wf" % subject_id,
            omp_nthreads=omp_nthreads,
            output_dir=output_dir,
            output_spaces=output_spaces,
            reportlets_dir=reportlets_dir,
            skull_strip_fixed_seed=skull_strip_fixed_seed,
            skull_strip_template=skull_strip_template,
            subject_id=subject_id,
        )

        single_subject_wf.config['execution']['crashdump_dir'] = (os.path.join(
            output_dir, "smriprep", "sub-" + subject_id, 'log', run_uuid))
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)
        if freesurfer:
            smriprep_wf.connect(fsdir, 'subjects_dir', single_subject_wf,
                                'inputnode.subjects_dir')
        else:
            smriprep_wf.add_nodes([single_subject_wf])

    return smriprep_wf
Esempio n. 20
0
    def set_infosource(self, opts):
        self.infosource = pe.Node(interface=init.SplitArgsRunning(), name="infosource")
        self.workflow.connect(self.preinfosource, 'args', self.infosource, "args")
Esempio n. 21
0
def diagnose(
    bids_base,
    components=None,
    debug=False,
    exclude={},
    include={},
    keep_crashdump=False,
    keep_work=False,
    match_regex=r'.+/sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/.*?_acq-(?P<acq>[a-zA-Z0-9]+)_trial-(?P<trial>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+)\.(?:nii|nii\.gz)',
    n_procs=N_PROCS,
    realign="time",
    tr=None,
    workflow_name="diagnostic",
):
    '''Run a basic independent component analysis diagnostic (using FSL's MELODIC) on functional MRI data stored in a BIDS directory tree.

	Parameters
	----------

	bids_base : string
		Path to the top level of a BIDS directory tree for which to perform the diagnostic.
	components : int, optional
		Number of independent components to produce for each functional measurement; if evaluated as False, the number of components is automatically optimized for the given data by FSL's MELODIC.
	debug : bool, optional
		Enable full nipype debugging support for the workflow construction and execution.
	exclude : dict, optional
		A dictionary with any subset of 'subject', 'session', 'acquisition', 'trial', 'modality', and 'path' as keys and corresponding identifiers as values.
		This is a blacklist: if this is specified only non-matching entries will be included in the analysis.
	include : dict, optional
		A dictionary with any subset of 'subject', 'session', 'acquisition', 'trial', 'modality', and 'path' as keys and corresponding identifiers as values.
		This is a whitelist: if this is specified only matching entries will be included in the analysis.
	keep_crashdump : bool, optional
		Whether to keep the crashdump directory (containing all the crash reports for intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	match_regex : str, optional
		Regex matching pattern by which to select input files. Has to contain groups named "sub", "ses", "acq", "trial", and "mod".
	n_procs : int, optional
		Maximum number of processes to spawn simultaneously for the workflow.
		If not explicitly defined, this is calculated automatically from the number of available cores, under the assumption that the workflow will be the main process running for its duration.
	realign : {"space","time","spacetime",""}
		Parameter that dictates slictiming correction and realignment of slices. "time" (FSL.SliceTimer) is default, since it works safely. Use others only with caution!
	tr : int, optional
		Repetition time (in seconds); if evaluated as False, the TR will be read from the NIfTI header of each file individually.
	workflow_name : string, optional
		Name of the workflow execution. The output will be saved one level above the bids_base, under a directory bearing the name given here.
	'''

    bids_base = path.abspath(path.expanduser(bids_base))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = bids_base
    datafind.inputs.match_regex = match_regex
    datafind_res = datafind.run()

    data_selection = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.acq, datafind_res.outputs.trial,
        datafind_res.outputs.mod, datafind_res.outputs.out_paths
    ])
    data_selection = [list(i) for i in data_selection]
    data_selection = pd.DataFrame(data_selection,
                                  columns=('subject', 'session', 'acquisition',
                                           'trial', 'modality', 'path'))

    data_selection = data_selection.sort_values(['session', 'subject'],
                                                ascending=[1, 1])
    if exclude:
        for key in exclude:
            data_selection = data_selection[~data_selection[key].
                                            isin(exclude[key])]
    if include:
        for key in include:
            data_selection = data_selection[data_selection[key].isin(
                include[key])]

    data_selection['out_path'] = ''
    if data_selection['path'].str.contains('.nii.gz', regex=False).any():
        data_selection['out_path'] = data_selection['path'].apply(
            lambda x: path.basename(
                path.splitext(path.splitext(x)[0])[0] + '_MELODIC'))
    else:
        data_selection['out_path'] = data_selection['path'].apply(
            lambda x: path.basename(path.splitext(x)[0] + '_MELODIC'))

    paths = data_selection['path']

    infosource = pe.Node(interface=util.IdentityInterface(
        fields=['path'], mandatory_inputs=False),
                         name="infosource")
    infosource.iterables = [('path', paths)]

    dummy_scans = pe.Node(
        name='dummy_scans',
        interface=util.Function(
            function=force_dummy_scans,
            input_names=inspect.getfullargspec(force_dummy_scans)[0],
            output_names=['out_file']))
    dummy_scans.inputs.desired_dummy_scans = 10

    bids_filename = pe.Node(name='bids_filename',
                            interface=util.Function(
                                function=out_path,
                                input_names=inspect.getfullargspec(out_path)[0],
                                output_names=['filename']))
    bids_filename.inputs.selection_df = data_selection

    bids_container = pe.Node(name='path_container',
                             interface=util.Function(
                                 function=container,
                                 input_names=inspect.getfullargspec(container)[0],
                                 output_names=['container']))
    bids_container.inputs.selection_df = data_selection

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.abspath(
        path.join(bids_base, '..', 'diagnostic'))
    datasink.inputs.parameterization = False

    melodic = pe.Node(interface=fsl.model.MELODIC(), name="melodic")
    if tr:
        melodic.inputs.tr_sec = tr
    melodic.inputs.report = True
    if components:
        melodic.inputs.dim = int(components)

    workflow_connections = [
        (infosource, dummy_scans, [('path', 'in_file')]),
        (infosource, bids_filename, [('path', 'in_path')]),
        (bids_filename, bids_container, [('filename', 'out_path')]),
        (bids_filename, melodic, [('filename', 'out_dir')]),
        (bids_container, datasink, [('container', 'container')]),
        (melodic, datasink, [('out_dir', 'func')]),
    ]

    if not tr:
        report_tr = pe.Node(name='report_tr',
                            interface=util.Function(
                                function=get_tr,
                                input_names=inspect.getfullargspec(get_tr)[0],
                                output_names=['tr']))
        report_tr.inputs.ndim = 4

        workflow_connections.extend([
            (infosource, report_tr, [('path', 'in_file')]),
            (report_tr, melodic, [('tr', 'tr_sec')]),
        ])

    if realign == "space":
        realigner = pe.Node(interface=spm.Realign(), name="realigner")
        realigner.inputs.register_to_mean = True
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
            (realigner, melodic, [('out_file', 'in_files')]),
        ])
    elif realign == "spacetime":
        realigner = pe.Node(interface=nipy.SpaceTimeRealigner(),
                            name="realigner")
        realigner.inputs.slice_times = "asc_alt_2"
        if tr:
            realigner.inputs.tr = tr
        else:
            workflow_connections.extend([
                (report_tr, realigner, [('tr', 'tr')]),
            ])
        realigner.inputs.slice_info = 3  # 3 for coronal slices (2 for horizontal, 1 for sagittal)
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
            (realigner, melodic, [('out_file', 'in_files')]),
        ])
    elif realign == "time":
        realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
        if tr:
            realigner.inputs.time_repetition = tr
        else:
            workflow_connections.extend([
                (report_tr, realigner, [('tr', 'time_repetition')]),
            ])
        workflow_connections.extend([
            (dummy_scans, realigner, [('out_file', 'in_file')]),
            (realigner, melodic, [('slice_time_corrected_file', 'in_files')]),
        ])
    else:
        workflow_connections.extend([
            (dummy_scans, melodic, [('out_file', 'in_files')]),
        ])

    crashdump_dir = path.abspath(
        path.join(bids_base, '..', 'diagnostic_crashdump'))
    workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
    if debug:
        workflow_config['logging'] = {
            'workflow_level': 'DEBUG',
            'utils_level': 'DEBUG',
            'interface_level': 'DEBUG',
            'filemanip_level': 'DEBUG',
            'log_to_file': 'true',
        }

    workdir_name = 'diagnostic_work'
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = path.abspath(path.join(bids_base, '..'))
    workflow.config = workflow_config
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    if not keep_work or not keep_crashdump:
        try:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
        except RuntimeError:
            pass
    else:
        workflow.run(plugin="MultiProc", plugin_args={'n_procs': n_procs})
    if not keep_work:
        shutil.rmtree(path.join(workflow.base_dir, workdir_name))
    if not keep_crashdump:
        try:
            shutil.rmtree(crashdump_dir)
        except OSError:  # FileNotFoundError is a subclass of OSError
            pass

    return
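
A minimal invocation sketch for the workflow above (the BIDS path and filter values are illustrative assumptions, not taken from the original source):

diagnose(
    '~/data/my_bids_study',          # hypothetical BIDS tree; expanded internally
    components=20,                   # fix the ICA dimensionality instead of auto-estimating it
    exclude={'subject': ['0001']},   # blacklist one known-bad subject
    realign='time',                  # the safe default: FSL SliceTimer
)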
Esempio n. 22
0
def analyze_openfmri_dataset(data_dir,
                             subject=None,
                             model_id=None,
                             task_id=None,
                             output_dir=None,
                             subj_prefix='*'):
    """Analyzes an open fmri dataset

    Parameters
    ----------

    data_dir : str
        Path to the base data directory

    work_dir : str
        Nipype working directory (defaults to cwd)
    """
    """
    Load nipype workflows
    """

    preproc = create_featreg_preproc(whichvol='first')
    modelfit = create_modelfit_workflow()
    fixed_fx = create_fixed_effects_flow()
    registration = create_reg_workflow()
    """
    Remove the plotting connection so that plot iterables don't propagate
    to the model stage
    """

    preproc.disconnect(preproc.get_node('plot_motion'), 'out_file',
                       preproc.get_node('outputspec'), 'motion_plots')
    """
    Set up openfmri data specific components
    """

    subjects = sorted([
        path.split(os.path.sep)[-1]
        for path in glob(os.path.join(data_dir, subj_prefix))
    ])

    infosource = pe.Node(
        niu.IdentityInterface(fields=['subject_id', 'model_id', 'task_id']),
        name='infosource')
    if not subject:  # also covers the default of None, for which len() would raise
        infosource.iterables = [('subject_id', subjects),
                                ('model_id', [model_id]), ('task_id', task_id)]
    else:
        infosource.iterables = [
            ('subject_id',
             [subjects[subjects.index(subj)] for subj in subject]),
            ('model_id', [model_id]), ('task_id', task_id)
        ]

    subjinfo = pe.Node(niu.Function(
        input_names=['subject_id', 'base_dir', 'task_id', 'model_id'],
        output_names=['run_id', 'conds', 'TR'],
        function=get_subjectinfo),
                       name='subjectinfo')
    subjinfo.inputs.base_dir = data_dir
    """
    Return data components as anat, bold and behav
    """

    datasource = pe.Node(nio.DataGrabber(
        infields=['subject_id', 'run_id', 'task_id', 'model_id'],
        outfields=['anat', 'bold', 'behav', 'contrasts']),
                         name='datasource')
    datasource.inputs.base_directory = data_dir
    datasource.inputs.template = '*'
    datasource.inputs.field_template = {
        'anat': '%s/anatomy/highres001.nii.gz',
        'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
        'behav': ('%s/model/model%03d/onsets/task%03d_'
                  'run%03d/cond*.txt'),
        'contrasts': ('models/model%03d/'
                      'task_contrasts.txt')
    }
    datasource.inputs.template_args = {
        'anat': [['subject_id']],
        'bold': [['subject_id', 'task_id']],
        'behav': [['subject_id', 'model_id', 'task_id', 'run_id']],
        'contrasts': [['model_id']]
    }
    datasource.inputs.sort_filelist = True
    """
    Create meta workflow
    """

    wf = pe.Workflow(name='openfmri')
    wf.connect(infosource, 'subject_id', subjinfo, 'subject_id')
    wf.connect(infosource, 'model_id', subjinfo, 'model_id')
    wf.connect(infosource, 'task_id', subjinfo, 'task_id')
    wf.connect(infosource, 'subject_id', datasource, 'subject_id')
    wf.connect(infosource, 'model_id', datasource, 'model_id')
    wf.connect(infosource, 'task_id', datasource, 'task_id')
    wf.connect(subjinfo, 'run_id', datasource, 'run_id')
    wf.connect([
        (datasource, preproc, [('bold', 'inputspec.func')]),
    ])

    def get_highpass(TR, hpcutoff):
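        # FSL's temporal filter expects the high-pass cutoff as a sigma expressed
        # in volumes, i.e. half the cutoff period (s) divided by the TR (s).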
        return hpcutoff / (2. * TR)

    gethighpass = pe.Node(niu.Function(input_names=['TR', 'hpcutoff'],
                                       output_names=['highpass'],
                                       function=get_highpass),
                          name='gethighpass')
    wf.connect(subjinfo, 'TR', gethighpass, 'TR')
    wf.connect(gethighpass, 'highpass', preproc, 'inputspec.highpass')
    """
    Setup a basic set of contrasts, a t-test per condition
    """

    def get_contrasts(contrast_file, task_id, conds):
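        # Each row of the contrast file is expected to look like
        #   task001 <contrast_name> <weight_1> ... <weight_n>
        # (inferred from the parsing below); only rows matching task_id are kept.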
        import numpy as np
        contrast_def = np.genfromtxt(contrast_file, dtype=object)
        if len(contrast_def.shape) == 1:
            contrast_def = contrast_def[None, :]
        contrasts = []
        for row in contrast_def:
            if row[0] != 'task%03d' % task_id:
                continue
            con = [
                row[1], 'T', ['cond%03d' % (i + 1) for i in range(len(conds))],
                row[2:].astype(float).tolist()
            ]
            contrasts.append(con)
        # add auto contrasts for each column
        for i, cond in enumerate(conds):
            con = [cond, 'T', ['cond%03d' % (i + 1)], [1]]
            contrasts.append(con)
        return contrasts

    contrastgen = pe.Node(niu.Function(
        input_names=['contrast_file', 'task_id', 'conds'],
        output_names=['contrasts'],
        function=get_contrasts),
                          name='contrastgen')

    art = pe.MapNode(
        interface=ra.ArtifactDetect(use_differences=[True, False],
                                    use_norm=True,
                                    norm_threshold=1,
                                    zintensity_threshold=3,
                                    parameter_source='FSL',
                                    mask_type='file'),
        iterfield=['realigned_files', 'realignment_parameters', 'mask_file'],
        name="art")

    modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
    modelspec.inputs.input_units = 'secs'

    def check_behav_list(behav):
        out_behav = []
        if isinstance(behav, string_types):
            behav = [behav]
        for val in behav:
            if not isinstance(val, list):
                out_behav.append([val])
            else:
                out_behav.append(val)
        return out_behav

    wf.connect(subjinfo, 'TR', modelspec, 'time_repetition')
    wf.connect(datasource, ('behav', check_behav_list), modelspec,
               'event_files')
    wf.connect(subjinfo, 'TR', modelfit, 'inputspec.interscan_interval')
    wf.connect(subjinfo, 'conds', contrastgen, 'conds')
    wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
    wf.connect(infosource, 'task_id', contrastgen, 'task_id')
    wf.connect(contrastgen, 'contrasts', modelfit, 'inputspec.contrasts')

    wf.connect([(preproc, art,
                 [('outputspec.motion_parameters', 'realignment_parameters'),
                  ('outputspec.realigned_files', 'realigned_files'),
                  ('outputspec.mask', 'mask_file')]),
                (preproc, modelspec,
                 [('outputspec.highpassed_files', 'functional_runs'),
                  ('outputspec.motion_parameters', 'realignment_parameters')]),
                (art, modelspec, [('outlier_files', 'outlier_files')]),
                (modelspec, modelfit, [('session_info',
                                        'inputspec.session_info')]),
                (preproc, modelfit, [('outputspec.highpassed_files',
                                      'inputspec.functional_data')])])
    """
    Reorder the copes so that now it combines across runs
    """

    def sort_copes(files):
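        # Transpose a runs-by-contrasts list of lists into contrasts-by-runs,
        # e.g. [[c1r1, c2r1], [c1r2, c2r2]] -> [[c1r1, c1r2], [c2r1, c2r2]].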
        numelements = len(files[0])
        outfiles = []
        for i in range(numelements):
            outfiles.insert(i, [])
            for j, elements in enumerate(files):
                outfiles[i].append(elements[i])
        return outfiles

    def num_copes(files):
        return len(files)

    pickfirst = lambda x: x[0]

    wf.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
                                      'flameo.mask_file')]),
                (modelfit, fixed_fx, [
                    (('outputspec.copes', sort_copes), 'inputspec.copes'),
                    ('outputspec.dof_file', 'inputspec.dof_files'),
                    (('outputspec.varcopes', sort_copes),
                     'inputspec.varcopes'),
                    (('outputspec.copes', num_copes), 'l2model.num_copes'),
                ])])

    wf.connect(preproc, 'outputspec.mean', registration,
               'inputspec.mean_image')
    wf.connect(datasource, 'anat', registration, 'inputspec.anatomical_image')
    registration.inputs.inputspec.target_image = fsl.Info.standard_image(
        'MNI152_T1_2mm.nii.gz')
    registration.inputs.inputspec.target_image_brain = fsl.Info.standard_image(
        'MNI152_T1_2mm_brain.nii.gz')
    registration.inputs.inputspec.config_file = 'T1_2_MNI152_2mm'

    def merge_files(copes, varcopes, zstats):
        out_files = []
        splits = []
        out_files.extend(copes)
        splits.append(len(copes))
        out_files.extend(varcopes)
        splits.append(len(varcopes))
        out_files.extend(zstats)
        splits.append(len(zstats))
        return out_files, splits

    mergefunc = pe.Node(niu.Function(
        input_names=['copes', 'varcopes', 'zstats'],
        output_names=['out_files', 'splits'],
        function=merge_files),
                        name='merge_files')
    wf.connect([(fixed_fx.get_node('outputspec'), mergefunc, [
        ('copes', 'copes'),
        ('varcopes', 'varcopes'),
        ('zstats', 'zstats'),
    ])])
    wf.connect(mergefunc, 'out_files', registration, 'inputspec.source_files')

    def split_files(in_files, splits):
        copes = in_files[:splits[0]]
        varcopes = in_files[splits[0]:(splits[0] + splits[1])]
        zstats = in_files[(splits[0] + splits[1]):]
        return copes, varcopes, zstats

    splitfunc = pe.Node(niu.Function(
        input_names=['in_files', 'splits'],
        output_names=['copes', 'varcopes', 'zstats'],
        function=split_files),
                        name='split_files')
    wf.connect(mergefunc, 'splits', splitfunc, 'splits')
    wf.connect(registration, 'outputspec.transformed_files', splitfunc,
               'in_files')
    """
    Connect to a datasink
    """

    def get_subs(subject_id, conds, model_id, task_id):
        subs = [('_subject_id_%s_' % subject_id, '')]
        subs.append(('_model_id_%d' % model_id, 'model%03d' % model_id))
        subs.append(('task_id_%d/' % task_id, '/task%03d_' % task_id))
        subs.append(
            ('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_warp', 'mean'))
        subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_flirt',
                     'affine'))

        for i in range(len(conds)):
            subs.append(('_flameo%d/cope1.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_flameo%d/varcope1.' % i, 'varcope%02d.' % (i + 1)))
            subs.append(('_flameo%d/zstat1.' % i, 'zstat%02d.' % (i + 1)))
            subs.append(('_flameo%d/tstat1.' % i, 'tstat%02d.' % (i + 1)))
            subs.append(('_flameo%d/res4d.' % i, 'res4d%02d.' % (i + 1)))
            subs.append(('_warpall%d/cope1_warp.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_warpall%d/varcope1_warp.' % (len(conds) + i),
                         'varcope%02d.' % (i + 1)))
            subs.append(('_warpall%d/zstat1_warp.' % (2 * len(conds) + i),
                         'zstat%02d.' % (i + 1)))
        return subs

    subsgen = pe.Node(niu.Function(
        input_names=['subject_id', 'conds', 'model_id', 'task_id'],
        output_names=['substitutions'],
        function=get_subs),
                      name='subsgen')

    datasink = pe.Node(interface=nio.DataSink(), name="datasink")
    wf.connect(infosource, 'subject_id', datasink, 'container')
    wf.connect(infosource, 'subject_id', subsgen, 'subject_id')
    wf.connect(infosource, 'model_id', subsgen, 'model_id')
    wf.connect(infosource, 'task_id', subsgen, 'task_id')
    wf.connect(contrastgen, 'contrasts', subsgen, 'conds')
    wf.connect(subsgen, 'substitutions', datasink, 'substitutions')
    wf.connect([(fixed_fx.get_node('outputspec'), datasink,
                 [('res4d', 'res4d'), ('copes', 'copes'),
                  ('varcopes', 'varcopes'), ('zstats', 'zstats'),
                  ('tstats', 'tstats')])])
    wf.connect([(splitfunc, datasink, [
        ('copes', 'copes.mni'),
        ('varcopes', 'varcopes.mni'),
        ('zstats', 'zstats.mni'),
    ])])
    wf.connect(registration, 'outputspec.transformed_mean', datasink,
               'mean.mni')
    wf.connect(registration, 'outputspec.func2anat_transform', datasink,
               'xfm.mean2anat')
    wf.connect(registration, 'outputspec.anat2target_transform', datasink,
               'xfm.anat2target')
    """
    Set processing parameters
    """

    hpcutoff = 120.
    preproc.inputs.inputspec.fwhm = 6.0
    gethighpass.inputs.hpcutoff = hpcutoff
    modelspec.inputs.high_pass_filter_cutoff = hpcutoff
    modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': True}}
    modelfit.inputs.inputspec.model_serial_correlations = True
    modelfit.inputs.inputspec.film_threshold = 1000

    datasink.inputs.base_directory = output_dir
    return wf
Esempio n. 23
0
def create_sca(name_sca='sca'):

    """
    Map the correlations of a Region of Interest (seed, in native or MNI space) with the rest of the brain voxels.
    The map is normalized to contain Z-scores, mapped into standard space and treated with spatial smoothing.

    Parameters
    ----------

    name_sca : a string
        Name of the SCA workflow

    Returns
    -------

    sca_workflow : workflow

        Seed Based Correlation Analysis Workflow



    Notes
    -----

    `Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/sca/sca.py>`_ 

    Workflow Inputs::

        inputspec.functional_file : string (existing nifti file)
            Band-passed image with global signal, white matter, CSF and motion regression applied. Recommended band-pass filter: (0.001, 0.1)

        inputspec.timeseries_one_d : string (existing 1D file)
            1D (3dTcorr1D-compatible) timeseries file, from a mask or from a parcellation containing ROIs

    Workflow Outputs::

        outputspec.correlation_stack : string (nifti file)
            Correlations of the functional file and the input timeseries, concatenated across ROIs (ROI mode only)

        outputspec.correlation_files : string or list of strings (nifti files)
            Correlations of the functional file and the input timeseries

        outputspec.Z_score : string (nifti file)
            Fisher Z-transformed correlations of the seed


    SCA Workflow Procedure:

    1. Compute the Pearson correlation between the input timeseries 1D file and the input functional file,
       using 3dTcorr1D. The input timeseries can be a 1D file containing parcellation ROIs
       or a 3D mask

    2. Compute the Fisher Z-score of the correlation computed in the step above. If a mask is provided,
       a single Z-score file is returned; otherwise Z-scores for all ROIs are returned as a list of
       nifti files
    
    
    
    Workflow:
    
    .. image:: ../images/sca_graph.dot.png
        :width: 500 
    
    Detailed Workflow:
    
    .. image:: ../images/sca_detailed_graph.dot.png
        :width: 500 


    Examples
    --------
    
    >>> sca_w = create_sca("sca_wf")
    >>> sca_w.inputs.inputspec.functional_file = '/home/data/subject/func/rest_bandpassed.nii.gz'
    >>> sca_w.inputs.inputspec.timeseries_one_d = '/home/data/subject/func/ts.1D' 
    >>> sca_w.run() # doctest: +SKIP

    """

    from CPAC.utils.utils import get_roi_num_list

    sca = pe.Workflow(name=name_sca)
    inputNode = pe.Node(util.IdentityInterface(fields=['timeseries_one_d',
                                                'functional_file',
                                                ]),
                        name='inputspec')


    outputNode = pe.Node(util.IdentityInterface(fields=[
                                                    'correlation_stack',
                                                    'correlation_files',
                                                    'Z_score',
                                                    ]),
                        name='outputspec')



    # 2. Compute voxel-wise correlation with seed timeseries
    corr = pe.Node(interface=preprocess.TCorr1D(),
                      name='3dTCorr1D')
    corr.inputs.pearson = True
    corr.inputs.outputtype = 'NIFTI_GZ'

    sca.connect(inputNode, 'timeseries_one_d',
                corr, 'y_1d')
    sca.connect(inputNode, 'functional_file',
                corr, 'xset')


    if "roi" in name_sca:

        # Transform the sub-bricks into volumes
        concat = pe.Node(interface=preprocess.TCat(),
                          name='3dTCat')
        concat.inputs.outputtype = 'NIFTI_GZ'

        # also write out volumes as individual files
        split = pe.Node(interface=fsl.Split(), name='split_raw_volumes_sca')
        split.inputs.dimension = 't'
        split.inputs.out_base_name = 'sca_'

        get_roi_num_list = pe.Node(util.Function(input_names=['timeseries_file', 'prefix'],
                                                 output_names=['roi_list'],
                                                 function=get_roi_num_list), 
                                   name='get_roi_num_list')
        get_roi_num_list.inputs.prefix = "sca"

        rename_rois = pe.MapNode(interface=util.Rename(), name='output_rois',
                          iterfield=['in_file','format_string'])

        rename_rois.inputs.keep_ext = True


        sca.connect(corr, 'out_file', concat, 'in_files')

        sca.connect(concat, 'out_file', split, 'in_file')

        sca.connect(concat, 'out_file',
                    outputNode, 'correlation_stack')

        sca.connect(inputNode, 'timeseries_one_d', get_roi_num_list,
                    'timeseries_file')

        sca.connect(split, 'out_files', rename_rois, 'in_file')

        sca.connect(get_roi_num_list, 'roi_list', rename_rois, 'format_string')

        sca.connect(rename_rois, 'out_file', outputNode,
                    'correlation_files')

    else:

        sca.connect(corr, 'out_file', outputNode, 'correlation_files')



    return sca
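
For reference, the Fisher r-to-z transform named in step 2 of the docstring is just the inverse hyperbolic tangent; a minimal NumPy sketch with made-up values, independent of the AFNI tooling used above:

import numpy as np

r = np.array([0.1, 0.5, 0.9])   # sample Pearson correlations
z = np.arctanh(r)               # Fisher Z: z = 0.5 * ln((1 + r) / (1 - r))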
Esempio n. 24
0
def init_confound_regression_wf(cr_opts, name="confound_regression_wf"):

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_file', 'brain_mask', 'csf_mask', 'confounds_file', 'FD_file'
    ]),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['cleaned_path', 'aroma_out', 'VE_file', 'CR_data_dict']),
                         name='outputnode')

    regress_node = pe.Node(Function(
        input_names=['bold_file', 'data_dict', 'brain_mask_file', 'cr_opts'],
        output_names=['cleaned_path', 'VE_file_path', 'data_dict'],
        function=regress),
                           name='regress',
                           mem_gb=1)
    regress_node.inputs.cr_opts = cr_opts

    prep_CR_node = pe.Node(Function(input_names=[
        'bold_file', 'brain_mask_file', 'confounds_file', 'FD_file', 'cr_opts'
    ],
                                    output_names=['data_dict'],
                                    function=prep_CR),
                           name='prep_CR',
                           mem_gb=1)
    prep_CR_node.inputs.cr_opts = cr_opts

    workflow.connect([
        (inputnode, prep_CR_node, [
            ("bold_file", "bold_file"),
            ("brain_mask", "brain_mask_file"),
            ("confounds_file", "confounds_file"),
            ("FD_file", "FD_file"),
        ]),
        (inputnode, regress_node, [
            ("brain_mask", "brain_mask_file"),
        ]),
        (prep_CR_node, regress_node, [
            ("data_dict", "data_dict"),
        ]),
        (regress_node, outputnode, [
            ("cleaned_path", "cleaned_path"),
            ("VE_file_path", "VE_file"),
            ("data_dict", "CR_data_dict"),
        ]),
    ])

    if cr_opts.run_aroma:
        ica_aroma_node = pe.Node(Function(
            input_names=[
                'inFile', 'mc_file', 'brain_mask', 'csf_mask', 'tr',
                'aroma_dim'
            ],
            output_names=['cleaned_file', 'aroma_out'],
            function=exec_ICA_AROMA),
                                 name='ica_aroma',
                                 mem_gb=1)
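        # cr_opts.TR is assumed to be a string such as '1.0s'; stripping the unit
        # suffix below yields the repetition time in seconds.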
        ica_aroma_node.inputs.tr = float(cr_opts.TR.split('s')[0])
        ica_aroma_node.inputs.aroma_dim = cr_opts.aroma_dim

        workflow.connect([
            (inputnode, ica_aroma_node, [
                ("bold_file", "inFile"),
                ("brain_mask", "brain_mask"),
                ("confounds_file", "mc_file"),
                ("csf_mask", "csf_mask"),
            ]),
            (ica_aroma_node, regress_node, [
                ("cleaned_file", "bold_file"),
            ]),
            (ica_aroma_node, outputnode, [
                ("aroma_out", "aroma_out"),
            ]),
        ])
    else:
        workflow.connect([
            (inputnode, regress_node, [
                ("bold_file", "bold_file"),
            ]),
        ])

    return workflow
Esempio n. 25
0
def init_bold_t2s_wf(echo_times, mem_gb, omp_nthreads, name='bold_t2s_wf'):
    """
    Combine multiple echos of :abbr:`ME-EPI (multi-echo echo-planar imaging)`.

    This workflow wraps the `tedana`_ `T2* workflow`_ to optimally
    combine multiple echos and derive a T2* map.
    The following steps are performed:

    #. :abbr:`HMC (head motion correction)` on individual echo files.
    #. Compute the T2* map
    #. Create an optimally combined ME-EPI time series

    .. _tedana: https://github.com/me-ica/tedana
    .. _`T2* workflow`: https://tedana.readthedocs.io/en/latest/generated/tedana.workflows.t2smap_workflow.html#tedana.workflows.t2smap_workflow  # noqa

    Parameters
    ----------
    echo_times : :obj:`list` or :obj:`tuple`
        list of TEs associated with each echo
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``bold_t2s_wf``)

    Inputs
    ------
    bold_file
        list of individual echo files

    Outputs
    -------
    bold
        the optimally combined time series for all supplied echos

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
A T2\\* map was estimated from the preprocessed BOLD by fitting to a monoexponential signal
decay model with nonlinear regression, using T2\\*/S0 estimates from a log-linear
regression fit as initial values.
For each voxel, the maximal number of echoes with reliable signal in that voxel were
used to fit the model.
The calculated T2\\* map was then used to optimally combine preprocessed BOLD across
echoes following the method described in [@posse_t2s].
The optimally combined time series was carried forward as the *preprocessed BOLD*.
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=['bold']),
                         name='outputnode')

    LOGGER.log(
        25, 'Generating T2* map and optimally combined ME-EPI time series.')

    t2smap_node = pe.Node(T2SMap(echo_times=list(echo_times)),
                          name='t2smap_node')

    workflow.connect([
        (inputnode, t2smap_node, [('bold_file', 'in_files')]),
        (t2smap_node, outputnode, [('optimal_comb', 'bold')]),
    ])

    return workflow
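
The monoexponential model quoted in the workflow description can be illustrated with a toy log-linear fit for a single voxel; this is a sketch of the idea with synthetic values, not tedana's implementation:

import numpy as np

# S(TE) = S0 * exp(-TE / T2*)  =>  log S = log S0 - TE / T2*
tes = np.array([0.015, 0.030, 0.045])          # assumed echo times (s)
sig = 1200.0 * np.exp(-tes / 0.030)            # synthetic decay with T2* = 30 ms
slope, intercept = np.polyfit(tes, np.log(sig), 1)
t2star, s0 = -1.0 / slope, np.exp(intercept)   # recovers ~0.030 s and ~1200.0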
Esempio n. 26
0
def create_register_structural_to_diff(name='structural_to_diff'):
    """Co-register images using an affine transformation.

    Example
    -------

    >>> from tn_image_processing.workflows import registration
    >>> registration = registration.create_register_structural_to_diff()
    >>> registration.inputs.inputnode.structural_list = ['T1.nii.gz']  # illustrative
    >>> registration.inputs.inputnode.dwi_list = ['DTI.nii.gz']

    Inputs::

        [Mandatory]
        inputnode.structural_list: list of hi-resolution structural
                                   images
        inputnode.dwi_list: list of 4dim diffusion volumes

    Outputs::

        outputnode.struct_to_b0: structural image registered to b0
                                 space
        outputnode.struct_to_b0_mat: affine matrix used to register
                                     structural image to b0 space

    """

    # Define the inputnode
    inputnode = pe.Node(interface=util.IdentityInterface(
        fields=["structural_list", "dwi_list"]),
                        name="inputnode")

    # Get first volume from 4d diffusion image (we assume this is b0)
    extract_b0 = pe.MapNode(interface=fslutil.ExtractROI(
        roi_file='DTI_first.nii.gz', t_min=0, t_size=1),
                            iterfield=['in_file'],
                            name='extract_b0')

    # Align structural image to b0
    flirt = pe.MapNode(interface=fslpre.FLIRT(dof=6,
                                              cost='mutualinfo',
                                              usesqform=True,
                                              out_file="T1_to_b0.nii.gz",
                                              out_matrix_file="T1_to_b0.mat"),
                       iterfield=['in_file', 'ref_file'],
                       name="flirt")

    # Define this workflow
    structural_to_diff = pe.Workflow(name=name)

    # Connect components of this workflow
    structural_to_diff.connect([
        (inputnode, extract_b0, [("dwi_list", "in_file")]),
        (inputnode, flirt, [("structural_list", "in_file")]),
        (extract_b0, flirt, [("roi_file", "ref_file")]),
    ])

    # Define the outputnode
    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=['struct_to_b0', 'struct_to_b0_mat']),
                        name="outputnode")

    # Connect the output
    structural_to_diff.connect([
        (flirt, outputnode, [('out_file', 'struct_to_b0'),
                             ('out_matrix_file', 'struct_to_b0_mat')]),
    ])

    return structural_to_diff
Esempio n. 27
0
def create_connectivity_pipeline(name="connectivity", parcellation_name='scale500'):
    """Creates a pipeline that does the same connectivity processing as in the
    :ref:`example_dmri_connectivity_advanced` example script. Given a subject id (with a completed FreeSurfer reconstruction),
    a diffusion-weighted image, b-values, and b-vectors, the workflow will return the subject's connectome
    as a Connectome File Format (CFF) file for use in Connectome Viewer (http://www.cmtk.org).

    Example
    -------

    >>> from nipype.workflows.dmri.mrtrix.connectivity_mapping import create_connectivity_pipeline
    >>> conmapper = create_connectivity_pipeline("nipype_conmap")
    >>> conmapper.inputs.inputnode.subjects_dir = '.'
    >>> conmapper.inputs.inputnode.subject_id = 'subj1'
    >>> conmapper.inputs.inputnode.dwi = 'data.nii.gz'
    >>> conmapper.inputs.inputnode.bvecs = 'bvecs'
    >>> conmapper.inputs.inputnode.bvals = 'bvals'
    >>> conmapper.run()                 # doctest: +SKIP

    Inputs::

        inputnode.subject_id
        inputnode.subjects_dir
        inputnode.dwi
        inputnode.bvecs
        inputnode.bvals
        inputnode.resolution_network_file

    Outputs::

        outputnode.connectome
        outputnode.cmatrix
        outputnode.networks
        outputnode.fa
        outputnode.struct
        outputnode.tracts
        outputnode.rois
        outputnode.odfs
        outputnode.filtered_tractography
        outputnode.tdi
        outputnode.nxstatscff
        outputnode.nxcsv
        outputnode.cmatrices_csv
        outputnode.mean_fiber_length
        outputnode.median_fiber_length
        outputnode.fiber_length_std
    """

    inputnode_within = pe.Node(util.IdentityInterface(fields=["subject_id",
                                                              "dwi",
                                                              "bvecs",
                                                              "bvals",
                                                              "subjects_dir",
                                                              "resolution_network_file"]),
                               name="inputnode_within")

    FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource')
    FreeSurferSourceLH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceLH')
    FreeSurferSourceLH.inputs.hemi = 'lh'

    FreeSurferSourceRH = pe.Node(interface=nio.FreeSurferSource(), name='fssourceRH')
    FreeSurferSourceRH.inputs.hemi = 'rh'

    """
    Creating the workflow's nodes
    =============================
    """

    """
    Conversion nodes
    ----------------
    """

    """
    A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject.
    Nodes are used to convert the following:
        * Original structural image to NIFTI
        * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer
        * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI

    """

    mri_convert_Brain = pe.Node(interface=fs.MRIConvert(), name='mri_convert_Brain')
    mri_convert_Brain.inputs.out_type = 'nii'
    mri_convert_ROI_scale500 = mri_convert_Brain.clone('mri_convert_ROI_scale500')

    mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH')
    mris_convertLH.inputs.out_datatype = 'gii'
    mris_convertRH = mris_convertLH.clone('mris_convertRH')
    mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite')
    mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite')
    mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated')
    mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated')
    mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere')
    mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere')
    mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels')
    mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels')

    """
    Diffusion processing nodes
    --------------------------

    .. seealso::

        dmri_mrtrix_dti.py
            Tutorial that focuses solely on the MRtrix diffusion processing

        http://www.brain.org.au/software/mrtrix/index.html
            MRtrix's online documentation
    """

    """
    b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix.
    """

    fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(),name='fsl2mrtrix')
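
    # The encoding file is MRtrix's gradient table, one "x y z b" row per volume.
    # A hand-rolled equivalent would be (sketch, not the interface's own code):
    #   import numpy as np
    #   grad = np.column_stack([np.loadtxt('bvecs').T, np.loadtxt('bvals')])
    #   np.savetxt('encoding.b', grad)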

    """
    Distortions induced by eddy currents are corrected prior to fitting the tensors.
    The first image is used as a reference for which to warp the others.
    """

    eddycorrect = create_eddy_correct_pipeline(name='eddycorrect')
    eddycorrect.inputs.inputnode.ref_num = 1

    """
    Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created:
        * Major eigenvector in each voxel
        * Apparent diffusion coefficient
        * Fractional anisotropy
    """

    dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(),name='dwi2tensor')
    tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(),name='tensor2vector')
    tensor2adc = pe.Node(interface=mrtrix.Tensor2ApparentDiffusion(),name='tensor2adc')
    tensor2fa = pe.Node(interface=mrtrix.Tensor2FractionalAnisotropy(),name='tensor2fa')
    MRconvert_fa = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert_fa')
    MRconvert_fa.inputs.extension = 'nii'

    """

    These nodes are used to create a rough brain mask from the b0 image.
    The b0 image is extracted from the original diffusion-weighted image,
    put through a simple thresholding routine, and smoothed using a 3x3 median filter.
    """

    MRconvert = pe.Node(interface=mrtrix.MRConvert(),name='MRconvert')
    MRconvert.inputs.extract_at_axis = 3
    MRconvert.inputs.extract_at_coordinate = [0]
    threshold_b0 = pe.Node(interface=mrtrix.Threshold(),name='threshold_b0')
    median3d = pe.Node(interface=mrtrix.MedianFilter3D(),name='median3d')

    """
    The brain mask is also used to help identify single-fiber voxels.
    This is done by passing the brain mask through two erosion steps,
    multiplying the remaining mask with the fractional anisotropy map, and
    thresholding the result to obtain some highly anisotropic within-brain voxels.
    """

    erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_firstpass')
    erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(),name='erode_mask_secondpass')
    MRmultiply = pe.Node(interface=mrtrix.MRMultiply(),name='MRmultiply')
    MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge')
    threshold_FA = pe.Node(interface=mrtrix.Threshold(),name='threshold_FA')
    threshold_FA.inputs.absolute_threshold_value = 0.7

    """
    For whole-brain tracking we also require a broad white-matter seed mask.
    This is created by generating a white matter mask, given a brainmask, and
    thresholding it at a reasonably high level.
    """

    bet = pe.Node(interface=fsl.BET(mask = True), name = 'bet_b0')
    gen_WM_mask = pe.Node(interface=mrtrix.GenerateWhiteMatterMask(),name='gen_WM_mask')
    threshold_wmmask = pe.Node(interface=mrtrix.Threshold(),name='threshold_wmmask')
    threshold_wmmask.inputs.absolute_threshold_value = 0.4

    """
    The spherical deconvolution step depends on the estimate of the response function
    in the highly anisotropic voxels we obtained above.

    .. warning::

        For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps.

    """

    estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(),name='estimateresponse')
    estimateresponse.inputs.maximum_harmonic_order = 6
    csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(),name='csdeconv')
    csdeconv.inputs.maximum_harmonic_order = 6
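    # lmax = 6 keeps (6 + 1) * (6 + 2) / 2 = 28 even-order SH coefficients; per the
    # warning above, consider lowering it for damaged or pathological brains.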

    """
    Finally, we track probabilistically using the orientation distribution functions obtained earlier.
    The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format.
    """

    probCSDstreamtrack = pe.Node(interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(),name='probCSDstreamtrack')
    probCSDstreamtrack.inputs.inputmodel = 'SD_PROB'
    probCSDstreamtrack.inputs.desired_number_of_tracks = 150000
    tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(),name='tracks2prob')
    tracks2prob.inputs.colour = True
    MRconvert_tracks2prob = MRconvert_fa.clone(name='MRconvert_tracks2prob')
    tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(),name='tck2trk')
    trk2tdi = pe.Node(interface=dipy.TrackDensityMap(),name='trk2tdi')

    """
    Structural segmentation nodes
    -----------------------------
    """

    """
    The following node identifies the transformation between the diffusion-weighted
    image and the structural image. This transformation is then applied to the tracts
    so that they are in the same space as the regions of interest.
    """

    coregister = pe.Node(interface=fsl.FLIRT(dof=6), name = 'coregister')
    coregister.inputs.cost = ('normmi')

    """
    Parcellation is performed given the aparc+aseg image from Freesurfer.
    The CMTK Parcellation step subdivides these regions to return a higher-resolution parcellation scheme.
    The parcellation used here is entitled "scale500" and returns 1015 regions.
    """

    parcellate = pe.Node(interface=cmtk.Parcellate(), name="Parcellate")
    parcellate.inputs.parcellation_name = parcellation_name

    """
    The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts
    and outputs a number of different files. The most important of which is the connectivity network itself, which is stored
    as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various
    NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and
    standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the
    specific tracts that connect between user-selected regions.

    Here we choose the Lausanne2008 parcellation scheme, since we are incorporating the CMTK parcellation step.
    """

    creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix")
    creatematrix.inputs.count_region_intersections = True

    """
    Next we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use
    the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file.
    The inspect.getfile command is used to package this script into the resulting CFF file, so that it is easy to
    look back at the processing parameters that were used.
    """

    CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter")
    CFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe()))
    giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces")
    giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels")
    niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes")
    fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays")

    """
    We also create a node to calculate several network metrics on our resulting file, and another CFF converter
    which will be used to package these networks into a single file.
    """

    networkx = create_networkx_pipeline(name='networkx')
    cmats_to_csv = create_cmats_to_csv_pipeline(name='cmats_to_csv')
    nfibs_to_csv = pe.Node(interface=misc.Matlab2CSV(), name='nfibs_to_csv')
    merge_nfib_csvs = pe.Node(interface=misc.MergeCSVFiles(), name='merge_nfib_csvs')
    merge_nfib_csvs.inputs.extra_column_heading = 'Subject'
    merge_nfib_csvs.inputs.out_file = 'fibers.csv'
    NxStatsCFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="NxStatsCFFConverter")
    NxStatsCFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe()))

    """
    Connecting the workflow
    =======================
    Here we connect our processing pipeline.
    """


    """
    Connecting the inputs, FreeSurfer nodes, and conversions
    --------------------------------------------------------
    """

    mapping = pe.Workflow(name='mapping')

    """
    First, we connect the input node to the FreeSurfer input nodes.
    """

    mapping.connect([(inputnode_within, FreeSurferSource,[("subjects_dir","subjects_dir")])])
    mapping.connect([(inputnode_within, FreeSurferSource,[("subject_id","subject_id")])])

    mapping.connect([(inputnode_within, FreeSurferSourceLH,[("subjects_dir","subjects_dir")])])
    mapping.connect([(inputnode_within, FreeSurferSourceLH,[("subject_id","subject_id")])])

    mapping.connect([(inputnode_within, FreeSurferSourceRH,[("subjects_dir","subjects_dir")])])
    mapping.connect([(inputnode_within, FreeSurferSourceRH,[("subject_id","subject_id")])])

    mapping.connect([(inputnode_within, parcellate,[("subjects_dir","subjects_dir")])])
    mapping.connect([(inputnode_within, parcellate,[("subject_id","subject_id")])])
    mapping.connect([(parcellate, mri_convert_ROI_scale500,[('roi_file','in_file')])])

    """
    Nifti conversion for subject's stripped brain image from Freesurfer:
    """

    mapping.connect([(FreeSurferSource, mri_convert_Brain,[('brain','in_file')])])

    """
    Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres)
    """

    mapping.connect([(FreeSurferSourceLH, mris_convertLH,[('pial','in_file')])])
    mapping.connect([(FreeSurferSourceRH, mris_convertRH,[('pial','in_file')])])
    mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite,[('white','in_file')])])
    mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite,[('white','in_file')])])
    mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated,[('inflated','in_file')])])
    mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated,[('inflated','in_file')])])
    mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere,[('sphere','in_file')])])
    mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere,[('sphere','in_file')])])

    """
    The annotation files are converted using the pial surface as a map via the MRIsConvert interface.
    One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files
    specifically (rather than e.g. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource.
    """

    mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels,[('pial','in_file')])])
    mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels,[('pial','in_file')])])
    mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [(('annot', select_aparc_annot), 'annot_file')])])
    mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [(('annot', select_aparc_annot), 'annot_file')])])


    """
    Diffusion Processing
    --------------------
    Now we connect the tensor computations:
    """

    mapping.connect([(inputnode_within, fsl2mrtrix, [("bvecs", "bvec_file"),
                                                    ("bvals", "bval_file")])])
    mapping.connect([(inputnode_within, eddycorrect,[("dwi","inputnode.in_file")])])
    mapping.connect([(eddycorrect, dwi2tensor,[("outputnode.eddy_corrected","in_file")])])
    mapping.connect([(fsl2mrtrix, dwi2tensor,[("encoding_file","encoding_file")])])

    mapping.connect([(dwi2tensor, tensor2vector,[['tensor','in_file']]),
                           (dwi2tensor, tensor2adc,[['tensor','in_file']]),
                           (dwi2tensor, tensor2fa,[['tensor','in_file']]),
                          ])
    mapping.connect([(tensor2fa, MRmult_merge,[("FA","in1")])])
    mapping.connect([(tensor2fa, MRconvert_fa,[("FA","in_file")])])

    """

    This block creates the rough brain mask to be multiplied, mulitplies it with the
    fractional anisotropy image, and thresholds it to get the single-fiber voxels.
    """

    mapping.connect([(eddycorrect, MRconvert,[("outputnode.eddy_corrected","in_file")])])
    mapping.connect([(MRconvert, threshold_b0,[("converted","in_file")])])
    mapping.connect([(threshold_b0, median3d,[("out_file","in_file")])])
    mapping.connect([(median3d, erode_mask_firstpass,[("out_file","in_file")])])
    mapping.connect([(erode_mask_firstpass, erode_mask_secondpass,[("out_file","in_file")])])
    mapping.connect([(erode_mask_secondpass, MRmult_merge,[("out_file","in2")])])
    mapping.connect([(MRmult_merge, MRmultiply,[("out","in_files")])])
    mapping.connect([(MRmultiply, threshold_FA,[("out_file","in_file")])])

    """
    Here the thresholded white matter mask is created for seeding the tractography.
    """

    mapping.connect([(eddycorrect, bet,[("outputnode.eddy_corrected","in_file")])])
    mapping.connect([(eddycorrect, gen_WM_mask,[("outputnode.eddy_corrected","in_file")])])
    mapping.connect([(bet, gen_WM_mask,[("mask_file","binary_mask")])])
    mapping.connect([(fsl2mrtrix, gen_WM_mask,[("encoding_file","encoding_file")])])
    mapping.connect([(gen_WM_mask, threshold_wmmask,[("WMprobabilitymap","in_file")])])

    """
    Next we estimate the fiber response distribution.
    """

    mapping.connect([(eddycorrect, estimateresponse,[("outputnode.eddy_corrected","in_file")])])
    mapping.connect([(fsl2mrtrix, estimateresponse,[("encoding_file","encoding_file")])])
    mapping.connect([(threshold_FA, estimateresponse,[("out_file","mask_image")])])

    """
    Run constrained spherical deconvolution.
    """

    mapping.connect([(eddycorrect, csdeconv,[("outputnode.eddy_corrected","in_file")])])
    mapping.connect([(gen_WM_mask, csdeconv,[("WMprobabilitymap","mask_image")])])
    mapping.connect([(estimateresponse, csdeconv,[("response","response_file")])])
    mapping.connect([(fsl2mrtrix, csdeconv,[("encoding_file","encoding_file")])])

    """
    Connect the tractography and compute the tract density image.
    """

    mapping.connect([(threshold_wmmask, probCSDstreamtrack,[("out_file","seed_file")])])
    mapping.connect([(csdeconv, probCSDstreamtrack,[("spherical_harmonics_image","in_file")])])
    mapping.connect([(probCSDstreamtrack, tracks2prob,[("tracked","in_file")])])
    mapping.connect([(eddycorrect, tracks2prob,[("outputnode.eddy_corrected","template_file")])])
    mapping.connect([(tracks2prob, MRconvert_tracks2prob,[("tract_image","in_file")])])

    """
    Structural Processing
    ---------------------
    First, we coregister the diffusion image to the structural image
    """

    mapping.connect([(eddycorrect, coregister,[("outputnode.eddy_corrected","in_file")])])
    mapping.connect([(mri_convert_Brain, coregister,[('out_file','reference')])])

    """
    The MRtrix-tracked fibers are converted to TrackVis format (with voxel and data dimensions grabbed from the DWI).
    The connectivity matrix is created with the transformed .trk fibers and the parcellation file.
    """

    mapping.connect([(eddycorrect, tck2trk,[("outputnode.eddy_corrected","image_file")])])
    mapping.connect([(mri_convert_Brain, tck2trk,[("out_file","registration_image_file")])])
    mapping.connect([(coregister, tck2trk,[("out_matrix_file","matrix_file")])])
    mapping.connect([(probCSDstreamtrack, tck2trk,[("tracked","in_file")])])
    mapping.connect([(tck2trk, creatematrix,[("out_file","tract_file")])])
    mapping.connect([(tck2trk, trk2tdi,[("out_file","in_file")])])
    mapping.connect(inputnode_within, 'resolution_network_file',
                    creatematrix, 'resolution_network_file')
    mapping.connect([(inputnode_within, creatematrix,[("subject_id","out_matrix_file")])])
    mapping.connect([(inputnode_within, creatematrix,[("subject_id","out_matrix_mat_file")])])
    mapping.connect([(parcellate, creatematrix,[("roi_file","roi_file")])])

    """
    The merge nodes defined earlier are used here to create lists of the files which are
    destined for the CFFConverter.
    """

    mapping.connect([(mris_convertLH, giftiSurfaces,[("converted","in1")])])
    mapping.connect([(mris_convertRH, giftiSurfaces,[("converted","in2")])])
    mapping.connect([(mris_convertLHwhite, giftiSurfaces,[("converted","in3")])])
    mapping.connect([(mris_convertRHwhite, giftiSurfaces,[("converted","in4")])])
    mapping.connect([(mris_convertLHinflated, giftiSurfaces,[("converted","in5")])])
    mapping.connect([(mris_convertRHinflated, giftiSurfaces,[("converted","in6")])])
    mapping.connect([(mris_convertLHsphere, giftiSurfaces,[("converted","in7")])])
    mapping.connect([(mris_convertRHsphere, giftiSurfaces,[("converted","in8")])])

    mapping.connect([(mris_convertLHlabels, giftiLabels,[("converted","in1")])])
    mapping.connect([(mris_convertRHlabels, giftiLabels,[("converted","in2")])])

    mapping.connect([(parcellate, niftiVolumes,[("roi_file","in1")])])
    mapping.connect([(eddycorrect, niftiVolumes,[("outputnode.eddy_corrected","in2")])])
    mapping.connect([(mri_convert_Brain, niftiVolumes,[("out_file","in3")])])

    mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file","in1")])])
    mapping.connect([(creatematrix, fiberDataArrays,[("endpoint_file_mm","in2")])])
    mapping.connect([(creatematrix, fiberDataArrays,[("fiber_length_file","in3")])])
    mapping.connect([(creatematrix, fiberDataArrays,[("fiber_label_file","in4")])])

    """
    This block actually connects the merged lists to the CFF converter. We pass the surfaces
    and volumes that are to be included, as well as the tracts and the network itself. The currently
    running pipeline (dmri_connectivity_advanced.py) is also scraped and included in the CFF file. This
    makes it easy for the user to examine the entire processing pathway used to generate the end
    product.
    """

    mapping.connect([(giftiSurfaces, CFFConverter,[("out","gifti_surfaces")])])
    mapping.connect([(giftiLabels, CFFConverter,[("out","gifti_labels")])])
    mapping.connect([(creatematrix, CFFConverter,[("matrix_files","gpickled_networks")])])
    mapping.connect([(niftiVolumes, CFFConverter,[("out","nifti_volumes")])])
    mapping.connect([(fiberDataArrays, CFFConverter,[("out","data_files")])])
    mapping.connect([(creatematrix, CFFConverter,[("filtered_tractography","tract_files")])])
    mapping.connect([(inputnode_within, CFFConverter,[("subject_id","title")])])

    """
    The graph theoretical metrics which have been generated are placed into another CFF file.
    """

    mapping.connect([(inputnode_within, networkx,[("subject_id","inputnode.extra_field")])])
    mapping.connect([(creatematrix, networkx,[("intersection_matrix_file","inputnode.network_file")])])

    mapping.connect([(networkx, NxStatsCFFConverter,[("outputnode.network_files","gpickled_networks")])])
    mapping.connect([(giftiSurfaces, NxStatsCFFConverter,[("out","gifti_surfaces")])])
    mapping.connect([(giftiLabels, NxStatsCFFConverter,[("out","gifti_labels")])])
    mapping.connect([(niftiVolumes, NxStatsCFFConverter,[("out","nifti_volumes")])])
    mapping.connect([(fiberDataArrays, NxStatsCFFConverter,[("out","data_files")])])
    mapping.connect([(inputnode_within, NxStatsCFFConverter,[("subject_id","title")])])

    mapping.connect([(inputnode_within, cmats_to_csv,[("subject_id","inputnode.extra_field")])])
    mapping.connect([(creatematrix, cmats_to_csv,[("matlab_matrix_files","inputnode.matlab_matrix_files")])])
    mapping.connect([(creatematrix, nfibs_to_csv,[("stats_file","in_file")])])
    mapping.connect([(nfibs_to_csv, merge_nfib_csvs,[("csv_files","in_files")])])
    mapping.connect([(inputnode_within, merge_nfib_csvs,[("subject_id","extra_field")])])


    """
    Create a higher-level workflow
    --------------------------------------
    Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes
    declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding
    their names to the subject list and their data to the proper folders.
    """

    inputnode = pe.Node(interface=util.IdentityInterface(
        fields=["subject_id", "dwi", "bvecs", "bvals", "subjects_dir"]),
        name="inputnode")

    outputnode = pe.Node(interface = util.IdentityInterface(fields=["fa",
                                                                "struct",
                                                                "tracts",
                                                                "tracks2prob",
                                                                "connectome",
                                                                "nxstatscff",
                                                                "nxmatlab",
                                                                "nxcsv",
                                                                "fiber_csv",
                                                                "cmatrices_csv",
                                                                "nxmergedcsv",
                                                                "cmatrix",
                                                                "networks",
                                                                "filtered_tracts",
                                                                "rois",
                                                                "odfs",
                                                                "tdi",
                                                                "mean_fiber_length",
                                                                "median_fiber_length",
                                                                "fiber_length_std"]),
                                        name="outputnode")

    connectivity = pe.Workflow(name="connectivity")
    connectivity.base_output_dir = name
    connectivity.base_dir = name

    connectivity.connect([(inputnode, mapping, [("dwi", "inputnode_within.dwi"),
                                              ("bvals", "inputnode_within.bvals"),
                                              ("bvecs", "inputnode_within.bvecs"),
                                              ("subject_id", "inputnode_within.subject_id"),
                                              ("subjects_dir", "inputnode_within.subjects_dir")])
                                              ])

    connectivity.connect([(mapping, outputnode, [("tck2trk.out_file", "tracts"),
        ("CFFConverter.connectome_file", "connectome"),
        ("NxStatsCFFConverter.connectome_file", "nxstatscff"),
        ("CreateMatrix.matrix_mat_file", "cmatrix"),
        ("CreateMatrix.mean_fiber_length_matrix_mat_file", "mean_fiber_length"),
        ("CreateMatrix.median_fiber_length_matrix_mat_file", "median_fiber_length"),
        ("CreateMatrix.fiber_length_std_matrix_mat_file", "fiber_length_std"),
        ("CreateMatrix.matrix_files", "networks"),
        ("CreateMatrix.filtered_tractographies", "filtered_tracts"),
        ("merge_nfib_csvs.csv_file", "fiber_csv"),
        ("mri_convert_ROI_scale500.out_file", "rois"),
        ("trk2tdi.out_file", "tdi"),
        ("csdeconv.spherical_harmonics_image", "odfs"),
        ("mri_convert_Brain.out_file", "struct"),
        ("MRconvert_fa.converted", "fa"),
        ("MRconvert_tracks2prob.converted", "tracks2prob")])
        ])

    connectivity.connect([(cmats_to_csv, outputnode,[("outputnode.csv_file","cmatrices_csv")])])
    connectivity.connect([(networkx, outputnode,[("outputnode.csv_files","nxcsv")])])
    return connectivity
Example 28
def create_spatial_map_dataflow(spatial_maps, wf_name='datasource_maps'):

    import os

    wf = pe.Workflow(name=wf_name)

    spatial_map_dict = {}

    for spatial_map_file in spatial_maps:

        spatial_map_file = spatial_map_file.rstrip('\r\n')
        base_file = os.path.basename(spatial_map_file)

        try:
            valid_extensions = ['.nii', '.nii.gz']

            # strip the matching extension to recover the base name
            base_name = [
                base_file[:-len(ext)] for ext in valid_extensions
                if base_file.endswith(ext)
            ][0]

            if base_name in spatial_map_dict:
                raise ValueError(
                    'Files with same name not allowed: %s %s' %
                    (spatial_map_file, spatial_map_dict[base_name]))

            spatial_map_dict[base_name] = spatial_map_file

        except IndexError:
            raise Exception('Error in spatial_map_dataflow: '
                            'File extension must be .nii or .nii.gz')

    inputnode = pe.Node(util.IdentityInterface(
        fields=['spatial_map', 'spatial_map_file', 'creds_path', 'dl_dir'],
        mandatory_inputs=True),
                        name='inputspec')

    spatial_map_keys, spatial_map_values = \
        zip(*spatial_map_dict.items())

    inputnode.synchronize = True
    inputnode.iterables = [
        ('spatial_map', spatial_map_keys),
        ('spatial_map_file', spatial_map_values),
    ]

    check_s3_node = pe.Node(function.Function(
        input_names=['file_path', 'creds_path', 'dl_dir', 'img_type'],
        output_names=['local_path'],
        function=check_for_s3,
        as_module=True),
                            name='check_for_s3')

    wf.connect(inputnode, 'spatial_map_file', check_s3_node, 'file_path')
    wf.connect(inputnode, 'creds_path', check_s3_node, 'creds_path')
    wf.connect(inputnode, 'dl_dir', check_s3_node, 'dl_dir')
    check_s3_node.inputs.img_type = 'mask'

    select_spatial_map = pe.Node(util.IdentityInterface(fields=['out_file'],
                                                        mandatory_inputs=True),
                                 name='select_spatial_map')

    wf.connect(check_s3_node, 'local_path', select_spatial_map, 'out_file')

    return wf
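# A minimal usage sketch for the dataflow above (the file paths are
# hypothetical placeholders; creds_path and dl_dir feed the check_for_s3
# node):
#
#     wf = create_spatial_map_dataflow(['/maps/map_a.nii.gz',
#                                       '/maps/map_b.nii.gz'])
#     wf.inputs.inputspec.creds_path = None
#     wf.inputs.inputspec.dl_dir = '/tmp'
#     wf.run()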
Example 29
def init_single_subject_wf(
        subject_id, name, reportlets_dir, output_dir, bids_dir, ignore, debug,
        write_local_bvecs, low_mem, anat_only, longitudinal, b0_threshold,
        denoise_before_combining, dwi_denoise_window, combine_all_dwis,
        omp_nthreads, skull_strip_template, force_spatial_normalization,
        skull_strip_fixed_seed, freesurfer, hires, output_spaces, template,
        output_resolution, prefer_dedicated_fmaps, motion_corr_to,
        b0_to_t1w_transform, hmc_model, hmc_transform, shoreline_iters,
        eddy_config, impute_slice_threshold, fmap_bspline, fmap_demean,
        use_syn, force_syn):
    """
    This workflow organizes the preprocessing pipeline for a single subject.
    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and diffusion preprocessing.

    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Diffusion preprocessing is performed using a separate workflow for each
    session's dwi series.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from qsiprep.workflows.base import init_single_subject_wf

        wf = init_single_subject_wf(
            subject_id='test',
            name='single_subject_qsipreptest_wf',
            reportlets_dir='.',
            output_dir='.',
            bids_dir='.',
            ignore=[],
            debug=False,
            low_mem=False,
            output_resolution=1.25,
            denoise_before_combining=True,
            dwi_denoise_window=7,
            anat_only=False,
            longitudinal=False,
            b0_threshold=100,
            freesurfer=False,
            hires=False,
            force_spatial_normalization=True,
            combine_all_dwis=True,
            omp_nthreads=1,
            skull_strip_template='OASIS',
            skull_strip_fixed_seed=False,
            output_spaces=['T1w', 'template'],
            template='MNI152NLin2009cAsym',
            prefer_dedicated_fmaps=False,
            motion_corr_to='iterative',
            b0_to_t1w_transform='Rigid',
            hmc_model='3dSHORE',
            hmc_transform='Affine',
            eddy_config=None,
            shoreline_iters=2,
            impute_slice_threshold=0.0,
            write_local_bvecs=False,
            fmap_bspline=False,
            fmap_demean=True,
            use_syn=False,
            force_syn=False)

    Parameters
    ----------
        subject_id : str
            Subject label
        name : str
            Name of workflow
        ignore : list
            Preprocessing steps to skip (may include "sbref", "fieldmaps")
        debug : bool
            Do inaccurate but fast normalization
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        anat_only : bool
            Disable functional workflows
        longitudinal : bool
            Treat multiple sessions as longitudinal (may increase runtime)
            See sub-workflows for specific differences
        b0_threshold : int
            Images with b-values less than this value will be treated as b=0 images.
        dwi_denoise_window : int
            Window size in voxels for ``dwidenoise``. Must be odd. If 0,
            ``dwidenoise`` will not be run.
        denoise_before_combining : bool
            Run ``dwidenoise`` before combining dwis. Requires ``combine_all_dwis``.
        combine_all_dwis : bool
            Combine all dwi sequences within a session into a single data set
        omp_nthreads : int
            Maximum number of threads an individual process may use
        skull_strip_template : str
            Name of ANTs skull-stripping template ('OASIS' or 'NKI')
        skull_strip_fixed_seed : bool
            Do not use a random seed for skull-stripping - will ensure
            run-to-run replicability when used with --omp-nthreads 1
        freesurfer : bool
            Enable FreeSurfer surface reconstruction (may increase runtime)
        hires : bool
            Enable sub-millimeter preprocessing in FreeSurfer
        reportlets_dir : str
            Directory in which to save reportlets
        output_dir : str
            Directory in which to save derivatives
        bids_dir : str
            Root directory of BIDS dataset
        output_spaces : list
            List of output spaces the preprocessed images are to be resampled
            to. Some parts of the pipeline will only be instantiated for some
            output spaces.

            Valid spaces:

             - T1w
             - template

        template : str
            Name of template targeted by ``template`` output space
        hmc_model : 'none', '3dSHORE' or 'MAPMRI'
            Model used to generate target images for head motion correction. If 'none'
            the transform from the nearest b0 will be used.
        hmc_transform : "Rigid" or "Affine"
            Type of transform used for head motion correction
        impute_slice_threshold : float
            Impute data in slices that are this many SDs from expected. If 0, no slices
            will be imputed.
        motion_corr_to : str
            Motion correct using the 'first' b0 image or use an 'iterative'
            method to motion correct to the midpoint of the b0 images
        eddy_config : str
            Path to a JSON file containing config options for eddy
        b0_to_t1w_transform : "Rigid" or "Affine"
            Type of transform used for b0-T1w registration
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        use_syn : bool
            **Experimental**: Enable ANTs SyN-based susceptibility distortion
            correction (SDC). If fieldmaps are present and enabled, this is not
            run, by default.
        force_syn : bool
            **Temporary**: Always run SyN-based SDC


    Inputs
    ------

        subjects_dir
            FreeSurfer SUBJECTS_DIR

    """
    if name in ('single_subject_wf', 'single_subject_qsipreptest_wf'):
        # for documentation purposes
        subject_data = {
            't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
            'dwi': ['/completely/made/up/path/sub-01_dwi.nii.gz']
        }
        layout = None
        LOGGER.warning("Building a test workflow")
    else:
        subject_data, layout = collect_data(bids_dir, subject_id)

    # Make sure we always go through these two checks
    if not anat_only and subject_data['dwi'] == []:
        raise Exception("No dwi images found for participant {}. "
                        "All workflows require dwi images.".format(subject_id))

    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *QSIprep* {qsiprep_ver},
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(qsiprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

Many internal operations of *qsiprep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362] and
*Dipy* [@dipy].
For more details of the pipeline, see [the section corresponding
to workflows in *qsiprep*'s documentation]\
(https://qsiprep.readthedocs.io/en/latest/workflows.html \
"qsiprep's documentation").


### References

""".format(nilearn_ver=nilearn_ver)

    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=anat_only),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(),
                        name='bids_info',
                        run_without_submitting=True)

    summary = pe.Node(SubjectSummary(output_spaces=output_spaces,
                                     template=template),
                      name='summary',
                      run_without_submitting=True)

    about = pe.Node(AboutSummary(version=__version__,
                                 command=' '.join(sys.argv)),
                    name='about',
                    run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, suffix='summary'),
                                name='ds_report_summary',
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, suffix='about'),
                              name='ds_report_about',
                              run_without_submitting=True)

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        name="anat_preproc_wf",
        skull_strip_template=skull_strip_template,
        skull_strip_fixed_seed=skull_strip_fixed_seed,
        output_spaces=output_spaces,
        template=template,
        output_resolution=output_resolution,
        force_spatial_normalization=force_spatial_normalization,
        debug=debug,
        longitudinal=longitudinal,
        omp_nthreads=omp_nthreads,
        freesurfer=freesurfer,
        hires=hires,
        reportlets_dir=reportlets_dir,
        output_dir=output_dir,
        num_t1w=len(subject_data['t1w']))

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir',
                                       'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')
                              ]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w')]),
        (bids_info, summary, [('subject_id', 'subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (summary, anat_preproc_wf, [('subject_id', 'inputnode.subject_id')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    if anat_only:
        return workflow

    if impute_slice_threshold > 0 and hmc_model == "none":
        LOGGER.warning(
            "hmc_model must not be 'none' if slices are to be imputed. "
            "setting `impute_slice_threshold=0`")
        impute_slice_threshold = 0

    # Handle the grouping of multiple dwi files within a session
    dwi_session_groups = get_session_groups(layout, subject_data,
                                            combine_all_dwis)
    LOGGER.info(dwi_session_groups)

    dwi_fmap_groups = []
    for dwi_session_group in dwi_session_groups:
        dwi_fmap_groups.extend(
            group_by_warpspace(dwi_session_group, layout,
                               prefer_dedicated_fmaps, hmc_model == "eddy",
                               "fieldmaps" in ignore or force_syn,
                               combine_all_dwis))
    outputs_to_files = {
        _get_output_fname(dwi_group): dwi_group
        for dwi_group in dwi_fmap_groups
    }
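    # outputs_to_files maps a derived output prefix (from _get_output_fname)
    # to its dwi/fieldmap group; one dwi preprocessing workflow is built per
    # entry in the loop below.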

    summary.inputs.dwi_groupings = outputs_to_files

    # create a processing pipeline for the dwis in each session
    for output_fname, dwi_info in outputs_to_files.items():
        dwi_preproc_wf = init_dwi_preproc_wf(
            scan_groups=dwi_info,
            output_prefix=output_fname,
            layout=layout,
            ignore=ignore,
            b0_threshold=b0_threshold,
            dwi_denoise_window=dwi_denoise_window,
            denoise_before_combining=denoise_before_combining,
            motion_corr_to=motion_corr_to,
            b0_to_t1w_transform=b0_to_t1w_transform,
            write_local_bvecs=write_local_bvecs,
            hmc_model=hmc_model,
            hmc_transform=hmc_transform,
            shoreline_iters=shoreline_iters,
            eddy_config=eddy_config,
            impute_slice_threshold=impute_slice_threshold,
            reportlets_dir=reportlets_dir,
            output_spaces=output_spaces,
            template=template,
            output_dir=output_dir,
            omp_nthreads=omp_nthreads,
            low_mem=low_mem,
            fmap_bspline=fmap_bspline,
            fmap_demean=fmap_demean,
            use_syn=use_syn,
            force_syn=force_syn)

        workflow.connect([
            (
                anat_preproc_wf,
                dwi_preproc_wf,
                [
                    ('outputnode.t1_preproc', 'inputnode.t1_preproc'),
                    ('outputnode.t1_brain', 'inputnode.t1_brain'),
                    ('outputnode.t1_mask', 'inputnode.t1_mask'),
                    ('outputnode.t1_seg', 'inputnode.t1_seg'),
                    ('outputnode.t1_aseg', 'inputnode.t1_aseg'),
                    ('outputnode.t1_aparc', 'inputnode.t1_aparc'),
                    ('outputnode.t1_tpms', 'inputnode.t1_tpms'),
                    ('outputnode.t1_2_mni_forward_transform',
                     'inputnode.t1_2_mni_forward_transform'),
                    ('outputnode.t1_2_mni_reverse_transform',
                     'inputnode.t1_2_mni_reverse_transform'),
                    ('outputnode.dwi_sampling_grid',
                     'inputnode.dwi_sampling_grid'),
                    # Undefined if --no-freesurfer, but this is safe
                    ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                    ('outputnode.subject_id', 'inputnode.subject_id'),
                    ('outputnode.t1_2_fsnative_forward_transform',
                     'inputnode.t1_2_fsnative_forward_transform'),
                    ('outputnode.t1_2_fsnative_reverse_transform',
                     'inputnode.t1_2_fsnative_reverse_transform')
                ]),
        ])

    return workflow
Example 30
def init_func_preproc_wf(bold_file):
    """
    This workflow controls the functional preprocessing stages of *fMRIPrep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep_rodents.workflows.tests import mock_config
            from fmriprep_rodents import config
            from fmriprep_rodents.workflows.bold.base import init_func_preproc_wf
            with mock_config():
                bold_file = config.execution.bids_dir / 'sub-01' / 'func' \
                    / 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz'
                wf = init_func_preproc_wf(str(bold_file))

    Parameters
    ----------
    bold_file
        BOLD series NIfTI file

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_aseg
        Segmentation of structural image, done with FreeSurfer.
    t1w_aparc
        Parcellation of structural image, done with FreeSurfer.
    t1w_tpms
        List of tissue probability maps in T1w space
    template
        List of templates to target
    anat2std_xfm
        List of transform files, collated with templates
    std2anat_xfm
        List of inverse transform files, collated with templates
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w

    Outputs
    -------
    bold_t1
        BOLD series, resampled to T1w space
    bold_mask_t1
        BOLD series mask in T1w space
    bold_std
        BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    confounds
        TSV of confounds
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    aroma_noise_ics
        Noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix
    bold_cifti
        BOLD CIFTI image
    cifti_variant
        Combination of target spaces for ``bold_cifti``

    See Also
    --------

    * :py:func:`~niworkflows.func.util.init_bold_reference_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.stc.init_bold_stc_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.hmc.init_bold_hmc_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.t2s.init_bold_t2s_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.registration.init_bold_t1_trans_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.registration.init_bold_reg_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.confounds.init_bold_confounds_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.confounds.init_ica_aroma_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.resampling.init_bold_std_trans_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.resampling.init_bold_preproc_trans_wf`
    * :py:func:`~fmriprep_rodents.workflows.bold.resampling.init_bold_surf_wf`
    * :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
    * :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
    * :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
    * :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
    * :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.func.util import init_bold_reference_wf
    from niworkflows.interfaces.nibabel import ApplyMask
    from niworkflows.interfaces.utility import KeySelect
    from niworkflows.interfaces.utils import DictMerge
    from sdcflows.workflows.base import init_sdc_estimate_wf, fieldmap_wrangler

    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10

    # Have some options handy
    omp_nthreads = config.nipype.omp_nthreads
    freesurfer = config.workflow.run_reconall
    spaces = config.workflow.spaces
    output_dir = str(config.execution.output_dir)

    # Extract BIDS entities and metadata from BOLD file(s)
    entities = extract_entities(bold_file)
    layout = config.execution.layout

    # Take first file as reference
    ref_file = pop_file(bold_file)
    metadata = layout.get_metadata(ref_file)

    echo_idxs = listify(entities.get("echo", []))
    multiecho = len(echo_idxs) > 2
    if len(echo_idxs) == 1:
        config.loggers.workflow.warning(
            f"Running a single echo <{ref_file}> from a seemingly multi-echo dataset."
        )
        bold_file = ref_file  # Just in case - drop the list

    if len(echo_idxs) == 2:
        raise RuntimeError(
            "Multi-echo processing requires at least three different echos (found two)."
        )

    if multiecho:
        # Drop echo entity for future queries, have a boolean shorthand
        entities.pop("echo", None)
        # reorder echoes from shortest to longest echo time (TE)
        tes, bold_file = zip(*sorted([
            (layout.get_metadata(bf)["EchoTime"], bf) for bf in bold_file
        ]))
        ref_file = bold_file[0]  # Reset reference to be the shortest TE

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    config.loggers.workflow.debug(
        'Creating bold processing workflow for <%s> (%.2f GB / %d TRs). '
        'Memory resampled/largemem=%.2f/%.2f GB.',
        ref_file, mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    # Find associated sbref, if possible
    entities['suffix'] = 'sbref'
    entities['extension'] = ['nii', 'nii.gz']  # Overwrite extensions
    sbref_files = layout.get(return_type='file', **entities)

    sbref_msg = f"No single-band-reference found for {os.path.basename(ref_file)}."
    if sbref_files and 'sbref' in config.workflow.ignore:
        sbref_msg = "Single-band reference file(s) found and ignored."
    elif sbref_files:
        sbref_msg = "Using single-band reference file(s) {}.".format(
            ','.join([os.path.basename(sbf) for sbf in sbref_files]))
    config.loggers.workflow.info(sbref_msg)

    # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
    fmaps = None
    if 'fieldmaps' not in config.workflow.ignore:
        fmaps = fieldmap_wrangler(layout, ref_file,
                                  use_syn=config.workflow.use_syn_sdc,
                                  force_syn=config.workflow.force_syn)
    elif config.workflow.use_syn_sdc or config.workflow.force_syn:
        # If fieldmaps are not enabled, activate SyN-SDC in unforced (False) mode
        fmaps = {'syn': False}

    # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
    run_stc = (
        bool(metadata.get("SliceTiming"))
        and 'slicetiming' not in config.workflow.ignore
        and (_get_series_len(ref_file) > 4 or "TooShort")
    )
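    # run_stc is therefore True (run STC), False (no SliceTiming metadata or
    # the user disabled it), or the truthy sentinel 'TooShort' (metadata
    # present but the series has <= 4 volumes); only `run_stc is True`
    # triggers the STC branch below.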

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_file', 'subjects_dir', 'subject_id',
                'anat_preproc', 'anat_mask', 'anat_dseg', 'anat_tpms',
                'anat_aseg', 'anat_aparc',
                'anat2std_xfm', 'std2anat_xfm', 'template',
                'anat2fsnative_xfm', 'fsnative2anat_xfm']),
        name='inputnode')
    inputnode.inputs.bold_file = bold_file

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1', 'bold_aparc_t1',
                'bold_std', 'bold_std_ref', 'bold_mask_std', 'bold_aseg_std', 'bold_aparc_std',
                'bold_native', 'bold_cifti', 'cifti_variant', 'cifti_metadata', 'cifti_density',
                'surfaces', 'confounds', 'aroma_noise_ics', 'melodic_mix', 'nonaggr_denoised_file',
                'confounds_metadata']),
        name='outputnode')

    # Generate a brain-masked conversion of the t1w
    t1w_brain = pe.Node(ApplyMask(), name='t1w_brain')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='boldbuffer')
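    # Downstream nodes connect to boldbuffer rather than to a specific STC
    # output, so the slice-timing branch below can route either the original
    # or the corrected series through the same edge.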

    summary = pe.Node(
        FunctionalSummary(
            slice_timing=run_stc,
            registration=('FSL', 'FreeSurfer')[freesurfer],
            registration_dof=config.workflow.bold2t1w_dof,
            registration_init=config.workflow.bold2t1w_init,
            pe_direction=metadata.get("PhaseEncodingDirection"),
            echo_idx=echo_idxs,
            tr=metadata.get("RepetitionTime")),
        name='summary', mem_gb=config.DEFAULT_MEMORY_MIN_GB, run_without_submitting=True)
    summary.inputs.dummy_scans = config.workflow.dummy_scans

    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=config.workflow.cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=output_dir,
        spaces=spaces,
        use_aroma=config.workflow.use_aroma,
    )

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_t1', 'inputnode.bold_t1'),
            ('bold_t1_ref', 'inputnode.bold_t1_ref'),
            ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
            ('bold_mask_t1', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surf_files'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_metadata', 'inputnode.cifti_metadata'),
            ('cifti_density', 'inputnode.cifti_density'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
        ]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(
        omp_nthreads=omp_nthreads,
        bold_file=bold_file,
        sbref_files=sbref_files,
        multiecho=multiecho,
    )
    bold_reference_wf.inputs.inputnode.dummy_scans = config.workflow.dummy_scans

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'), name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(
        bold2t1w_dof=config.workflow.bold2t1w_dof,
        bold2t1w_init=config.workflow.bold2t1w_init,
        freesurfer=freesurfer,
        mem_gb=mem_gb['resampled'],
        name='bold_reg_wf',
        omp_nthreads=omp_nthreads,
        sloppy=config.execution.debug,
        use_bbr=config.workflow.use_bbr,
        use_compression=False,
    )

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=bool(fmaps),
                                             multiecho=multiecho,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        regressors_all_comps=config.workflow.regressors_all_comps,
        regressors_fd_th=config.workflow.regressors_fd_th,
        regressors_dvars_th=config.workflow.regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=bool(fmaps),
        name='bold_bold_trans_wf'
    )
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf, [
                ('outputnode.skip_vols', 'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([
                (bold_reference_wf, bold_stc_wf, [
                    ('outputnode.bold_file', 'inputnode.bold_file')])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([
                (meepi_echos, bold_stc_wf, [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([
            (bold_reference_wf, boldbuffer, [('outputnode.bold_file', 'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_estimate_wf(fmaps, metadata,
                                       omp_nthreads=omp_nthreads,
                                       debug=config.execution.debug)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from niworkflows.func.util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        inputnode.inputs.bold_file = ref_file  # Replace reference with first echo

        join_echos = pe.JoinNode(niu.IdentityInterface(fields=['bold_files']),
                                 joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
                                 joinfield=['bold_files'],
                                 name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       name='bold_t2smap_wf')

        workflow.connect([
            (skullstrip_bold_wf, join_echos, [
                ('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [
                ('bold_files', 'inputnode.bold_file')]),
        ])

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        (inputnode, t1w_brain, [('anat_preproc', 'in_file'),
                                ('anat_mask', 'in_mask')]),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf, [
            ('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
            ('outputnode.bold_file', 'inputnode.bold_file')]),
        (bold_reference_wf, summary, [
            ('outputnode.algo_dummy_scans', 'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (inputnode, bold_reg_wf, [
            ('anat_dseg', 'inputnode.t1w_dseg'),
            # Undefined if --fs-no-reconall, but this is safe
            ('subjects_dir', 'inputnode.subjects_dir'),
            ('subject_id', 'inputnode.subject_id'),
            ('fsnative2anat_xfm', 'inputnode.fsnative2t1w_xfm')]),
        (t1w_brain, bold_reg_wf, [
            ('out_file', 'inputnode.t1w_brain')]),
        (inputnode, bold_t1_trans_wf, [
            ('bold_file', 'inputnode.name_source'),
            ('anat_mask', 'inputnode.t1w_mask'),
            ('anat_aseg', 'inputnode.t1w_aseg'),
            ('anat_aparc', 'inputnode.t1w_aparc')]),
        (t1w_brain, bold_t1_trans_wf, [
            ('out_file', 'inputnode.t1w_brain')]),
        # unused if multiecho, but this is safe
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms', 'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [
            ('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode, [('outputnode.bold_t1', 'bold_t1'),
                                        ('outputnode.bold_t1_ref', 'bold_t1_ref'),
                                        ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
                                        ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (t1w_brain, bold_sdc_wf, [
            ('out_file', 'inputnode.t1w_brain')]),
        (bold_reference_wf, bold_sdc_wf, [
            ('outputnode.ref_image', 'inputnode.epi_file'),
            ('outputnode.ref_image_brain', 'inputnode.epi_brain'),
            ('outputnode.bold_mask', 'inputnode.epi_mask')]),
        (bold_sdc_wf, bold_t1_trans_wf, [
            ('outputnode.out_warp', 'inputnode.fieldwarp'),
            ('outputnode.epi_mask', 'inputnode.ref_bold_mask'),
            ('outputnode.epi_brain', 'inputnode.ref_bold_brain')]),
        (bold_sdc_wf, bold_bold_trans_wf, [
            ('outputnode.out_warp', 'inputnode.fieldwarp'),
            ('outputnode.epi_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, bold_reg_wf, [
            ('outputnode.epi_brain', 'inputnode.ref_bold_brain')]),
        (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('anat_tpms', 'inputnode.t1w_tpms'),
                                        ('anat_mask', 'inputnode.t1w_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [
            ('outputnode.movpar_file', 'inputnode.movpar_file'),
            ('outputnode.rmsd_file', 'inputnode.rmsd_file')]),
        (bold_reg_wf, bold_confounds_wf, [
            ('outputnode.itk_t1_to_bold', 'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [
            ('outputnode.skip_vols', 'inputnode.skip_vols')]),
        (bold_bold_trans_wf, bold_confounds_wf, [
            ('outputnode.bold_mask', 'inputnode.bold_mask'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_metadata', 'confounds_metadata'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [
            ('out_files', 'inputnode.bold_file')]),
        (bold_hmc_wf, bold_bold_trans_wf, [
            ('outputnode.xforms', 'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, func_derivatives_wf, [
                ('bold_file', 'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf, [
                ('outputnode.bold', 'inputnode.bold')]),
            (bold_split, bold_t1_trans_wf, [
                ('out_files', 'inputnode.bold_split')]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf, [
                (('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [
                ('outputnode.bold', 'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf, [
                ('outputnode.bold', 'inputnode.bold')]),
            (bold_t2s_wf, bold_t1_trans_wf, [
                ('outputnode.bold', 'inputnode.bold_split')]),
        ])

    if fmaps:
        from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
        # Report on BOLD correction
        fmap_unwarp_report_wf = init_sdc_unwarp_report_wf()
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [
                ('anat_dseg', 'inputnode.in_seg')]),
            (bold_reference_wf, fmap_unwarp_report_wf, [
                ('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [
                ('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [
                ('outputnode.epi_corrected', 'inputnode.in_post')]),
        ])

        # Overwrite ``out_path_base`` of unwarping DataSinks
        # And ensure echo is dropped from report
        for node in fmap_unwarp_report_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                fmap_unwarp_report_wf.get_node(node).interface.out_path_base = 'fmriprep-rodents'
                fmap_unwarp_report_wf.get_node(node).inputs.dismiss_entities = ("echo",)

        for node in bold_sdc_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                bold_sdc_wf.get_node(node).interface.out_path_base = 'fmriprep-rodents'
                bold_sdc_wf.get_node(node).inputs.dismiss_entities = ("echo",)

        if 'syn' in fmaps:
            sdc_select_std = pe.Node(
                KeySelect(fields=['std2anat_xfm']),
                name='sdc_select_std', run_without_submitting=True)
            sdc_select_std.inputs.key = 'MNI152NLin2009cAsym'
            workflow.connect([
                (inputnode, sdc_select_std, [('std2anat_xfm', 'std2anat_xfm'),
                                             ('template', 'keys')]),
                (sdc_select_std, bold_sdc_wf, [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
            ])

        if fmaps.get('syn') is True:  # SyN forced
            syn_unwarp_report_wf = init_sdc_unwarp_report_wf(
                name='syn_unwarp_report_wf', forcedsyn=True)
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [
                    ('anat_dseg', 'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf, [
                    ('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf, [
                    ('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf, [
                    ('outputnode.syn_ref', 'inputnode.in_post')]),
            ])

            # Overwrite ``out_path_base`` of unwarping DataSinks
            # And ensure echo is dropped from report
            for node in syn_unwarp_report_wf.list_node_names():
                if node.split('.')[-1].startswith('ds_'):
                    syn_unwarp_report_wf.get_node(node).interface.out_path_base = 'fmriprep-rodents'
                    syn_unwarp_report_wf.get_node(node).inputs.dismiss_entities = ("echo",)

    # Map final BOLD mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(('T1w', 'anat')):
        from niworkflows.interfaces.fixes import (
            FixHeaderApplyTransforms as ApplyTransforms
        )

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel'),
                                  name='boldmask_to_t1w', mem_gb=0.1)
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [
                ('outputnode.itk_bold_to_t1', 'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [
                ('outputnode.bold_mask_t1', 'reference_image')]),
            (bold_bold_trans_wf, boldmask_to_t1w, [
                ('outputnode.bold_mask', 'input_image')]),
            (boldmask_to_t1w, outputnode, [
                ('output_image', 'bold_mask_t1')]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        workflow.connect([
            (bold_bold_trans_wf, outputnode, [
                ('outputnode.bold', 'bold_native')]),
            (bold_bold_trans_wf, func_derivatives_wf, [
                ('outputnode.bold_ref', 'inputnode.bold_native_ref'),
                ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3,)):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            name='bold_std_trans_wf',
            use_compression=not config.execution.low_mem,
            use_fieldwarp=bool(fmaps),
        )
        workflow.connect([
            (inputnode, bold_std_trans_wf, [
                ('template', 'inputnode.templates'),
                ('anat2std_xfm', 'inputnode.anat2std_xfm'),
                ('bold_file', 'inputnode.name_source'),
                ('anat_aseg', 'inputnode.bold_aseg'),
                ('anat_aparc', 'inputnode.bold_aparc')]),
            (bold_hmc_wf, bold_std_trans_wf, [
                ('outputnode.xforms', 'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_std_trans_wf, [
                ('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf, bold_std_trans_wf, [
                ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_std_trans_wf, [
                ('outputnode.out_warp', 'inputnode.fieldwarp')]),
            (bold_std_trans_wf, outputnode, [('outputnode.bold_std', 'bold_std'),
                                             ('outputnode.bold_std_ref', 'bold_std_ref'),
                                             ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('outputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode, [
                    ('outputnode.bold_aseg_std', 'bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        if not multiecho:
            workflow.connect([
                (bold_split, bold_std_trans_wf, [
                    ('out_files', 'inputnode.bold_split')])
            ])
        else:
            split_opt_comb = bold_split.clone(name='split_opt_comb')
            workflow.connect([
                (bold_t2s_wf, split_opt_comb, [
                    ('outputnode.bold', 'in_file')]),
                (split_opt_comb, bold_std_trans_wf, [
                    ('out_files', 'inputnode.bold_split')
                ])
            ])

        # func_derivatives_wf internally parametrizes over snapshotted spaces.
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.spatial_reference', 'inputnode.spatial_reference'),
                ('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('outputnode.bold_std', 'inputnode.bold_std'),
                ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])

        if config.workflow.use_aroma:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf
            ica_aroma_wf = init_ica_aroma_wf(
                mem_gb=mem_gb['resampled'],
                metadata=metadata,
                omp_nthreads=omp_nthreads,
                use_fieldwarp=bool(fmaps),
                err_on_aroma_warn=config.workflow.aroma_err_on_warn,
                aroma_melodic_dim=config.workflow.aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(niu.Function(output_names=["out_file"],
                                        function=_to_join),
                           name='aroma_confounds')

            mrg_conf_metadata = pe.Node(niu.Merge(2), name='merge_confound_metadata',
                                        run_without_submitting=True)
            mrg_conf_metadata2 = pe.Node(DictMerge(), name='merge_confound_metadata2',
                                         run_without_submitting=True)
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf, [
                    ('bold_file', 'inputnode.name_source')]),
                (bold_hmc_wf, ica_aroma_wf, [
                    ('outputnode.movpar_file', 'inputnode.movpar_file')]),
                (bold_reference_wf, ica_aroma_wf, [
                    ('outputnode.skip_vols', 'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [
                    ('outputnode.confounds_file', 'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                    [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join,
                    [('outputnode.aroma_confounds', 'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                    [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                    [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                     ('outputnode.melodic_mix', 'melodic_mix'),
                     ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict', 'confounds_metadata')]),
                (bold_std_trans_wf, ica_aroma_wf, [
                    ('outputnode.bold_std', 'inputnode.bold_std'),
                    ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                    ('outputnode.spatial_reference', 'inputnode.spatial_reference')]),
            ])

    # SURFACES ##################################################################################
    # Freesurfer
    freesurfer_spaces = spaces.get_fs_spaces()
    if freesurfer and freesurfer_spaces:
        config.loggers.workflow.debug('Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(
            mem_gb=mem_gb['resampled'],
            surface_spaces=freesurfer_spaces,
            medial_surface_nan=config.workflow.medial_surface_nan,
            name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf, [
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('anat2fsnative_xfm', 'inputnode.t1w2fsnative_xfm')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1', 'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
            (bold_surf_wf, func_derivatives_wf, [
                ('outputnode.target', 'inputnode.surf_refs')]),
        ])

        # CIFTI output
        if config.workflow.cifti_output:
            from .resampling import init_bold_grayords_wf
            bold_grayords_wf = init_bold_grayords_wf(
                grayord_density=config.workflow.cifti_output,
                mem_gb=mem_gb['resampled'],
                repetition_time=metadata['RepetitionTime'])

            workflow.connect([
                (inputnode, bold_grayords_wf, [
                    ('subjects_dir', 'inputnode.subjects_dir')]),
                (bold_std_trans_wf, bold_grayords_wf, [
                    ('outputnode.bold_std', 'inputnode.bold_std'),
                    ('outputnode.spatial_reference', 'inputnode.spatial_reference')]),
                (bold_surf_wf, bold_grayords_wf, [
                    ('outputnode.surfaces', 'inputnode.surf_files'),
                    ('outputnode.target', 'inputnode.surf_refs'),
                ]),
                (bold_grayords_wf, outputnode, [
                    ('outputnode.cifti_bold', 'bold_cifti'),
                    ('outputnode.cifti_variant', 'cifti_variant'),
                    ('outputnode.cifti_metadata', 'cifti_metadata'),
                    ('outputnode.cifti_density', 'cifti_density')]),
            ])

    if spaces.get_spaces(nonstandard=False, dim=(3,)):
        carpetplot_wf = init_carpetplot_wf(
            mem_gb=mem_gb['resampled'],
            metadata=metadata,
            cifti_output=config.workflow.cifti_output,
            name='carpetplot_wf')

        if config.workflow.cifti_output:
            workflow.connect(
                bold_grayords_wf, 'outputnode.cifti_bold', carpetplot_wf, 'inputnode.cifti_bold'
            )
        else:
            # Xform to 'MNI152NLin2009cAsym' is always computed.
            carpetplot_select_std = pe.Node(
                KeySelect(fields=['std2anat_xfm'], key='MNI152NLin2009cAsym'),
                name='carpetplot_select_std', run_without_submitting=True)

            workflow.connect([
                (inputnode, carpetplot_select_std, [
                    ('std2anat_xfm', 'std2anat_xfm'),
                    ('template', 'keys')]),
                (carpetplot_select_std, carpetplot_wf, [
                    ('std2anat_xfm', 'inputnode.std2anat_xfm')]),
                (bold_bold_trans_wf if not multiecho else bold_t2s_wf, carpetplot_wf, [
                    ('outputnode.bold', 'inputnode.bold')]),
                (bold_bold_trans_wf, carpetplot_wf, [
                    ('outputnode.bold_mask', 'inputnode.bold_mask')]),
                (bold_reg_wf, carpetplot_wf, [
                    ('outputnode.itk_t1_to_bold', 'inputnode.t1_bold_xform')]),
            ])

        workflow.connect([
            (bold_confounds_wf, carpetplot_wf, [
                ('outputnode.confounds_file', 'inputnode.confounds_file')]),
        ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(
        DerivativesDataSink(desc='summary', datatype="figures", dismiss_entities=("echo",)),
        name='ds_report_summary', run_without_submitting=True,
        mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(
        DerivativesDataSink(base_directory=output_dir, desc='validation', datatype="figures",
                            dismiss_entities=("echo",)),
        name='ds_report_validation', run_without_submitting=True,
        mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation, [
            ('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = output_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow