Example #1
def test_multiple_join_nodes():
    """Test two join nodes, one downstream of the other."""
    global _products
    _products = []
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [1, 2, 3])]
    # a pre-join node in the iterated path
    pre_join1 = pe.Node(IncrementInterface(), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'input1')
    # the first join node
    join1 = pe.JoinNode(IdentityInterface(fields=['vector']),
                        joinsource='inputspec',
                        joinfield='vector',
                        name='join1')
    wf.connect(pre_join1, 'output1', join1, 'vector')
    # a non-iterated post-join node
    post_join1 = pe.Node(SumInterface(), name='post_join1')
    wf.connect(join1, 'vector', post_join1, 'input1')
    # the downstream join node connected to both an upstream join
    # path output and a separate input in the iterated path
    join2 = pe.JoinNode(IdentityInterface(fields=['vector', 'scalar']),
                        joinsource='inputspec',
                        joinfield='vector',
                        name='join2')
    wf.connect(pre_join1, 'output1', join2, 'vector')
    wf.connect(post_join1, 'output1', join2, 'scalar')
    # a second post-join node
    post_join2 = pe.Node(SumInterface(), name='post_join2')
    wf.connect(join2, 'vector', post_join2, 'input1')
    # a third post-join node
    post_join3 = pe.Node(ProductInterface(), name='post_join3')
    wf.connect(post_join2, 'output1', post_join3, 'input1')
    wf.connect(join2, 'scalar', post_join3, 'input2')

    result = wf.run()

    # The expanded graph contains one pre_join1 replicate per inputspec
    # replicate and one of each remaining node = 3 + 5 = 8 nodes.
    # The replicated inputspec nodes are factored out of the expansion.
    assert_equal(len(result.nodes()), 8,
                 "The number of expanded nodes is incorrect.")
    # The outputs are:
    # pre_join1: [2, 3, 4]
    # post_join1: 9
    # join2: [2, 3, 4] and 9
    # post_join2: 9
    # post_join3: 9 * 9 = 81
    assert_equal(_products, [81], "The post-join product is incorrect")

    os.chdir(cwd)
    rmtree(wd)
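A note on the helpers: IncrementInterface, SumInterface and ProductInterface are test-only interfaces from nipype's join-node test module, not public API. A minimal self-contained sketch of the same iterate-then-join pattern with stock Function interfaces (all names below are illustrative):

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function, IdentityInterface

def increment(x):
    return x + 1

def total(values):
    return sum(values)

wf = pe.Workflow(name='join_sketch')
# the iterated input node
inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
inputspec.iterables = [('n', [1, 2, 3])]
# one increment node runs per iteration
inc = pe.Node(Function(input_names=['x'], output_names=['out'],
                       function=increment), name='inc')
# the join node collects the three increment outputs into one list
join = pe.JoinNode(IdentityInterface(fields=['vector']),
                   joinsource='inputspec', joinfield='vector', name='join')
sum_node = pe.Node(Function(input_names=['values'], output_names=['out'],
                            function=total), name='sum_node')
wf.connect(inputspec, 'n', inc, 'x')
wf.connect(inc, 'out', join, 'vector')
wf.connect(join, 'vector', sum_node, 'values')
# wf.run()  # join.vector receives [2, 3, 4]; sum_node.out is 9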
Example #2
def init_skullstrip_wf_3d(name, num_trs):
    workflow = pe.Workflow(name=name)
    # name the nodes
    inputnode = pe.Node(
        niu.IdentityInterface(fields=['bold_file', 'phase_file']),
        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['mask_file', 'div_file']),
        name='outputnode')

    buffernode = pe.Node(
        niu.IdentityInterface(fields=['bold_file', 'phase_file', 'volume']),
        name='buffernode')
    buffernode.iterables = [('volume', np.arange(num_trs, dtype=int))]
    workflow.connect(inputnode, 'bold_file', buffernode, 'bold_file')
    workflow.connect(inputnode, 'phase_file', buffernode, 'phase_file')

    split_bold = pe.Node(
        interface=Function(['in_file', 'volume'], ['out_file'], split_file),
        name='split_bold')
    workflow.connect(buffernode, 'bold_file', split_bold, 'in_file')
    workflow.connect(buffernode, 'volume', split_bold, 'volume')

    split_phase = pe.Node(
        interface=Function(['in_file', 'volume'], ['out_file'], split_file),
        name='split_phase')
    workflow.connect(buffernode, 'phase_file', split_phase, 'in_file')
    workflow.connect(buffernode, 'volume', split_phase, 'volume')

    divide_wf = init_test_division_wf(name='divide_wf')
    workflow.connect(split_bold, 'out_file', divide_wf, 'inputnode.bold_file')
    workflow.connect(split_phase, 'out_file', divide_wf, 'inputnode.phase_file')

    merge_divided = pe.JoinNode(
        interface=Function(['in_files'], ['out_file'], join_files),
        name='merge_divided',
        joinfield=['in_files'],
        joinsource='buffernode')
    workflow.connect(divide_wf, 'outputnode.out_file', merge_divided, 'in_files')
    workflow.connect(merge_divided, 'out_file', outputnode, 'div_file')

    bold_skullstrip_wf = init_skullstrip_bold_wf(name='bold_skullstrip_wf')
    workflow.connect(split_bold, 'out_file', bold_skullstrip_wf, 'inputnode.in_file')

    mask_merger = pe.JoinNode(
        interface=Function(['in_files'], ['out_file'], join_files),
        name='mask_merger',
        joinfield=['in_files'],
        joinsource='buffernode')
    workflow.connect(bold_skullstrip_wf, 'outputnode.mask_file', mask_merger, 'in_files')
    workflow.connect(mask_merger, 'out_file', outputnode, 'mask_file')
    return workflow
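The split_file and join_files helpers wired into the Function nodes above are not shown in this example. A plausible sketch of what they might do, assuming nibabel-based NIfTI handling (hypothetical; the real helpers may differ):

def split_file(in_file, volume):
    # extract a single 3D volume from a 4D image; imports live inside the
    # body because nipype executes Function code in an isolated namespace
    import os
    import nibabel as nib
    img = nib.load(in_file)
    data = img.get_fdata()[..., volume]
    out_file = os.path.abspath('vol%04d.nii.gz' % volume)
    nib.Nifti1Image(data, img.affine, img.header).to_filename(out_file)
    return out_file

def join_files(in_files):
    # concatenate the per-volume results back into one 4D image
    import os
    import nibabel as nib
    out_file = os.path.abspath('merged.nii.gz')
    nib.concat_images(in_files, check_affines=False).to_filename(out_file)
    return out_file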
Example #3
def test_itersource_two_join_nodes():
    """Test join with a midstream ``itersource`` and an upstream
    iterable."""
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [1, 2])]
    # an intermediate node in the first iteration path
    pre_join1 = pe.Node(IncrementInterface(), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'input1')
    # an iterable pre-join node with an itersource
    pre_join2 = pe.Node(ProductInterface(), name='pre_join2')
    pre_join2.itersource = ('inputspec', 'n')
    pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]})
    wf.connect(pre_join1, 'output1', pre_join2, 'input2')
    # an intermediate node in the second iteration path
    pre_join3 = pe.Node(IncrementInterface(), name='pre_join3')
    wf.connect(pre_join2, 'output1', pre_join3, 'input1')
    # the first join node
    join1 = pe.JoinNode(IdentityInterface(fields=['vector']),
                        joinsource='pre_join2',
                        joinfield='vector',
                        name='join1')
    wf.connect(pre_join3, 'output1', join1, 'vector')
    # a join successor node
    post_join1 = pe.Node(SumInterface(), name='post_join1')
    wf.connect(join1, 'vector', post_join1, 'input1')
    # a summary join node
    join2 = pe.JoinNode(IdentityInterface(fields=['vector']),
                        joinsource='inputspec',
                        joinfield='vector',
                        name='join2')
    wf.connect(post_join1, 'output1', join2, 'vector')

    result = wf.run()

    # the expanded graph contains the 14 nodes of the
    # test_itersource_join_source_node expansion plus the summary join node.
    assert_equal(len(result.nodes()), 15,
                 "The number of expanded nodes is incorrect.")

    os.chdir(cwd)
    rmtree(wd)
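A recap of the itersource mechanic above, with the values from this example:

# pre_join2's iterable values depend on which inputspec iteration the
# node runs under:
pre_join2.itersource = ('inputspec', 'n')
pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]})
# under n=1 it runs with input1 = 3 and 4; under n=2 with input1 = 5 and 6.
# join1 (joinsource='pre_join2') therefore joins once per inputspec
# iteration, while join2 (joinsource='inputspec') joins across them.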
Example #4
def attach_canica(main_wf, wf_name="canica", **kwargs):
    """ Attach a nilearn CanICA interface to `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    kwargs: dict[str]->str
        input_node: str
            Name of the input node from which to connect the source `input_connection`.

        input_connection: str
            Name of the connection to obtain the source files.

    Nipype Inputs for `main_wf`
    ---------------------------
    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    srcwf_name   = kwargs['input_node']
    srcconn_name = kwargs['input_connection']

    src_wf   = main_wf.get_node(srcwf_name)
    datasink = get_datasink(main_wf, name='datasink')

    base_outdir  = datasink.inputs.base_directory
    ica_datasink = pe.Node(DataSink(parameterization=False,
                                    base_directory=base_outdir,),
                           name="{}_datasink".format(wf_name))

    # the list of the subjects files
    ica_subjs = pe.JoinNode(interface=IdentityInterface(fields=["ica_subjs"]),
                            joinsource="infosrc",
                            joinfield="ica_subjs",
                            name="ica_subjs")

    # run CanICA over the group of subject files
    ica = setup_node(CanICAInterface(), name="{}_ica".format(wf_name))

    # Connect the nodes
    main_wf.connect([
                     # file list input
                     (src_wf,     ica_subjs, [(srcconn_name, "ica_subjs")]),

                     # canica
                     (ica_subjs,  ica,    [("ica_subjs",  "in_files")]),

                     # canica output
                     (ica, ica_datasink,  [("components", "canica.@components")]),
                     (ica, ica_datasink,  [("loadings",   "canica.@loadings")]),
                     (ica, ica_datasink,  [("score",      "canica.@score")]),
                   ])
    return main_wf
Example #5
def test_set_join_node():
    """Test collecting join inputs to a set."""
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [1, 2, 1, 3, 2])]
    # a pre-join node in the iterated path
    pre_join1 = pe.Node(IncrementInterface(), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'input1')
    # the set join node
    join = pe.JoinNode(SetInterface(), joinsource='inputspec',
        joinfield='input1', name='join')
    wf.connect(pre_join1, 'output1', join, 'input1')

    wf.run()

    # the join length is the number of unique inputs
    assert_equal(_set_len, 3,
        "The join Set output value is incorrect: %s." % _set_len)

    os.chdir(cwd)
    rmtree(wd)
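SetInterface is another test helper; it collects the joined inputs into a set. A roughly equivalent effect with a stock Function node (hypothetical sketch):

from nipype.interfaces.utility import Function

def to_unique(values):
    # collapse duplicates; sort for a deterministic output order
    return sorted(set(values))

uniq = pe.JoinNode(Function(input_names=['values'], output_names=['unique'],
                            function=to_unique),
                   joinsource='inputspec', joinfield='values', name='uniq')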
Example #6
def join_iterables(workflow, joinsource_list, node_prefix, num_inputs=1):
    """Chain one JoinNode per iterable source, collapsing them in order."""
    field_list = [f'file_list{j}' for j in range(num_inputs)]

    source_join = None
    joinnode_prev = None
    for joinsource in joinsource_list:
        joinnode = pe.JoinNode(niu.IdentityInterface(fields=field_list),
                               name=f"{node_prefix}_{joinsource}_joinnode",
                               joinsource=joinsource,
                               joinfield=field_list)
        if joinnode_prev is None:
            # the first join node receives the upstream connections
            source_join = joinnode
        else:
            # each subsequent join node collects the previous join's outputs
            for field in field_list:
                workflow.connect([
                    (joinnode_prev, joinnode, [
                        (field, field),
                    ]),
                ])
        joinnode_prev = joinnode

    # the last join node in the chain carries the fully merged lists
    merged_join = joinnode

    return workflow, source_join, merged_join
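Hypothetical usage of join_iterables, collapsing two nested iterable levels into one merged output (the iterable node names are assumptions for illustration):

wf = pe.Workflow(name='collapse')
wf, source_join, merged_join = join_iterables(
    wf, joinsource_list=['subject_iter', 'session_iter'],
    node_prefix='files', num_inputs=2)
# upstream nodes connect into source_join's file_list0/file_list1 fields;
# merged_join emits the same fields with both iterable levels joined.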
Example #7
def collect_all(working_path):
    import_list = ["import warnings", "warnings.filterwarnings(\"ignore\")", "import os",
                   "import numpy as np",  "import indexed_gzip", "import nibabel as nib",
                   "import glob", "import pandas as pd", "import shutil", "from pathlib import Path"]

    wf = pe.Workflow(name="load_pd_dfs")

    inputnode = pe.Node(niu.IdentityInterface(fields=['working_path']), name='inputnode')
    inputnode.inputs.working_path = working_path

    build_subject_dict_node = pe.Node(niu.Function(input_names=['sub', 'working_path'], output_names=['files_'],
                                                   function=build_subject_dict), name="build_subject_dict_node",
                                      imports=import_list)
    build_subject_dict_node.iterables = ('sub', [i for i in os.listdir(working_path) if i.startswith('sub-')])
    build_subject_dict_node.synchronize = True

    df_join_node = pe.JoinNode(niu.IdentityInterface(fields=['files_']), name='df_join_node', joinfield=['files_'],
                               joinsource=build_subject_dict_node)

    load_pd_dfs_map = pe.MapNode(niu.Function(input_names=['file_'], output_names=['df'], function=load_pd_dfs),
                                 name="load_pd_dfs", imports=import_list, iterfield=['file_'], nested=True)

    outputnode = pe.Node(niu.IdentityInterface(fields=['dfs']), name='outputnode')

    wf.connect([
        (inputnode, build_subject_dict_node, [('working_path', 'working_path')]),
        (build_subject_dict_node, df_join_node, [('files_', 'files_')]),
        (df_join_node, load_pd_dfs_map, [('files_', 'file_')]),
        (load_pd_dfs_map, outputnode, [('df', 'dfs')]),
    ])

    return wf
Example #8
def test_set_join_node_file_input():
    """Test collecting join inputs to a set."""
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    open('test.nii', 'w+').close()
    open('test2.nii', 'w+').close()

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [os.path.join(wd, 'test.nii'), os.path.join(wd, 'test2.nii')])]
    # a pre-join node in the iterated path
    pre_join1 = pe.Node(IdentityInterface(fields=['n']), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'n')
    # the set join node
    join = pe.JoinNode(PickFirst(), joinsource='inputspec',
        joinfield='in_files', name='join')
    wf.connect(pre_join1, 'n', join, 'in_files')

    wf.run()

    os.chdir(cwd)
    rmtree(wd)
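PickFirst is likewise a test helper. A stand-in that keeps only the first joined file could be written as a Function node (hypothetical sketch):

from nipype.interfaces.utility import Function

def pick_first(in_files):
    # return the first element of the joined file list
    return in_files[0]

join = pe.JoinNode(Function(input_names=['in_files'], output_names=['out'],
                            function=pick_first),
                   joinsource='inputspec', joinfield='in_files', name='join')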
Example #9
def test_unique_join_node():
    """Test join with the ``unique`` flag set to True."""
    global _sum_operands
    _sum_operands = []
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [3, 1, 2, 1, 3])]
    # a pre-join node in the iterated path
    pre_join1 = pe.Node(IncrementInterface(), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'input1')
    # the set join node
    join = pe.JoinNode(SumInterface(), joinsource='inputspec',
        joinfield='input1', unique=True, name='join')
    wf.connect(pre_join1, 'output1', join, 'input1')

    wf.run()

    assert_equal(_sum_operands[0], [4, 2, 3],
        "The unique join output value is incorrect: %s." % _sum_operands[0])

    os.chdir(cwd)
    rmtree(wd)
Example #10
def get_workflow(parameters, name=0):
    wf = pe.Workflow(name="%04d" % name + "regionGrowing")
    wf.base_dir = "/scratch/henry_temp/keshavan/region_growing_test"
    n = pe.Node(niu.Function(input_names=[
        "inputv", "seeds", "multiplier", "nbhd", "iterations", "timestep",
        "smoothingiterations"
    ],
                             output_names=["outfile"],
                             function=getSRGS),
                name="srgs")
    inputspec = pe.Node(niu.IdentityInterface(fields=["seeds", "in_file"]),
                        name="inputspec")
    n.iterables = [(q, parameters[q].tolist()) for q in [
        "multiplier", "nbhd", "iterations", "timestep", "smoothingiterations"
    ]]
    n.synchronize = True
    wf.connect(inputspec, "seeds", n, "seeds")
    wf.connect(inputspec, "in_file", n, "inputv")

    dt = pe.Node(fsl.ChangeDataType(output_datatype="short"), name="changedt")
    wf.connect(n, "outfile", dt, "in_file")

    stats = pe.Node(fsl.ImageStats(op_string="-c -w"), name="stats")
    wf.connect(dt, "out_file", stats, "in_file")

    avg = pe.JoinNode(ants.AverageImages(dimension=3, normalize=False),
                      name="average",
                      joinsource="srgs",
                      joinfield=["images"])
    wf.connect(dt, "out_file", avg, "images")

    st = pe.JoinNode(niu.Function(input_names=["out_stats", "parameters"],
                                  output_names=["outfile"],
                                  function=combine_stats),
                     name="combiner",
                     joinsource="srgs",
                     joinfield=["out_stats"])
    #wf.connect(dt, "out_file", st, "out_files")
    wf.connect(stats, "out_stat", st, "out_stats")
    st.inputs.parameters = parameters

    outputspec = pe.Node(niu.IdentityInterface(fields=["avg_image", "stats"]),
                         name="outputspec")
    wf.connect(avg, "output_average_image", outputspec, "avg_image")
    wf.connect(st, "outfile", outputspec, "stats")
    return wf, inputspec, outputspec
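A hypothetical call for get_workflow; parameters is assumed to be a table-like object (e.g. a pandas DataFrame) whose columns support .tolist() and have equal lengths, as required by n.synchronize = True:

import pandas as pd

params = pd.DataFrame({
    'multiplier': [2.0, 2.5],
    'nbhd': [1, 2],
    'iterations': [5, 10],
    'timestep': [0.05, 0.05],
    'smoothingiterations': [5, 5],
})
wf, inputspec, outputspec = get_workflow(params, name=1)
inputspec.inputs.in_file = 'input.nii.gz'   # hypothetical paths
inputspec.inputs.seeds = [(10, 20, 30)]
# wf.run()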
Example #11
def test_itersource_join_source_node():
    """Test join on an input node which has an ``itersource``."""
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [1, 2])]
    # an intermediate node in the first iteration path
    pre_join1 = pe.Node(IncrementInterface(), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'input1')
    # an iterable pre-join node with an itersource
    pre_join2 = pe.Node(ProductInterface(), name='pre_join2')
    pre_join2.itersource = ('inputspec', 'n')
    pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]})
    wf.connect(pre_join1, 'output1', pre_join2, 'input2')
    # an intermediate node in the second iteration path
    pre_join3 = pe.Node(IncrementInterface(), name='pre_join3')
    wf.connect(pre_join2, 'output1', pre_join3, 'input1')
    # the join node
    join = pe.JoinNode(IdentityInterface(fields=['vector']),
                       joinsource='pre_join2',
                       joinfield='vector',
                       name='join')
    wf.connect(pre_join3, 'output1', join, 'vector')
    # a join successor node
    post_join1 = pe.Node(SumInterface(), name='post_join1')
    wf.connect(join, 'vector', post_join1, 'input1')

    result = wf.run()

    # the expanded graph contains
    # 1 pre_join1 replicate for each inputspec iteration,
    # 2 pre_join2 replicates for each inputspec iteration,
    # 1 pre_join3 for each pre_join2 iteration,
    # 1 join replicate for each inputspec iteration and
    # 1 post_join1 replicate for each join replicate =
    # 2 + (2 * 2) + 4 + 2 + 2 = 14 expansion graph nodes.
    # Nipype factors away the iterable input
    # IdentityInterface but keeps the join IdentityInterface.
    assert_equal(len(result.nodes()), 14,
                 "The number of expanded nodes is incorrect.")
    # The first join inputs are:
    # 1 + (3 * 2) and 1 + (4 * 2)
    # The second join inputs are:
    # 1 + (5 * 3) and 1 + (6 * 3)
    # the post-join nodes execution order is indeterminate;
    # therefore, compare the lists item-wise.
    assert_true([16, 19] in _sum_operands,
                "The join Sum input is incorrect: %s." % _sum_operands)
    assert_true([7, 9] in _sum_operands,
                "The join Sum input is incorrect: %s." % _sum_operands)

    os.chdir(cwd)
    rmtree(wd)
Example #12
def create_within_subject_workflow(name, work_dir, sessions_file,
                                   session_template, scan_list, fs_dir):

    # initialize workflow
    workflow = pe.Workflow(name=name)
    workflow.base_dir = work_dir

    sessions_info = ColumnData(sessions_file, dtype=str)
    subject_ids = set(sessions_info['subject_id'])
    session_map = [(sid,
                    [s for i, s in zip(*sessions_info.values()) if i == sid])
                   for sid in subject_ids]

    # for each subject
    subjects = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
                       name='subjects')
    subjects.iterables = [('subject_id', subject_ids)]

    # for each session
    sessions = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id', 'session_dir']),
        name='sessions')
    sessions.itersource = ('subjects', 'subject_id')
    sessions.iterables = [('session_dir', dict(session_map))]
    workflow.connect(subjects, 'subject_id', sessions, 'subject_id')

    # get session directory
    get_session_dir = pe.Node(interface=nio.SelectFiles(session_template),
                              name='get_session_dir')
    workflow.connect(sessions, 'session_dir', get_session_dir, 'session_dir')

    # save outputs
    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.parameterization = False
    workflow.connect(get_session_dir, 'session_dir', datasink,
                     'base_directory')

    template = {'functional': 'mri/f.nii.gz'}
    get_files = pe.Node(nio.SelectFiles(template), name='get_files')
    workflow.connect(get_session_dir, 'session_dir', get_files,
                     'base_directory')

    join_sessions = pe.JoinNode(
        interface=util.IdentityInterface(fields=['functionals']),
        name='join_sessions',
        joinsource='sessions')
    workflow.connect(get_files, 'functional', join_sessions, 'functionals')

    within_subject_align = create_between_run_align_workflow(
        name='within_subject_align')
    workflow.connect(join_sessions, 'functionals', within_subject_align,
                     'inputs.in_files')

    return workflow
Example #13
def test_join_expansion():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [1, 2])]
    # a pre-join node in the iterated path
    pre_join1 = pe.Node(IncrementInterface(), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'input1')
    # another pre-join node in the iterated path
    pre_join2 = pe.Node(IncrementInterface(), name='pre_join2')
    wf.connect(pre_join1, 'output1', pre_join2, 'input1')
    # the join node
    join = pe.JoinNode(SumInterface(),
                       joinsource='inputspec',
                       joinfield='input1',
                       name='join')
    wf.connect(pre_join2, 'output1', join, 'input1')
    # a non-iterated post-join node
    post_join1 = pe.Node(IncrementInterface(), name='post_join1')
    wf.connect(join, 'output1', post_join1, 'input1')
    # a post-join node in the iterated path
    post_join2 = pe.Node(ProductInterface(), name='post_join2')
    wf.connect(join, 'output1', post_join2, 'input1')
    wf.connect(pre_join1, 'output1', post_join2, 'input2')

    result = wf.run()

    # the two expanded pre-join predecessor nodes feed into one join node
    joins = [node for node in result.nodes() if node.name == 'join']
    assert_equal(len(joins), 1,
                 "The number of join result nodes is incorrect.")
    # the expanded graph contains 2 * 2 = 4 iteration pre-join nodes, 1 join
    # node, 1 non-iterated post-join node and 2 * 1 iteration post-join nodes.
    # Nipype factors away the IdentityInterface.
    assert_equal(len(result.nodes()), 8,
                 "The number of expanded nodes is incorrect.")
    # the join Sum result is (1 + 1 + 1) + (2 + 1 + 1)
    assert_equal(len(_sums), 1, "The number of join outputs is incorrect")
    assert_equal(_sums[0], 7,
                 "The join Sum output value is incorrect: %s." % _sums[0])
    # the join input preserves the iterables input order
    assert_equal(_sum_operands[0], [3, 4],
                 "The join Sum input is incorrect: %s." % _sum_operands[0])
    # there are two iterations of the post-join node in the iterable path
    assert_equal(len(_products), 2,
                 "The number of iterated post-join outputs is incorrect")

    os.chdir(cwd)
    rmtree(wd)
Example #14
    def build_output_node(self):
        """Build and connect an output node to the pipeline.
        """
        import nipype.interfaces.io as nio
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils

        # Writing CAPS
        # ============
        join_node = npe.JoinNode(
            name='JoinOutputs',
            joinsource='ReadingFiles',
            interface=nutil.IdentityInterface(fields=self.get_output_fields()))

        write_node = npe.MapNode(name='WritingCAPS',
                                 iterfield=['container'] + [
                                     'connectome_based_processing.@' + o
                                     for o in self.get_output_fields()
                                 ],
                                 interface=nio.DataSink(infields=[
                                     'connectome_based_processing.@' + o
                                     for o in self.get_output_fields()
                                 ]))
        write_node.inputs.base_directory = self.caps_directory
        write_node.inputs.container = utils.get_containers(
            self.subjects, self.sessions)
        write_node.inputs.substitutions = [('trait_added', '')]
        write_node.inputs.parameterization = False

        self.connect([
            # Writing CAPS
            (self.output_node, join_node, [('response', 'response')]),
            (self.output_node, join_node, [('fod', 'fod')]),
            (self.output_node, join_node, [('tracts', 'tracts')]),
            (self.output_node, join_node, [('nodes', 'nodes')]),
            (self.output_node, join_node, [('connectomes', 'connectomes')]),
            (join_node, write_node,
             [('response', 'connectome_based_processing.@response')]),
            (join_node, write_node, [('fod',
                                      'connectome_based_processing.@fod')]),
            (join_node, write_node, [('tracts',
                                      'connectome_based_processing.@tracts')]),
            (join_node, write_node, [('nodes',
                                      'connectome_based_processing.@nodes')]),
            (join_node, write_node,
             [('connectomes', 'connectome_based_processing.@connectomes')]),
        ])
Example #15
    def build_output_node(self):
        """Build and connect an output node to the pipeline."""
        import nipype.interfaces.io as nio
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils

        # Writing CAPS
        # ============
        join_node = npe.JoinNode(
            name="JoinOutputs",
            joinsource="ReadingFiles",
            interface=nutil.IdentityInterface(fields=self.get_output_fields()),
        )

        write_node = npe.MapNode(
            name="WritingCAPS",
            iterfield=["container"]
            + [f"connectome_based_processing.@{o}" for o in self.get_output_fields()],
            interface=nio.DataSink(
                infields=[
                    f"connectome_based_processing.@{o}"
                    for o in self.get_output_fields()
                ]
            ),
        )
        write_node.inputs.base_directory = self.caps_directory
        write_node.inputs.container = utils.get_containers(self.subjects, self.sessions)
        write_node.inputs.substitutions = [("trait_added", "")]
        write_node.inputs.parameterization = False

        # fmt: off
        self.connect(
            [
                # Writing CAPS
                (self.output_node, join_node, [("response", "response")]),
                (self.output_node, join_node, [("fod", "fod")]),
                (self.output_node, join_node, [("tracts", "tracts")]),
                (self.output_node, join_node, [("nodes", "nodes")]),
                (self.output_node, join_node, [("connectomes", "connectomes")]),
                (join_node, write_node, [("response", "connectome_based_processing.@response")]),
                (join_node, write_node, [("fod", "connectome_based_processing.@fod")]),
                (join_node, write_node, [("tracts", "connectome_based_processing.@tracts")]),
                (join_node, write_node, [("nodes", "connectome_based_processing.@nodes")]),
                (join_node, write_node, [("connectomes", "connectome_based_processing.@connectomes")]),
            ]
        )
Example #16
def test_synchronize_join_node():
    """Test join on an input node which has the ``synchronize`` flag set to True."""
    global _products
    _products = []
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['m', 'n']), name='inputspec')
    inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])]
    inputspec.synchronize = True
    # two pre-join nodes in a parallel iterated path
    inc1 = pe.Node(IncrementInterface(), name='inc1')
    wf.connect(inputspec, 'm', inc1, 'input1')
    inc2 = pe.Node(IncrementInterface(), name='inc2')
    wf.connect(inputspec, 'n', inc2, 'input1')
    # the join node
    join = pe.JoinNode(IdentityInterface(fields=['vector1', 'vector2']),
                       joinsource='inputspec',
                       name='join')
    wf.connect(inc1, 'output1', join, 'vector1')
    wf.connect(inc2, 'output1', join, 'vector2')
    # a post-join node
    prod = pe.MapNode(ProductInterface(),
                      name='prod',
                      iterfield=['input1', 'input2'])
    wf.connect(join, 'vector1', prod, 'input1')
    wf.connect(join, 'vector2', prod, 'input2')

    result = wf.run()

    # the synchronized iterables expand to 2 iterations.
    # thus, the expanded graph contains 2 * 2 iteration pre-join nodes, 1 join
    # node and 1 post-join node.
    assert_equal(len(result.nodes()), 6,
                 "The number of expanded nodes is incorrect.")
    # the product inputs are [2, 3] and [4, 5]
    assert_equal(_products, [8, 15],
                 "The post-join products is incorrect: %s." % _products)

    os.chdir(cwd)
    rmtree(wd)
Example #17
def test_multifield_join_node():
    """Test join on several fields."""
    global _products
    _products = []
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['m', 'n']), name='inputspec')
    inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])]
    # two pre-join nodes in a parallel iterated path
    inc1 = pe.Node(IncrementInterface(), name='inc1')
    wf.connect(inputspec, 'm', inc1, 'input1')
    inc2 = pe.Node(IncrementInterface(), name='inc2')
    wf.connect(inputspec, 'n', inc2, 'input1')
    # the join node
    join = pe.JoinNode(IdentityInterface(fields=['vector1', 'vector2']),
                       joinsource='inputspec',
                       name='join')
    wf.connect(inc1, 'output1', join, 'vector1')
    wf.connect(inc2, 'output1', join, 'vector2')
    # a post-join node
    prod = pe.MapNode(ProductInterface(),
                      name='prod',
                      iterfield=['input1', 'input2'])
    wf.connect(join, 'vector1', prod, 'input1')
    wf.connect(join, 'vector2', prod, 'input2')

    result = wf.run()

    # the iterables are expanded as the cartesian product of the iterables values.
    # thus, the expanded graph contains 2 * (2 * 2) iteration pre-join nodes, 1 join
    # node and 1 post-join node.
    assert_equal(len(result.nodes()), 10,
                 "The number of expanded nodes is incorrect.")
    # the product inputs are [2, 4], [2, 5], [3, 4], [3, 5]
    assert_equal(set(_products), set([8, 10, 12, 15]),
                 "The post-join products is incorrect: %s." % _products)

    os.chdir(cwd)
    rmtree(wd)
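Examples #16 and #17 differ only in the synchronize flag; a recap of the two expansion semantics with the values used above:

inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])]
# default (Example #17): cartesian product of the value lists ->
#   (m, n) in (1, 3), (1, 4), (2, 3), (2, 4)   -> 4 iterations
# with inputspec.synchronize = True (Example #16): positional pairing ->
#   (m, n) in (1, 3), (2, 4)                   -> 2 iterations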
Example #18
def test_node_joinsource():
    """Test setting the joinsource to a Node."""
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec')
    inputspec.iterables = [('n', [1, 2])]
    # the join node
    join = pe.JoinNode(SetInterface(), joinsource=inputspec,
        joinfield='input1', name='join')

    # the joinsource is the inputspec name
    assert_equal(join.joinsource, inputspec.name,
        "The joinsource is not set to the node name.")

    os.chdir(cwd)
    rmtree(wd)
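As the assertion above shows, joinsource accepts either the iterated node's name or the node object itself; nipype stores the name in both cases, so the two forms are interchangeable (sketch reusing this example's nodes):

join_by_name = pe.JoinNode(SetInterface(), joinsource='inputspec',
                           joinfield='input1', name='join_by_name')
join_by_node = pe.JoinNode(SetInterface(), joinsource=inputspec,
                           joinfield='input1', name='join_by_node')
assert join_by_name.joinsource == join_by_node.joinsource == 'inputspec'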
Example #19
def test_identity_join_node():
    """Test an IdentityInterface join."""
    global _sum_operands
    _sum_operands = []
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    # Make the workflow.
    wf = pe.Workflow(name='test')
    # the iterated input node
    inputspec = pe.Node(IdentityInterface(fields=['n']),
                        name='inputspec')
    inputspec.iterables = [('n', [1, 2, 3])]
    # a pre-join node in the iterated path
    pre_join1 = pe.Node(IncrementInterface(), name='pre_join1')
    wf.connect(inputspec, 'n', pre_join1, 'input1')
    # the IdentityInterface join node
    join = pe.JoinNode(IdentityInterface(fields=['vector']),
                       joinsource='inputspec', joinfield='vector',
                       name='join')
    wf.connect(pre_join1, 'output1', join, 'vector')
    # a non-iterated post-join node
    post_join1 = pe.Node(SumInterface(), name='post_join1')
    wf.connect(join, 'vector', post_join1, 'input1')

    result = wf.run()

    # the expanded graph contains 1 * 3 iteration pre-join nodes, 1 join
    # node and 1 post-join node. Nipype factors away the iterable input
    # IdentityInterface but keeps the join IdentityInterface.
    assert_equal(len(result.nodes()), 5,
        "The number of expanded nodes is incorrect.")
    assert_equal(_sum_operands[0], [2, 3, 4],
                 "The join Sum input is incorrect: %s." %_sum_operands[0])

    os.chdir(cwd)
    rmtree(wd)
Example #20
def init_bold_t2s_wf(bold_echos, echo_times, mem_gb, omp_nthreads,
                     name='bold_t2s_wf',
                     use_compression=True,
                     use_fieldwarp=False):
    """
    This workflow performs :abbr:`HMC (head motion correction)`
    on the individual echo files and uses T2SMap to generate a T2* image
    for coregistration, instead of the mean BOLD EPI.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_t2s_wf
        wf = init_bold_t2s_wf(
            bold_echos=['echo1', 'echo2', 'echo3'],
            echo_times=[13.6, 29.79, 46.59],
            mem_gb=3,
            omp_nthreads=1)

    **Parameters**

        bold_echos
            list of ME-BOLD files
        echo_times
            list of TEs associated with each echo
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        name : str
            Name of workflow (default: ``bold_t2s_wf``)
        use_compression : bool
            Save registered BOLD series as ``.nii.gz``
        use_fieldwarp : bool
            Include SDC warp in single-shot transform from BOLD to MNI

    **Inputs**

        name_source
            (one echo of) the original BOLD series NIfTI file
            Used to recover original information lost during processing
        hmc_xforms
            ITKTransform file aligning each volume to ``ref_image``

    **Outputs**

        t2s_map
            the T2* map for the EPI run
        oc_mask
            the skull-stripped optimal combination mask
    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
A T2* map was estimated from preprocessed BOLD by fitting to a
monoexponential signal decay model with log-linear regression.
For each voxel, the maximal number of echoes with high signal in that voxel was
used to fit the model.
The T2* map was used to optimally combine preprocessed BOLD across
echoes following the method described in @posse_t2s and was also retained as
the BOLD reference.
"""

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold_echos', 'name_source', 'hmc_xforms']),
        name='inputnode')
    inputnode.iterables = ('bold_echos', bold_echos)

    outputnode = pe.Node(niu.IdentityInterface(fields=['t2s_map', 'oc_mask']),
                         name='outputnode')

    LOGGER.log(25, 'Generating T2* map.')

    # Apply transforms in 1 shot
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb,
        omp_nthreads=omp_nthreads,
        use_compression=use_compression,
        use_fieldwarp=use_fieldwarp,
        name='bold_bold_trans_wf',
        split_file=True,
        interpolation='NearestNeighbor'
    )

    t2s_map = pe.JoinNode(T2SMap(te_list=echo_times),
                          joinsource='inputnode', joinfield=['in_files'],
                          name='t2s_map')

    skullstrip_bold_wf = init_skullstrip_bold_wf()

    mask_t2s = pe.Node(MaskT2SMap(), name='mask_t2s')

    workflow.connect([
        (inputnode, bold_bold_trans_wf, [
            ('bold_echos', 'inputnode.bold_file'),
            ('name_source', 'inputnode.name_source'),
            ('hmc_xforms', 'inputnode.hmc_xforms')]),
        (bold_bold_trans_wf, t2s_map, [('outputnode.bold', 'in_files')]),
        (t2s_map, skullstrip_bold_wf, [('opt_comb', 'inputnode.in_file')]),
        (t2s_map, mask_t2s, [('t2s_vol', 'image')]),
        (skullstrip_bold_wf, outputnode, [('outputnode.mask_file', 'oc_mask')]),
        (skullstrip_bold_wf, mask_t2s, [('outputnode.mask_file', 'mask')]),
        (mask_t2s, outputnode, [('masked_t2s', 't2s_map')])
    ])

    return workflow
Example #21
def run_workflow(csv_file, use_pbs, contrasts_name, template):
    workflow = pe.Workflow(name='run_level1flow')
    workflow.base_dir = os.path.abspath('./workingdirs')

    from nipype import config, logging
    config.update_config({
        'logging': {
            'log_directory': os.path.join(workflow.base_dir, 'logs'),
            'log_to_file': True,
            'workflow_level': 'DEBUG',
            'interface_level': 'DEBUG',
        }
    })
    logging.update_logging(config)

    config.enable_debug_mode()

    # redundant with enable_debug_mode() ...
    workflow.stop_on_first_crash = True
    workflow.remove_unnecessary_outputs = False
    workflow.keep_inputs = True
    workflow.hash_method = 'content'
    """
    Setup the contrast structure that needs to be evaluated. This is a list of
    lists. The inner list specifies the contrasts and has the following format:
    [Name, Stat, [list of condition names], [weights on those conditions]]. The
    condition names must match the `names` listed in the `evt_info` function
    described above.
    """

    try:
        import importlib
        mod = importlib.import_module('contrasts.' + contrasts_name)
        contrasts = mod.contrasts
        # event_names = mod.event_names
    except ImportError:
        raise RuntimeError('Unknown contrasts: %s. Must exist as a Python'
                           ' module in contrasts directory!' % contrasts_name)

    modelfit = create_workflow(contrasts)

    import bids_templates as bt

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
    ]),
                        name='input')

    assert csv_file is not None, "--csv argument must be defined!"

    reader = niu.CSVReader()
    reader.inputs.header = True
    reader.inputs.in_file = csv_file
    out = reader.run()
    subject_list = out.outputs.subject
    session_list = out.outputs.session
    run_list = out.outputs.run

    inputnode.iterables = [
        ('subject_id', subject_list),
        ('session_id', session_list),
        ('run_id', run_list),
    ]
    inputnode.synchronize = True

    templates = {
        'funcs':
        'derivatives/featpreproc/highpassed_files/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}*_bold_res-1x1x1_preproc_*.nii.gz',

        # 'funcmasks':
        # 'featpreproc/func_unwarp/sub-{subject_id}/ses-{session_id}/func/'
        #     'sub-{subject_id}_ses-{session_id}_*_run-{run_id}*_bold_res-1x1x1_preproc'
        #     '_mc_unwarped.nii.gz',
        'highpass':
        '******'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc_*.nii.gz',
        'motion_parameters':
        'derivatives/featpreproc/motion_corrected/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc.param.1D',
        'motion_outlier_files':
        'derivatives/featpreproc/motion_outliers/sub-{subject_id}/ses-{session_id}/func/'
        'art.sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc_mc'
        '_maths_outliers.txt',
        'event_log':
        'sub-{subject_id}/ses-{session_id}/func/'
        # 'sub-{subject_id}_ses-{session_id}*_bold_res-1x1x1_preproc'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}*'
        # '.nii.gz',
        '_events.tsv',
        'ref_func':
        'derivatives/featpreproc/reference/func/*.nii.gz',
        'ref_funcmask':
        'derivatives/featpreproc/reference/func_mask/*.nii.gz',
    }

    inputfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                         name='in_files')

    workflow.connect([
        (inputnode, inputfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
            ('run_id', 'run_id'),
        ]),
    ])

    join_input = pe.JoinNode(
        niu.IdentityInterface(fields=[
            # 'subject_id',
            # 'session_id',
            # 'run_id',
            'funcs',
            'highpass',
            'motion_parameters',
            'motion_outlier_files',
            'event_log',
            'ref_func',
            'ref_funcmask',
        ]),
        joinsource='input',
        joinfield=[
            'funcs',
            'highpass',
            'motion_parameters',
            'motion_outlier_files',
            'event_log',
        ],
        # unique=True,
        name='join_input')

    workflow.connect([
        (inputfiles, join_input, [
            ('funcs', 'funcs'),
            ('highpass', 'highpass'),
            ('motion_parameters', 'motion_parameters'),
            ('motion_outlier_files', 'motion_outlier_files'),
            ('event_log', 'event_log'),
            ('ref_func', 'ref_func'),
            ('ref_funcmask', 'ref_funcmask'),
        ]),
        (join_input, modelfit, [
            ('funcs', 'inputspec.funcs'),
            ('highpass', 'inputspec.highpass'),
            ('motion_parameters', 'inputspec.motion_parameters'),
            ('motion_outlier_files', 'inputspec.motion_outlier_files'),
            ('event_log', 'inputspec.event_log'),
            ('ref_func', 'inputspec.ref_func'),
            ('ref_funcmask', 'inputspec.ref_funcmask'),
        ]),
    ])

    modelfit.inputs.inputspec.fwhm = 2.0
    modelfit.inputs.inputspec.highpass = 50
    modelfit.write_graph(simple_form=True)
    modelfit.write_graph(graph2use='orig', format='png', simple_form=True)
    # modelfit.write_graph(graph2use='detailed', format='png', simple_form=False)

    workflow.stop_on_first_crash = True
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = False
    workflow.write_graph(simple_form=True)
    workflow.write_graph(graph2use='colored', format='png', simple_form=True)
    # workflow.write_graph(graph2use='detailed', format='png', simple_form=False)
    if use_pbs:
        workflow.run(plugin='PBS',
                     plugin_args={'template': os.path.expanduser(template)})
    else:
        workflow.run()
Example #22
def init_func_preproc_wf(bold_file):
    """
    This workflow controls the functional preprocessing stages of *fMRIPrep*.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.tests import mock_config
            from fmriprep import config
            from fmriprep.workflows.bold.base import init_func_preproc_wf
            with mock_config():
                bold_file = config.execution.bids_dir / 'sub-01' / 'func' \
                    / 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz'
                wf = init_func_preproc_wf(str(bold_file))

    Parameters
    ----------
    bold_file
        BOLD series NIfTI file

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    t1w_preproc
        Bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    t1w_aseg
        Segmentation of structural image, done with FreeSurfer.
    t1w_aparc
        Parcellation of structural image, done with FreeSurfer.
    t1w_tpms
        List of tissue probability maps in T1w space
    template
        List of templates to target
    anat2std_xfm
        List of transform files, collated with templates
    std2anat_xfm
        List of inverse transform files, collated with templates
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w

    Outputs
    -------
    bold_t1
        BOLD series, resampled to T1w space
    bold_mask_t1
        BOLD series mask in T1w space
    bold_std
        BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    confounds
        TSV of confounds
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    aroma_noise_ics
        Noise components identified by ICA-AROMA
    melodic_mix
        FSL MELODIC mixing matrix
    bold_cifti
        BOLD CIFTI image
    cifti_variant
        combination of target spaces for `bold_cifti`

    See Also
    --------

    * :py:func:`~niworkflows.func.util.init_bold_reference_wf`
    * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
    * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
    * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confounds_wf`
    * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
    * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
    * :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
    * :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
    * :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
    * :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
    * :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.func.util import init_bold_reference_wf
    from niworkflows.interfaces.nibabel import ApplyMask
    from niworkflows.interfaces.utility import KeySelect
    from niworkflows.interfaces.utils import DictMerge
    from sdcflows.workflows.base import init_sdc_estimate_wf, fieldmap_wrangler

    ref_file = bold_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10
    multiecho = isinstance(bold_file, list)

    # Have some options handy
    layout = config.execution.layout
    omp_nthreads = config.nipype.omp_nthreads
    freesurfer = config.workflow.run_reconall
    spaces = config.workflow.spaces

    if multiecho:
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
        ref_file = dict(zip(tes, bold_file))[min(tes)]

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    config.loggers.workflow.debug(
        'Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
        'Memory resampled/largemem=%.2f/%.2f GB.', ref_file,
        mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # Find associated sbref, if possible
    entities = layout.parse_file_entities(ref_file)
    entities['suffix'] = 'sbref'
    entities['extension'] = ['nii', 'nii.gz']  # Overwrite extensions
    files = layout.get(return_type='file', **entities)
    refbase = os.path.basename(ref_file)
    if 'sbref' in config.workflow.ignore:
        config.loggers.workflow.info("Single-band reference files ignored.")
    elif files and multiecho:
        config.loggers.workflow.warning(
            "Single-band reference found, but not supported in "
            "multi-echo workflows at this time. Ignoring.")
    elif files:
        sbref_file = files[0]
        sbbase = os.path.basename(sbref_file)
        if len(files) > 1:
            config.loggers.workflow.warning(
                "Multiple single-band reference files found for {}; using "
                "{}".format(refbase, sbbase))
        else:
            config.loggers.workflow.info(
                "Using single-band reference file %s.", sbbase)
    else:
        config.loggers.workflow.info("No single-band-reference found for %s.",
                                     refbase)

    metadata = layout.get_metadata(ref_file)

    # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
    fmaps = None
    if 'fieldmaps' not in config.workflow.ignore:
        fmaps = fieldmap_wrangler(layout,
                                  ref_file,
                                  use_syn=config.workflow.use_syn,
                                  force_syn=config.workflow.force_syn)
    elif config.workflow.use_syn or config.workflow.force_syn:
        # If fieldmaps are not enabled, activate SyN-SDC in unforced (False) mode
        fmaps = {'syn': False}

    # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
    run_stc = (bool(metadata.get("SliceTiming"))
               and 'slicetiming' not in config.workflow.ignore
               and (_get_series_len(ref_file) > 4 or "TooShort"))

    # Check if MEEPI for T2* coregistration target
    if config.workflow.t2s_coreg and not multiecho:
        config.loggers.workflow.warning(
            "No multiecho BOLD images found for T2* coregistration. "
            "Using standard EPI-T1 coregistration.")
        config.workflow.t2s_coreg = False

    # By default, force-bbr for t2s_coreg unless user specifies otherwise
    if config.workflow.t2s_coreg and config.workflow.use_bbr is None:
        config.workflow.use_bbr = True

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_file', 'subjects_dir', 'subject_id', 't1w_preproc', 't1w_mask',
        't1w_dseg', 't1w_tpms', 't1w_aseg', 't1w_aparc', 'anat2std_xfm',
        'std2anat_xfm', 'template', 't1w2fsnative_xfm', 'fsnative2t1w_xfm'
    ]),
                        name='inputnode')
    inputnode.inputs.bold_file = bold_file
    if sbref_file is not None:
        from niworkflows.interfaces.images import ValidateImage
        val_sbref = pe.Node(ValidateImage(in_file=sbref_file),
                            name='val_sbref')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1',
        'bold_aparc_t1', 'bold_std', 'bold_std_ref', 'bold_mask_std',
        'bold_aseg_std', 'bold_aparc_std', 'bold_native', 'bold_cifti',
        'cifti_variant', 'cifti_metadata', 'cifti_density', 'surfaces',
        'confounds', 'aroma_noise_ics', 'melodic_mix', 'nonaggr_denoised_file',
        'confounds_metadata'
    ]),
                         name='outputnode')

    # Generate a brain-masked conversion of the t1w
    t1w_brain = pe.Node(ApplyMask(), name='t1w_brain')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                         name='boldbuffer')

    summary = pe.Node(FunctionalSummary(
        slice_timing=run_stc,
        registration=('FSL', 'FreeSurfer')[freesurfer],
        registration_dof=config.workflow.bold2t1w_dof,
        registration_init=config.workflow.bold2t1w_init,
        pe_direction=metadata.get("PhaseEncodingDirection"),
        tr=metadata.get("RepetitionTime")),
                      name='summary',
                      mem_gb=config.DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)
    summary.inputs.dummy_scans = config.workflow.dummy_scans

    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=config.workflow.cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=str(config.execution.output_dir),
        spaces=spaces,
        use_aroma=config.workflow.use_aroma,
    )

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_t1', 'inputnode.bold_t1'),
            ('bold_t1_ref', 'inputnode.bold_t1_ref'),
            ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
            ('bold_mask_t1', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surf_files'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_metadata', 'inputnode.cifti_metadata'),
            ('cifti_density', 'inputnode.cifti_density'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
        ]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
    bold_reference_wf.inputs.inputnode.dummy_scans = config.workflow.dummy_scans
    if sbref_file is not None:
        workflow.connect([
            (val_sbref, bold_reference_wf, [('out_file',
                                             'inputnode.sbref_file')]),
        ])

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'),
                         name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(name='bold_reg_wf',
                                   freesurfer=freesurfer,
                                   use_bbr=config.workflow.use_bbr,
                                   bold2t1w_dof=config.workflow.bold2t1w_dof,
                                   bold2t1w_init=config.workflow.bold2t1w_init,
                                   mem_gb=mem_gb['resampled'],
                                   omp_nthreads=omp_nthreads,
                                   use_compression=False)

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=bool(fmaps),
                                             multiecho=multiecho,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        regressors_all_comps=config.workflow.regressors_all_comps,
        regressors_fd_th=config.workflow.regressors_fd_th,
        regressors_dvars_th=config.workflow.regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not config.execution.low_mem,
        use_fieldwarp=bool(fmaps),
        name='bold_bold_trans_wf')
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf, [('outputnode.skip_vols',
                                               'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([(bold_reference_wf, bold_stc_wf, [
                ('outputnode.bold_file', 'inputnode.bold_file')
            ])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([(meepi_echos, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([(bold_reference_wf, boldbuffer,
                           [('outputnode.bold_file', 'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_estimate_wf(fmaps,
                                       metadata,
                                       omp_nthreads=omp_nthreads,
                                       debug=config.execution.debug)

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from niworkflows.func.util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        inputnode.inputs.bold_file = ref_file  # Replace reference w first echo

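        # Collapse the per-echo iterable branch back into a single list of
        # skull-stripped echoes before computing the optimal combination.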
        join_echos = pe.JoinNode(
            niu.IdentityInterface(fields=['bold_files']),
            joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
            joinfield=['bold_files'],
            name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       t2s_coreg=config.workflow.t2s_coreg,
                                       name='bold_t2smap_wf')

        workflow.connect([
            (skullstrip_bold_wf, join_echos,
             [('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [('bold_files', 'inputnode.bold_file')]),
        ])

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        (inputnode, t1w_brain, [('t1w_preproc', 'in_file'),
                                ('t1w_mask', 'in_mask')]),
        # Generate early reference
        (inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file')]),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf,
         [('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
          ('outputnode.bold_file', 'inputnode.bold_file')]),
        (bold_reference_wf, summary, [('outputnode.algo_dummy_scans',
                                       'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (
            inputnode,
            bold_reg_wf,
            [
                ('t1w_dseg', 'inputnode.t1w_dseg'),
                # Undefined if --fs-no-reconall, but this is safe
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm')
            ]),
        (t1w_brain, bold_reg_wf, [('out_file', 'inputnode.t1w_brain')]),
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('t1w_mask', 'inputnode.t1w_mask'),
                                       ('t1w_aseg', 'inputnode.t1w_aseg'),
                                       ('t1w_aparc', 'inputnode.t1w_aparc')]),
        (t1w_brain, bold_t1_trans_wf, [('out_file', 'inputnode.t1w_brain')]),
        # unused if multiecho, but this is safe
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms',
                                          'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.itk_bold_to_t1',
                                          'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode,
         [('outputnode.bold_t1', 'bold_t1'),
          ('outputnode.bold_t1_ref', 'bold_t1_ref'),
          ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
          ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (t1w_brain, bold_sdc_wf, [('out_file', 'inputnode.t1w_brain')]),
        (bold_reference_wf, bold_sdc_wf,
         [('outputnode.ref_image', 'inputnode.epi_file'),
          ('outputnode.ref_image_brain', 'inputnode.epi_brain'),
          ('outputnode.bold_mask', 'inputnode.epi_mask')]),
        (bold_sdc_wf, bold_t1_trans_wf, [('outputnode.out_warp',
                                          'inputnode.fieldwarp')]),
        (bold_sdc_wf, bold_bold_trans_wf,
         [('outputnode.out_warp', 'inputnode.fieldwarp'),
          ('outputnode.epi_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, summary, [('outputnode.method',
                                 'distortion_correction')]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('t1w_tpms', 'inputnode.t1w_tpms'),
                                        ('t1w_mask', 'inputnode.t1w_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [('outputnode.movpar_file',
                                           'inputnode.movpar_file')]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [('outputnode.skip_vols',
                                                 'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_metadata', 'confounds_metadata'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [('out_files',
                                           'inputnode.bold_file')]),
        (bold_hmc_wf, bold_bold_trans_wf, [('outputnode.xforms',
                                            'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    if not config.workflow.t2s_coreg:
        workflow.connect([
            (bold_sdc_wf, bold_reg_wf, [('outputnode.epi_brain',
                                         'inputnode.ref_bold_brain')]),
            (bold_sdc_wf, bold_t1_trans_wf,
             [('outputnode.epi_brain', 'inputnode.ref_bold_brain'),
              ('outputnode.epi_mask', 'inputnode.ref_bold_mask')]),
        ])
    else:
        workflow.connect([
            # For t2s_coreg, replace EPI-to-T1w registration inputs
            (bold_t2s_wf, bold_reg_wf, [('outputnode.bold_ref_brain',
                                         'inputnode.ref_bold_brain')]),
            (bold_t2s_wf, bold_t1_trans_wf,
             [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
              ('outputnode.bold_mask', 'inputnode.ref_bold_mask')]),
        ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, func_derivatives_wf, [('bold_file',
                                               'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_split, bold_t1_trans_wf, [('out_files',
                                             'inputnode.bold_split')]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf,
             [(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [('outputnode.bold',
                                                       'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_t2s_wf, bold_t1_trans_wf, [('outputnode.bold',
                                              'inputnode.bold_split')]),
        ])

    if fmaps:
        from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
        # Report on BOLD correction
        fmap_unwarp_report_wf = init_sdc_unwarp_report_wf()
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [('t1w_dseg',
                                                 'inputnode.in_seg')]),
            (bold_reference_wf, fmap_unwarp_report_wf,
             [('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [('outputnode.itk_t1_to_bold',
                                                   'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [('outputnode.epi_corrected',
                                                   'inputnode.in_post')]),
        ])

        # Overwrite ``out_path_base`` of unwarping DataSinks
        for node in fmap_unwarp_report_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                fmap_unwarp_report_wf.get_node(
                    node).interface.out_path_base = 'fmriprep'

        for node in bold_sdc_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                bold_sdc_wf.get_node(node).interface.out_path_base = 'fmriprep'

        if 'syn' in fmaps:
            sdc_select_std = pe.Node(KeySelect(fields=['std2anat_xfm']),
                                     name='sdc_select_std',
                                     run_without_submitting=True)
            sdc_select_std.inputs.key = 'MNI152NLin2009cAsym'
            workflow.connect([
                (inputnode, sdc_select_std, [('std2anat_xfm', 'std2anat_xfm'),
                                             ('template', 'keys')]),
                (sdc_select_std, bold_sdc_wf, [('std2anat_xfm',
                                                'inputnode.std2anat_xfm')]),
            ])

        if fmaps.get('syn') is True:  # SyN forced
            syn_unwarp_report_wf = init_sdc_unwarp_report_wf(
                name='syn_unwarp_report_wf', forcedsyn=True)
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [('t1w_dseg',
                                                    'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf,
                 [('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf,
                 [('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf, [('outputnode.syn_ref',
                                                      'inputnode.in_post')]),
            ])

            # Overwrite ``out_path_base`` of unwarping DataSinks
            for node in syn_unwarp_report_wf.list_node_names():
                if node.split('.')[-1].startswith('ds_'):
                    syn_unwarp_report_wf.get_node(
                        node).interface.out_path_base = 'fmriprep'

    # Map final BOLD mask into T1w space (if required)
    nonstd_spaces = set(spaces.get_nonstandard())
    if nonstd_spaces.intersection(('T1w', 'anat')):
        from niworkflows.interfaces.fixes import (FixHeaderApplyTransforms as
                                                  ApplyTransforms)

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                  float=True),
                                  name='boldmask_to_t1w',
                                  mem_gb=0.1)
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.itk_bold_to_t1',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             boldmask_to_t1w, [('outputnode.bold_mask', 'input_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_t1')]),
        ])

    if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
        workflow.connect([
            (bold_bold_trans_wf, outputnode, [('outputnode.bold',
                                               'bold_native')]),
            (bold_bold_trans_wf, func_derivatives_wf,
             [('outputnode.bold_ref', 'inputnode.bold_native_ref'),
              ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            spaces=spaces,
            name='bold_std_trans_wf',
            use_compression=not config.execution.low_mem,
            use_fieldwarp=bool(fmaps),
        )
        workflow.connect([
            (inputnode, bold_std_trans_wf,
             [('template', 'inputnode.templates'),
              ('anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('bold_file', 'inputnode.name_source'),
              ('t1w_aseg', 'inputnode.bold_aseg'),
              ('t1w_aparc', 'inputnode.bold_aparc')]),
            (bold_hmc_wf, bold_std_trans_wf, [('outputnode.xforms',
                                               'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_std_trans_wf, [('outputnode.itk_bold_to_t1',
                                               'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             bold_std_trans_wf, [('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_std_trans_wf, [('outputnode.out_warp',
                                               'inputnode.fieldwarp')]),
            (bold_std_trans_wf, outputnode,
             [('outputnode.bold_std', 'bold_std'),
              ('outputnode.bold_std_ref', 'bold_std_ref'),
              ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('outputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('outputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode,
                 [('outputnode.bold_aseg_std', 'bold_aseg_std'),
                  ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        if not multiecho:
            workflow.connect([(bold_split, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])
        else:
            split_opt_comb = bold_split.clone(name='split_opt_comb')
            workflow.connect([(bold_t2s_wf, split_opt_comb,
                               [('outputnode.bold', 'in_file')]),
                              (split_opt_comb, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])

        # func_derivatives_wf internally parametrizes over snapshotted spaces.
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.spatial_reference',
                 'inputnode.spatial_reference'),
                ('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('outputnode.bold_std', 'inputnode.bold_std'),
                ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])

        if config.workflow.use_aroma:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf
            ica_aroma_wf = init_ica_aroma_wf(
                mem_gb=mem_gb['resampled'],
                metadata=metadata,
                omp_nthreads=omp_nthreads,
                use_fieldwarp=bool(fmaps),
                err_on_aroma_warn=config.workflow.aroma_err_on_warn,
                aroma_melodic_dim=config.workflow.aroma_melodic_dim,
                name='ica_aroma_wf')

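            # ``_to_join`` (a helper assumed to be defined at module level)
            # appends the AROMA regressors to the confounds file.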
            join = pe.Node(niu.Function(output_names=["out_file"],
                                        function=_to_join),
                           name='aroma_confounds')

            mrg_conf_metadata = pe.Node(niu.Merge(2),
                                        name='merge_confound_metadata',
                                        run_without_submitting=True)
            mrg_conf_metadata2 = pe.Node(DictMerge(),
                                         name='merge_confound_metadata2',
                                         run_without_submitting=True)
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
            workflow.connect([
                (inputnode, ica_aroma_wf, [('bold_file',
                                            'inputnode.name_source')]),
                (bold_hmc_wf, ica_aroma_wf, [('outputnode.movpar_file',
                                              'inputnode.movpar_file')]),
                (bold_reference_wf, ica_aroma_wf, [('outputnode.skip_vols',
                                                    'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [('outputnode.confounds_file',
                                            'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                 [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join, [('outputnode.aroma_confounds',
                                       'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                 [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                 [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                  ('outputnode.melodic_mix', 'melodic_mix'),
                  ('outputnode.nonaggr_denoised_file',
                   'nonaggr_denoised_file')]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict',
                                                   'confounds_metadata')]),
                (bold_std_trans_wf, ica_aroma_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
            ])

    # SURFACES ##################################################################################
    # Freesurfer
    freesurfer_spaces = spaces.get_fs_spaces()
    if freesurfer and freesurfer_spaces:
        config.loggers.workflow.debug(
            'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(
            mem_gb=mem_gb['resampled'],
            surface_spaces=freesurfer_spaces,
            medial_surface_nan=config.workflow.medial_surface_nan,
            name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf,
             [('t1w_preproc', 'inputnode.t1w_preproc'),
              ('subjects_dir', 'inputnode.subjects_dir'),
              ('subject_id', 'inputnode.subject_id'),
              ('t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1',
                                               'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
            (bold_surf_wf, func_derivatives_wf, [('outputnode.target',
                                                  'inputnode.surf_refs')]),
        ])

        # CIFTI output
        if config.workflow.cifti_output:
            from .resampling import init_bold_grayords_wf
            bold_grayords_wf = init_bold_grayords_wf(
                grayord_density=config.workflow.cifti_output,
                mem_gb=mem_gb['resampled'],
                repetition_time=metadata['RepetitionTime'])

            workflow.connect([
                (inputnode, bold_grayords_wf, [('subjects_dir',
                                                'inputnode.subjects_dir')]),
                (bold_std_trans_wf, bold_grayords_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.spatial_reference',
                   'inputnode.spatial_reference')]),
                (bold_surf_wf, bold_grayords_wf, [
                    ('outputnode.surfaces', 'inputnode.surf_files'),
                    ('outputnode.target', 'inputnode.surf_refs'),
                ]),
                (bold_grayords_wf, outputnode,
                 [('outputnode.cifti_bold', 'bold_cifti'),
                  ('outputnode.cifti_variant', 'cifti_variant'),
                  ('outputnode.cifti_metadata', 'cifti_metadata'),
                  ('outputnode.cifti_density', 'cifti_density')]),
            ])

    if spaces.get_spaces(nonstandard=False, dim=(3, )):
        carpetplot_wf = init_carpetplot_wf(
            mem_gb=mem_gb['resampled'],
            metadata=metadata,
            cifti_output=config.workflow.cifti_output,
            name='carpetplot_wf')

        if config.workflow.cifti_output:
            workflow.connect(bold_grayords_wf, 'outputnode.cifti_bold',
                             carpetplot_wf, 'inputnode.cifti_bold')
        else:
            # Xform to 'MNI152NLin2009cAsym' is always computed.
            carpetplot_select_std = pe.Node(
                KeySelect(fields=['std2anat_xfm'],
                          key='MNI152NLin2009cAsym'),
                name='carpetplot_select_std',
                run_without_submitting=True)

            workflow.connect([
                (inputnode, carpetplot_select_std, [('std2anat_xfm',
                                                     'std2anat_xfm'),
                                                    ('template', 'keys')]),
                (carpetplot_select_std, carpetplot_wf,
                 [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
                (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
                 carpetplot_wf, [('outputnode.bold', 'inputnode.bold'),
                                 ('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
                (bold_reg_wf, carpetplot_wf, [('outputnode.itk_t1_to_bold',
                                               'inputnode.t1_bold_xform')]),
            ])

        workflow.connect([(bold_confounds_wf, carpetplot_wf, [
            ('outputnode.confounds_file', 'inputnode.confounds_file')
        ])])

    # REPORTING ############################################################
    reportlets_dir = str(config.execution.work_dir / 'reportlets')
    ds_report_summary = pe.Node(DerivativesDataSink(desc='summary',
                                                    keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(
        DerivativesDataSink(base_directory=reportlets_dir,
                            desc='validation', keep_dtype=True),
        name='ds_report_validation',
        run_without_submitting=True,
        mem_gb=config.DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation,
         [('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = reportlets_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
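A note on the ICA-AROMA branch above: it relies on Nipype's ability to rewire an already-built graph, where `workflow.disconnect` removes the default confounds edges so that `workflow.connect` can route the AROMA-augmented outputs instead. A minimal, self-contained sketch of that pattern (the node names here are illustrative, not taken from the example):

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu

wf = pe.Workflow(name='rewire_demo')
src = pe.Node(niu.IdentityInterface(fields=['x']), name='src')
alt = pe.Node(niu.IdentityInterface(fields=['x']), name='alt')
sink = pe.Node(niu.IdentityInterface(fields=['y']), name='sink')

wf.connect([(src, sink, [('x', 'y')])])     # default wiring
wf.disconnect([(src, sink, [('x', 'y')])])  # remove the default edge
wf.connect([(alt, sink, [('x', 'y')])])     # route the alternative instead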
Exemple #23
0
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']])
datasource.inputs.subject_id = subject_list
datasource.inputs.sort_filelist = True

wf.connect(inputspec, 'subject_id', datasource, 'subject_id')
"""
Run recon-all
"""

recon_all = create_reconall_workflow()
recon_all.inputs.inputspec.subjects_dir = subjects_dir

wf.connect(datasource, 'struct', recon_all, 'inputspec.T1_files')
wf.connect(inputspec, 'subject_id', recon_all, 'inputspec.subject_id')
"""
Make average subject
"""

average = pe.JoinNode(interface=MakeAverageSubject(),
                      joinsource="inputspec",
                      joinfield="subjects_ids",
                      name="average")
average.inputs.subjects_dir = subjects_dir

wf.connect(recon_all, 'postdatasink_outputspec.subject_id', average,
           'subjects_ids')

wf.run("MultiProc", plugin_args={'n_procs': 4})
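This snippet assumes an `inputspec` node, defined earlier, that iterates over `subject_list`; the `average` JoinNode collapses the per-subject branches because its `joinsource` names that node. A minimal sketch of the assumed setup:

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

subject_list = ['sub01', 'sub02']  # illustrative subject IDs
inputspec = pe.Node(IdentityInterface(fields=['subject_id']),
                    name='inputspec')
inputspec.iterables = [('subject_id', subject_list)]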
Exemple #24
0
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipelines.
        """

        import os
        import platform
        import nipype.interfaces.spm as spm
        import nipype.interfaces.matlab as mlab
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        import clinica.pipelines.t1_volume_tissue_segmentation.t1_volume_tissue_segmentation_utils as seg_utils
        import clinica.pipelines.t1_volume_create_dartel.t1_volume_create_dartel_utils as dartel_utils
        import clinica.pipelines.t1_volume_dartel2mni.t1_volume_dartel2mni_utils as dartel2mni_utils
        from clinica.utils.io import unzip_nii

        spm_home = os.getenv("SPM_HOME")
        mlab_home = os.getenv("MATLABCMD")
        mlab.MatlabCommand.set_default_matlab_cmd(mlab_home)
        mlab.MatlabCommand.set_default_paths(spm_home)

        if 'SPMSTANDALONE_HOME' in os.environ:
            if 'MCR_HOME' in os.environ:
                matlab_cmd = os.path.join(os.environ['SPMSTANDALONE_HOME'],
                                          'run_spm12.sh') \
                             + ' ' + os.environ['MCR_HOME'] \
                             + ' script'
                spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True)
                version = spm.SPMCommand().version
            else:
                raise EnvironmentError('MCR_HOME variable not in environment, although '
                                       + 'SPMSTANDALONE_HOME has been found')
        else:
            version = spm.Info.getinfo()

        if version:
            if isinstance(version, dict):
                spm_path = version['path']
                if version['name'] == 'SPM8':
                    print('You are using SPM version 8. The recommended version to use with Clinica is SPM 12. '
                          + 'Please upgrade your SPM toolbox.')
                    tissue_map = os.path.join(spm_path, 'toolbox/Seg/TPM.nii')
                elif version['name'] == 'SPM12':
                    tissue_map = os.path.join(spm_path, 'tpm/TPM.nii')
                else:
                    raise RuntimeError('Detected SPM version is neither SPM8 nor SPM12. Please upgrade your SPM toolbox.')
            if isinstance(version, str):
                if float(version) >= 12.7169:
                    if platform.system() == 'Darwin':
                        tissue_map = os.path.join(str(spm_home), 'spm12.app/Contents/MacOS/spm12_mcr/spm12/spm12/tpm/TPM.nii')
                    else:
                        tissue_map = os.path.join(str(spm_home), 'spm12_mcr/spm/spm12/tpm/TPM.nii')
                else:
                    raise RuntimeError('SPM standalone version not supported. Please upgrade SPM standalone.')
        else:
            raise RuntimeError('SPM could not be found. Please verify your SPM_HOME environment variable.')

        # Unzipping
        # ===============================
        unzip_node = npe.MapNode(nutil.Function(input_names=['in_file'],
                                                output_names=['out_file'],
                                                function=unzip_nii),
                                 name='unzip_node', iterfield=['in_file'])

        # Unified Segmentation
        # ===============================
        new_segment = npe.MapNode(spm.NewSegment(),
                                  name='new_segment',
                                  iterfield=['channel_files'])

        if self.parameters['affine_regularization'] is not None:
            new_segment.inputs.affine_regularization = self.parameters['affine_regularization']
        if self.parameters['channel_info'] is not None:
            new_segment.inputs.channel_info = self.parameters['channel_info']
        if self.parameters['sampling_distance'] is not None:
            new_segment.inputs.sampling_distance = self.parameters['sampling_distance']
        if self.parameters['warping_regularization'] is not None:
            new_segment.inputs.warping_regularization = self.parameters['warping_regularization']

        # Check if we need to save the forward transformation for registering the T1 to the MNI space
        if self.parameters['save_t1_mni'] is not None and self.parameters['save_t1_mni']:
            if self.parameters['write_deformation_fields'] is not None:
                self.parameters['write_deformation_fields'][1] = True
            else:
                self.parameters['write_deformation_fields'] = [False, True]

        if self.parameters['write_deformation_fields'] is not None:
            new_segment.inputs.write_deformation_fields = self.parameters['write_deformation_fields']

        if self.parameters['tpm'] is not None:
            tissue_map = self.parameters['tpm']

        new_segment.inputs.tissues = seg_utils.get_tissue_tuples(tissue_map,
                                                                 self.parameters['tissue_classes'],
                                                                 self.parameters['dartel_tissues'],
                                                                 self.parameters['save_warped_unmodulated'],
                                                                 self.parameters['save_warped_modulated'])

        # Apply segmentation deformation to T1 (into MNI space)
        # ========================================================
        if self.parameters['save_t1_mni'] is not None and self.parameters['save_t1_mni']:

            t1_to_mni = npe.MapNode(seg_utils.ApplySegmentationDeformation(),
                                    name='t1_to_mni',
                                    iterfield=['deformation_field', 'in_files'])
            self.connect([
                (unzip_node, t1_to_mni, [('out_file', 'in_files')]),
                (new_segment, t1_to_mni, [('forward_deformation_field', 'deformation_field')]),
                (t1_to_mni, self.output_node, [('out_files', 't1_mni')])
            ])

        # DARTEL template
        # ===============================
        dartel_template = npe.Node(spm.DARTEL(),
                                   name='dartel_template')

        if self.parameters['iteration_parameters'] is not None:
            dartel_template.inputs.iteration_parameters = self.parameters['iteration_parameters']
        if self.parameters['optimization_parameters'] is not None:
            dartel_template.inputs.optimization_parameters = self.parameters['optimization_parameters']
        if self.parameters['regularization_form'] is not None:
            dartel_template.inputs.regularization_form = self.parameters['regularization_form']

        # DARTEL2MNI Registration
        # =======================
        dartel2mni_node = npe.MapNode(spm.DARTELNorm2MNI(),
                                      name='dartel2MNI',
                                      iterfield=['apply_to_files', 'flowfield_files'])

        if self.parameters['bounding_box'] is not None:
            dartel2mni_node.inputs.bounding_box = self.parameters['bounding_box']
        if self.parameters['voxel_size'] is not None:
            dartel2mni_node.inputs.voxel_size = self.parameters['voxel_size']
        dartel2mni_node.inputs.modulate = self.parameters['modulation']
        dartel2mni_node.inputs.fwhm = 0

        # Smoothing
        # =========
        if self.parameters['fwhm'] is not None and len(self.parameters['fwhm']) > 0:
            smoothing_node = npe.MapNode(spm.Smooth(),
                                         name='smoothing_node',
                                         iterfield=['in_files'])

            smoothing_node.iterables = [('fwhm', [[x, x, x] for x in self.parameters['fwhm']]),
                                        ('out_prefix', ['fwhm-' + str(x) + 'mm_' for x in self.parameters['fwhm']])]
            smoothing_node.synchronize = True

            join_smoothing_node = npe.JoinNode(interface=nutil.Function(input_names=['smoothed_normalized_files'],
                                                                        output_names=['smoothed_normalized_files'],
                                                                        function=dartel2mni_utils.join_smoothed_files),
                                               joinsource='smoothing_node',
                                               joinfield='smoothed_normalized_files',
                                               name='join_smoothing_node')
            self.connect([
                (dartel2mni_node, smoothing_node, [('normalized_files', 'in_files')]),
                (smoothing_node, join_smoothing_node, [('smoothed_files', 'smoothed_normalized_files')]),
                (join_smoothing_node, self.output_node, [('smoothed_normalized_files', 'smoothed_normalized_files')])
            ])
        else:
            self.output_node.inputs.smoothed_normalized_files = []

        # Atlas Statistics
        # ================
        atlas_stats_node = npe.MapNode(nutil.Function(input_names=['in_image',
                                                                   'in_atlas_list'],
                                                      output_names=['atlas_statistics'],
                                                      function=dartel2mni_utils.atlas_statistics),
                                       name='atlas_stats_node',
                                       iterfield=['in_image'])
        atlas_stats_node.inputs.in_atlas_list = self.parameters['atlas_list']

        # Connection
        # ==========
        self.connect([
            (self.input_node, unzip_node, [('input_images', 'in_file')]),
            (unzip_node, new_segment, [('out_file', 'channel_files')]),
            (new_segment, self.output_node, [('bias_corrected_images', 'bias_corrected_images'),
                                             ('bias_field_images', 'bias_field_images'),
                                             ('dartel_input_images', 'dartel_input_images'),
                                             ('forward_deformation_field', 'forward_deformation_field'),
                                             ('inverse_deformation_field', 'inverse_deformation_field'),
                                             ('modulated_class_images', 'modulated_class_images'),
                                             ('native_class_images', 'native_class_images'),
                                             ('normalized_class_images', 'normalized_class_images'),
                                             ('transformation_mat', 'transformation_mat')]),
            (new_segment, dartel_template, [(('dartel_input_images', dartel_utils.get_class_images,
                                              self.parameters['dartel_tissues']), 'image_files')]),
            (dartel_template, self.output_node, [('dartel_flow_fields', 'dartel_flow_fields'),
                                                 ('final_template_file', 'final_template_file'),
                                                 ('template_files', 'template_files')]),
            (new_segment, dartel2mni_node, [(('native_class_images', seg_utils.group_nested_images_by_subject),
                                             'apply_to_files')]),
            (dartel_template, dartel2mni_node, [(('dartel_flow_fields', dartel2mni_utils.prepare_flowfields,
                                                  self.parameters['tissue_classes']), 'flowfield_files')]),
            (dartel_template, dartel2mni_node, [('final_template_file', 'template_file')]),
            (dartel2mni_node, self.output_node, [('normalized_files', 'normalized_files')]),
            (dartel2mni_node, atlas_stats_node, [(('normalized_files', dartel2mni_utils.select_gm_images),
                                                  'in_image')]),
            (atlas_stats_node, self.output_node, [('atlas_statistics', 'atlas_statistics')])
        ])
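Worth noting in the example above: `smoothing_node` sets `synchronize = True`, which pairs its two iterables element-wise (one expansion per FWHM value, each with its matching output prefix) instead of taking their cross-product. A minimal sketch of that behaviour using the same interfaces:

import nipype.pipeline.engine as npe
import nipype.interfaces.spm as spm

smooth = npe.Node(spm.Smooth(), name='smooth')
smooth.iterables = [('fwhm', [[4, 4, 4], [8, 8, 8]]),
                    ('out_prefix', ['fwhm-4mm_', 'fwhm-8mm_'])]
smooth.synchronize = True  # 2 paired expansions rather than 2 x 2 = 4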
Exemple #25
0
def attach_spm_fmri_grouptemplate_wf(main_wf, wf_name='spm_epi_grouptemplate'):
    """ Attach a fMRI pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow picks all spm_fmri_preproc outputs 'fmri_output.warped_files' in `main_wf`
    to create a group template.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the group-template workflow to attach.


    Nipype Inputs
    -------------
    rest_input.in_file: traits.File
        The slice time and motion corrected fMRI file.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    rest_output.avg_epi_mni: input node

    datasink: nipype Node

    spm_rest_preproc_mni: nipype Workflow

    Nipype Outputs
    --------------
    group_template.fmri_template: file
        The path to the fMRI group template.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - fmri_cleanup: for the `rest_output.avg_epi` output

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    fmri_cleanup_wf = get_subworkflow(main_wf, 'fmri_cleanup')

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf, name='datasink')

    # The base name of the 'rest' file for the substitutions
    fmri_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'rest')))

    # the group template datasink
    base_outdir = datasink.inputs.base_directory
    grp_datasink = pe.Node(
        DataSink(parameterization=False, base_directory=base_outdir),
        name='{}_grouptemplate_datasink'.format(fmri_fbasename))
    grp_datasink.inputs.container = '{}_grouptemplate'.format(fmri_fbasename)

    # the list of the average EPIs from all the subjects
    # avg_epi_map = pe.MapNode(IdentityInterface(fields=['avg_epis']), iterfield=['avg_epis'], name='avg_epi_map')

    avg_epis = pe.JoinNode(IdentityInterface(fields=['avg_epis']),
                           joinsource='infosrc',
                           joinfield='avg_epis',
                           name='avg_epis')

    # directly warp the avg EPI to the SPM standard template
    warp_epis = spm_warp_to_mni("spm_warp_avgepi_to_mni")

    # the group template workflow
    template_wf = spm_create_group_template_wf(wf_name)

    # output node
    output = setup_node(IdentityInterface(fields=['fmri_template']),
                        name='group_template')

    # group dataSink output substitutions
    regexp_subst = [
        (r'/wgrptemplate{fmri}_merged_mean_smooth.nii$',
         '/{fmri}_grouptemplate_mni.nii'),
        (r'/w{fmri}_merged_mean_smooth.nii$', '/{fmri}_grouptemplate_mni.nii'),
    ]
    regexp_subst = format_pair_list(regexp_subst, fmri=fmri_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    grp_datasink.inputs.regexp_substitutions = extend_trait_list(
        grp_datasink.inputs.regexp_substitutions, regexp_subst)

    # Connect the nodes
    main_wf.connect([
        # the avg EPI inputs
        (fmri_cleanup_wf, avg_epis, [('rest_output.avg_epi', 'avg_epis')]),

        # warp avg EPIs to MNI
        (avg_epis, warp_epis, [('avg_epis', 'warp_input.in_files')]),

        # group template wf
        (warp_epis, template_wf, [('warp_output.warped_files',
                                   'grptemplate_input.in_files')]),

        # output node
        (template_wf, output, [('grptemplate_output.template',
                                'fmri_template')]),

        # template output
        (output, grp_datasink, [('fmri_template', '@fmri_group_template')]),
        (warp_epis, grp_datasink, [('warp_output.warped_files',
                                    'individuals.@warped')]),
    ])

    return main_wf
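The datasink renaming above hinges on DataSink's `regexp_substitutions`: each (pattern, replacement) pair is applied to the output path as files are deposited. A minimal sketch with illustrative values (the pattern below assumes a 'rest' base name):

from nipype.interfaces.io import DataSink

ds = DataSink(parameterization=False, base_directory='/tmp/group_out')
# Rename the warped, merged group template as it is sunk (illustrative pattern).
ds.inputs.regexp_substitutions = [
    (r'/wgrptemplaterest_merged_mean_smooth.nii$',
     '/rest_grouptemplate_mni.nii'),
]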
Exemple #26
0
def init_fmridenoise_wf(bids_dir,
                        derivatives='fmriprep',
                        task=[],
                        session=[],
                        subject=[],
                        pipelines_paths=get_pipelines_paths(),
                        smoothing=True,
                        ica_aroma=False,
                        high_pass=0.008,
                        low_pass=0.08,
                        # desc=None,
                        # ignore=None, force_index=None,
                        base_dir='/tmp/fmridenoise/', name='fmridenoise_wf'
                        ):
    workflow = pe.Workflow(name=name, base_dir=base_dir)
    temps.base_dir = base_dir

    # 1) --- Selecting pipeline

    # Inputs: fulfilled
    pipelineselector = pe.Node(
        PipelineSelector(),
        name="PipelineSelector")
    pipelineselector.iterables = ('pipeline_path', pipelines_paths)
    # Outputs: pipeline, pipeline_name, low_pass, high_pass

    # 2) --- Loading BIDS structure

    # Inputs: directory, task, derivatives
    grabbing_bids = pe.Node(
        BIDSGrab(
            bids_dir=bids_dir,
            derivatives=derivatives,
            task=task,
            session=session,
            subject=subject,
            ica_aroma=ica_aroma
        ),
        name="BidsGrabber")
    # Outputs: fmri_prep, conf_raw, conf_json, entities, tr_dict

    # 3) --- Confounds preprocessing

    # Inputs: pipeline, conf_raw, conf_json
    temppath = os.path.join(base_dir, 'prep_conf')
    prep_conf = pe.MapNode(
        Confounds(
            output_dir=temps.mkdtemp(temppath)
        ),
        iterfield=['conf_raw', 'conf_json', 'entities'],
        name="ConfPrep")
    # Outputs: conf_prep, low_pass, high_pass

    # 4) --- Denoising

    # Inputs: conf_prep, low_pass, high_pass
    iterate = ['fmri_prep', 'conf_prep', 'entities']
    if ica_aroma:
        iterate.append('fmri_prep_aroma')

    temppath = os.path.join(base_dir, 'denoise')
    denoise = pe.MapNode(
        Denoise(
            smoothing=smoothing,
            high_pass=high_pass,
            low_pass=low_pass,
            ica_aroma=ica_aroma,
            output_dir=temps.mkdtemp(temppath)
        ),
        iterfield=iterate,
        name="Denoiser", mem_gb=6)
    # Outputs: fmri_denoised

    # 5) --- Connectivity estimation

    # Inputs: fmri_denoised
    temppath = os.path.join(base_dir, 'connectivity')
    parcellation_path = get_parcelation_file_path()
    connectivity = pe.MapNode(
        Connectivity(
            output_dir=temps.mkdtemp(temppath),
            parcellation=parcellation_path
        ),
        iterfield=['fmri_denoised'],
        name='ConnCalc')
    # Outputs: conn_mat, carpet_plot

    # 6) --- Group confounds

    # Inputs: conf_summary, pipeline_name
    # FIXME BEGIN
    # This is part of a temporary solution.
    # Group nodes write to the BIDS dir instead of tmp and let files be grabbed by the datasink.
    os.makedirs(os.path.join(bids_dir, 'derivatives', 'fmridenoise'), exist_ok=True)
    # FIXME END
    group_conf_summary = pe.Node(
        GroupConfounds(
            output_dir=os.path.join(bids_dir, 'derivatives', 'fmridenoise'),
        ),
        name="GroupConf")

    # Outputs: group_conf_summary

    # 7) --- Group connectivity

    # Inputs: corr_mat, pipeline_name

    group_connectivity = pe.Node(
        GroupConnectivity(
            output_dir=os.path.join(bids_dir, 'derivatives', 'fmridenoise'),
        ),
        name="GroupConn")

    # Outputs: group_corr_mat

    # 8) --- Quality measures

    # Inputs: group_corr_mat, group_conf_summary, pipeline_name

    quality_measures = pe.MapNode(
        QualityMeasures(
            output_dir=os.path.join(bids_dir, 'derivatives', 'fmridenoise'),
            distance_matrix=get_distance_matrix_file_path()
        ),
        iterfield=['group_corr_mat', 'group_conf_summary'],
        name="QualityMeasures")
    # Outputs: fc_fd_summary, edges_weight, edges_weight_clean

    # 9) --- Merge quality measures into lists for further processing

    # Inputs: fc_fd_summary, edges_weight, edges_weight_clean

    merge_quality_measures = pe.JoinNode(MergeGroupQualityMeasures(),
                                         joinsource=pipelineselector,
                                         name="Merge")

    # Outputs: fc_fd_summary, edges_weight

    # 10) --- Quality measures across pipelines

    # Inputs: fc_fd_summary, edges_weight

    pipelines_quality_measures = pe.Node(
        PipelinesQualityMeasures(
            output_dir=os.path.join(bids_dir, 'derivatives', 'fmridenoise'),
        ),
        name="PipelinesQC")

    # Outputs: pipelines_fc_fd_summary, pipelines_edges_weight

    # 11) --- Report from data

    report_creator = pe.JoinNode(
        ReportCreator(
            group_data_dir=os.path.join(bids_dir, 'derivatives', 'fmridenoise')
        ),
        joinsource=pipelineselector,
        joinfield=['pipelines', 'pipelines_names'],
        name='ReportCreator')

    # 12) --- Save derivatives
    # TODO: Fill missing in/out
    ds_confounds = pe.MapNode(BIDSDataSink(base_directory=bids_dir),
                              iterfield=['in_file', 'entities'],
                              name="ds_confounds")

    ds_denoise = pe.MapNode(BIDSDataSink(base_directory=bids_dir),
                            iterfield=['in_file', 'entities'],
                            name="ds_denoise")

    ds_connectivity = pe.MapNode(BIDSDataSink(base_directory=bids_dir),
                                 iterfield=['in_file', 'entities'],
                                 name="ds_connectivity")

    ds_carpet_plot = pe.MapNode(BIDSDataSink(base_directory=bids_dir),
                                iterfield=['in_file', 'entities'],
                                name="ds_carpet_plot")

    ds_matrix_plot = pe.MapNode(BIDSDataSink(base_directory=bids_dir),
                                iterfield=['in_file', 'entities'],
                                name="ds_matrix_plot")


    # --- Connecting nodes

    workflow.connect([
        (grabbing_bids, denoise, [('tr_dict', 'tr_dict')]),
        (grabbing_bids, denoise, [('fmri_prep', 'fmri_prep'),
                                  ('fmri_prep_aroma', 'fmri_prep_aroma')]),
        (grabbing_bids, denoise, [('entities', 'entities')]),
        (grabbing_bids, prep_conf, [('conf_raw', 'conf_raw'),
                                    ('conf_json', 'conf_json'),
                                    ('entities', 'entities')]),
        (grabbing_bids, ds_confounds, [('entities', 'entities')]),
        (grabbing_bids, ds_denoise, [('entities', 'entities')]),
        (grabbing_bids, ds_connectivity, [('entities', 'entities')]),
        (grabbing_bids, ds_carpet_plot, [('entities', 'entities')]),
        (grabbing_bids, ds_matrix_plot, [('entities', 'entities')]),

        (pipelineselector, prep_conf, [('pipeline', 'pipeline')]),
        (pipelineselector, denoise, [('pipeline', 'pipeline')]),
        (prep_conf, group_conf_summary, [('conf_summary', 'conf_summary'),
                                         ('pipeline_name', 'pipeline_name')]),

        (pipelineselector, ds_denoise, [('pipeline_name', 'pipeline_name')]),
        (pipelineselector, ds_connectivity, [('pipeline_name', 'pipeline_name')]),
        (pipelineselector, ds_confounds, [('pipeline_name', 'pipeline_name')]),
        (pipelineselector, ds_carpet_plot, [('pipeline_name', 'pipeline_name')]),
        (pipelineselector, ds_matrix_plot, [('pipeline_name', 'pipeline_name')]),

        (prep_conf, denoise, [('conf_prep', 'conf_prep')]),
        (denoise, connectivity, [('fmri_denoised', 'fmri_denoised')]),

        (prep_conf, group_connectivity, [('pipeline_name', 'pipeline_name')]),
        (connectivity, group_connectivity, [('corr_mat', 'corr_mat')]),

        (prep_conf, ds_confounds, [('conf_prep', 'in_file')]),
        (denoise, ds_denoise, [('fmri_denoised', 'in_file')]),
        (connectivity, ds_connectivity, [('corr_mat', 'in_file')]),
        (connectivity, ds_carpet_plot, [('carpet_plot', 'in_file')]),
        (connectivity, ds_matrix_plot, [('matrix_plot', 'in_file')]),

        (group_connectivity, quality_measures, [('pipeline_name', 'pipeline_name'),
                                                ('group_corr_mat', 'group_corr_mat')]),
        (group_conf_summary, quality_measures, [('group_conf_summary', 'group_conf_summary')]),
        (quality_measures, merge_quality_measures, [('fc_fd_summary', 'fc_fd_summary'),
                                                    ('edges_weight', 'edges_weight'),
                                                    ('edges_weight_clean', 'edges_weight_clean'),
                                                    ('exclude_list', 'exclude_list')]),
        (merge_quality_measures, pipelines_quality_measures,
            [('fc_fd_summary', 'fc_fd_summary'),
             ('edges_weight', 'edges_weight'),
             ('edges_weight_clean', 'edges_weight_clean')]),
        (merge_quality_measures, report_creator,
            [('exclude_list', 'excluded_subjects')]),
        (pipelines_quality_measures, report_creator,
            [('plot_pipeline_edges_density', 'plot_pipeline_edges_density'),
             ('plot_pipelines_edges_density_no_high_motion', 'plot_pipelines_edges_density_no_high_motion'),
             ('plot_pipelines_fc_fd_pearson', 'plot_pipelines_fc_fd_pearson'),
             ('plot_pipelines_fc_fd_uncorr', 'plot_pipelines_fc_fd_uncorr'),
             ('plot_pipelines_distance_dependence', 'plot_pipelines_distance_dependence')]),
        (pipelineselector, report_creator,
            [('pipeline', 'pipelines'),
             ('pipeline_name', 'pipelines_names')])
    ])

    return workflow
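The `Denoiser` MapNode above builds its `iterfield` list conditionally; whichever fields end up listed must all receive equal-length lists, which are then iterated in lockstep. A minimal sketch of that MapNode behaviour with a toy function:

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu

def _add(a, b):
    return a + b

adder = pe.MapNode(niu.Function(input_names=['a', 'b'],
                                output_names=['out'],
                                function=_add),
                   iterfield=['a', 'b'],
                   name='adder')
adder.inputs.a = [1, 2, 3]
adder.inputs.b = [10, 20, 30]
result = adder.run()  # result.outputs.out == [11, 22, 33]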
Exemple #27
0
def init_asl_surf_wf(mem_gb,
                     surface_spaces,
                     medial_surface_nan,
                     name='asl_surf_wf'):
    """
    Sample functional images to FreeSurfer surfaces.

    For each vertex, the cortical ribbon is sampled at six points (spaced 20% of thickness apart)
    and averaged.
    Outputs are in GIFTI format.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes

            from aslprep.workflows.asl import init_asl_surf_wf
            wf = init_asl_surf_wf(mem_gb=0.1,
                                   surface_spaces=['fsnative', 'fsaverage5'],
                                   medial_surface_nan=False)

    Parameters
    ----------
    surface_spaces : :obj:`list`
        List of FreeSurfer surface-spaces (either ``fsaverage{3,4,5,6,}`` or ``fsnative``)
        the functional images are to be resampled to.
        For ``fsnative``, images will be resampled to the individual subject's
        native surface.
    medial_surface_nan : :obj:`bool`
        Replace medial wall values with NaNs on functional GIFTI files

    Inputs
    ------
    source_file
        Motion-corrected ASL series in T1 space
    t1w_preproc
        Bias-corrected structural template image
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space

    Outputs
    -------
    surfaces
        ASL series, resampled to FreeSurfer surfaces

    """
    from nipype.interfaces.io import FreeSurferSource
    from ...niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from ...niworkflows.interfaces.surf import GiftiSetAnatomicalStructure

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The ASL time-series were resampled onto the following surfaces
(FreeSurfer reconstruction nomenclature):
{out_spaces}.
""".format(out_spaces=', '.join(['*%s*' % s for s in surface_spaces]))

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'source_file', 'subject_id', 'subjects_dir', 't1w2fsnative_xfm'
    ]),
                        name='inputnode')
    itersource = pe.Node(niu.IdentityInterface(fields=['target']),
                         name='itersource')
    itersource.iterables = [('target', surface_spaces)]

    get_fsnative = pe.Node(FreeSurferSource(),
                           name='get_fsnative',
                           run_without_submitting=True)

    def select_target(subject_id, space):
        """Get the target subject ID, given a source subject ID and a target space."""
        return subject_id if space == 'fsnative' else space

    targets = pe.Node(niu.Function(function=select_target),
                      name='targets',
                      run_without_submitting=True,
                      mem_gb=DEFAULT_MEMORY_MIN_GB)

    # Rename the source file to the output space to simplify naming later
    rename_src = pe.Node(niu.Rename(format_string='%(subject)s',
                                    keep_ext=True),
                         name='rename_src',
                         run_without_submitting=True,
                         mem_gb=DEFAULT_MEMORY_MIN_GB)
    itk2lta = pe.Node(niu.Function(function=_itk2lta),
                      name="itk2lta",
                      run_without_submitting=True)
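    # sampling_range=(0, 1, 0.2) samples six points through the cortical
    # ribbon (0%, 20%, ..., 100% of thickness) and averages them, as the
    # docstring above describes.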
    sampler = pe.MapNode(fs.SampleToSurface(
        cortex_mask=True,
        interp_method='trilinear',
        out_type='gii',
        override_reg_subj=True,
        sampling_method='average',
        sampling_range=(0, 1, 0.2),
        sampling_units='frac',
    ),
                         iterfield=['hemi'],
                         name='sampler',
                         mem_gb=mem_gb * 3)
    sampler.inputs.hemi = ['lh', 'rh']
    update_metadata = pe.MapNode(GiftiSetAnatomicalStructure(),
                                 iterfield=['in_file'],
                                 name='update_metadata',
                                 mem_gb=DEFAULT_MEMORY_MIN_GB)

    outputnode = pe.JoinNode(
        niu.IdentityInterface(fields=['surfaces', 'target']),
        joinsource='itersource',
        name='outputnode')

    workflow.connect([
        (inputnode, get_fsnative, [('subject_id', 'subject_id'),
                                   ('subjects_dir', 'subjects_dir')]),
        (inputnode, targets, [('subject_id', 'subject_id')]),
        (inputnode, rename_src, [('source_file', 'in_file')]),
        (inputnode, itk2lta, [('source_file', 'src_file'),
                              ('t1w2fsnative_xfm', 'in_file')]),
        (get_fsnative, itk2lta, [('T1', 'dst_file')]),
        (inputnode, sampler, [('subjects_dir', 'subjects_dir'),
                              ('subject_id', 'subject_id')]),
        (itersource, targets, [('target', 'space')]),
        (itersource, rename_src, [('target', 'subject')]),
        (itk2lta, sampler, [('out', 'reg_file')]),
        (targets, sampler, [('out', 'target_subject')]),
        (rename_src, sampler, [('out_file', 'source_file')]),
        (update_metadata, outputnode, [('out_file', 'surfaces')]),
        (itersource, outputnode, [('target', 'target')]),
    ])

    if not medial_surface_nan:
        workflow.connect(sampler, 'out_file', update_metadata, 'in_file')
        return workflow

    from ...niworkflows.interfaces.freesurfer import MedialNaNs
    # medial_surface_nan was requested: replace medial wall vertices with NaNs
    medial_nans = pe.MapNode(MedialNaNs(),
                             iterfield=['in_file'],
                             name='medial_nans',
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, medial_nans, [('subjects_dir', 'subjects_dir')]),
        (sampler, medial_nans, [('out_file', 'in_file')]),
        (medial_nans, update_metadata, [('out_file', 'in_file')]),
    ])
    return workflow
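A minimal usage sketch for the workflow factory above. The factory name
``init_asl_surf_wf``, the import path, and all file names below are
assumptions for illustration, not part of the source:

from aslprep.workflows.asl.resampling import init_asl_surf_wf  # assumed path

surf_wf = init_asl_surf_wf(mem_gb=0.1,
                           surface_spaces=['fsnative', 'fsaverage5'],
                           medial_surface_nan=False)
# Illustrative inputs; source_file must already be resampled to T1w space
surf_wf.inputs.inputnode.source_file = 'sub-01_space-T1w_asl.nii.gz'
surf_wf.inputs.inputnode.subjects_dir = '/data/freesurfer'
surf_wf.inputs.inputnode.subject_id = 'sub-01'
surf_wf.inputs.inputnode.t1w2fsnative_xfm = 'sub-01_from-T1w_to-fsnative_xfm.txt'
surf_wf.run()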
Example #28
0
def init_bold_std_trans_wf(freesurfer,
                           mem_gb,
                           omp_nthreads,
                           standard_spaces,
                           name='bold_std_trans_wf',
                           use_compression=True,
                           use_fieldwarp=False):
    """
    This workflow samples functional images into standard space with a single
    resampling of the original BOLD series.

    .. workflow::
        :graph2use: colored
        :simple_form: yes

        from collections import OrderedDict
        from fmriprep.workflows.bold import init_bold_std_trans_wf
        wf = init_bold_std_trans_wf(
            freesurfer=True,
            mem_gb=3,
            omp_nthreads=1,
            standard_spaces=OrderedDict([('MNI152Lin', {}),
                                         ('fsaverage', {'density': '10k'})]),
        )

    **Parameters**

        freesurfer : bool
            Whether to generate FreeSurfer's aseg/aparc segmentations in BOLD space.
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        standard_spaces : OrderedDict
            Ordered dictionary where keys are TemplateFlow ID strings (e.g.,
            ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``),
            or paths pointing to custom templates organized in a TemplateFlow-like structure.
            Values of the dictionary aggregate modifiers (e.g., the value for the key ``MNI152Lin``
            could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
            resolution version of the selected template).
        name : str
            Name of workflow (default: ``bold_std_trans_wf``)
        use_compression : bool
            Save registered BOLD series as ``.nii.gz``
        use_fieldwarp : bool
            Include SDC warp in single-shot transform from BOLD to MNI

    **Inputs**

        anat2std_xfm
            List of anatomical-to-standard space transforms generated during
            spatial normalization.
        bold_aparc
            FreeSurfer's ``aparc+aseg.mgz`` atlas projected into the T1w reference
            (only if ``recon-all`` was run).
        bold_aseg
            FreeSurfer's ``aseg.mgz`` atlas projected into the T1w reference
            (only if ``recon-all`` was run).
        bold_mask
            Skull-stripping mask of reference image
        bold_split
            Individual 3D volumes, not motion corrected
        fieldwarp
            a :abbr:`DFM (displacements field map)` in ITK format
        hmc_xforms
            List of affine transforms aligning each volume to ``ref_image`` in ITK format
        itk_bold_to_t1
            Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
        name_source
            BOLD series NIfTI file
            Used to recover original information lost during processing
        templates
            List of templates that were applied as targets during
            spatial normalization.

    **Outputs**

        Two output nodes are available. One (``poutputnode``) is parameterized
        in the Nipype sense (see `Nipype iterables
        <https://miykael.github.io/nipype_tutorial/notebooks/basic_iteration.html>`__),
        and a second (``outputnode``) collapses the parameterized outputs into
        synchronous lists of the following fields:

        bold_std
            BOLD series, resampled to template space
        bold_std_ref
            Reference, contrast-enhanced summary of the BOLD series, resampled to template space
        bold_mask_std
            BOLD series mask in template space
        bold_aseg_std
            FreeSurfer's ``aseg.mgz`` atlas, in template space at the BOLD resolution
            (only if ``recon-all`` was run)
        bold_aparc_std
            FreeSurfer's ``aparc+aseg.mgz`` atlas, in template space at the BOLD resolution
            (only if ``recon-all`` was run)
        templates
            Template identifiers synchronized with the previously described
            outputs.

    """

    # Filter ``standard_spaces``
    vol_std_spaces = [
        k for k in standard_spaces.keys() if not k.startswith('fs')
    ]
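    # e.g. OrderedDict([('MNI152Lin', {}), ('fsaverage', {'density': '10k'})])
    # yields vol_std_spaces == ['MNI152Lin']; 'fs*' (surface) spaces are
    # handled by the surface workflows instead.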

    workflow = Workflow(name=name)

    if len(vol_std_spaces) == 1:
        workflow.__desc__ = """\
The BOLD time-series were resampled into standard space,
generating a *preprocessed BOLD run in {tpl} space*.
""".format(tpl=vol_std_spaces)
    else:
        workflow.__desc__ = """\
The BOLD time-series were resampled into several standard spaces,
correspondingly generating the following *spatially-normalized,
preprocessed BOLD runs*: {tpl}.
""".format(tpl=', '.join(vol_std_spaces))

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'anat2std_xfm',
        'bold_aparc',
        'bold_aseg',
        'bold_mask',
        'bold_split',
        'fieldwarp',
        'hmc_xforms',
        'itk_bold_to_t1',
        'name_source',
        'templates',
    ]),
                        name='inputnode')

    select_std = pe.Node(KeySelect(fields=['resolution', 'anat2std_xfm']),
                         name='select_std',
                         run_without_submitting=True)

    select_std.inputs.resolution = [
        v.get('resolution') or v.get('res') or 'native'
        for k, v in list(standard_spaces.items()) if k in vol_std_spaces
    ]
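    # e.g. {'MNI152Lin': {'resolution': 2}, 'MNI152NLin6Asym': {}} yields
    # resolution == [2, 'native']: spaces without a 'resolution'/'res'
    # modifier fall back to the template's native resolution.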
    select_std.iterables = ('key', vol_std_spaces)

    select_tpl = pe.Node(niu.Function(function=_select_template),
                         name='select_tpl',
                         run_without_submitting=True)
    select_tpl.inputs.template_specs = standard_spaces

    gen_ref = pe.Node(GenerateSamplingReference(), name='gen_ref',
                      mem_gb=0.3)  # 256x256x256 * 64 / 8 ~ 150 MB

    mask_std_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                           float=True),
                           name='mask_std_tfm',
                           mem_gb=1)

    # Merge the anatomical and BOLD-to-T1w transforms to resample the mask
    mask_merge_tfms = pe.Node(niu.Merge(2),
                              name='mask_merge_tfms',
                              run_without_submitting=True,
                              mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, select_std, [('templates', 'keys'),
                                 ('anat2std_xfm', 'anat2std_xfm')]),
        (inputnode, mask_std_tfm, [('bold_mask', 'input_image')]),
        (inputnode, gen_ref, [(('bold_split', _first), 'moving_image')]),
        (inputnode, mask_merge_tfms, [(('itk_bold_to_t1', _aslist), 'in2')]),
        (select_std, select_tpl, [('key', 'template')]),
        (select_std, mask_merge_tfms, [('anat2std_xfm', 'in1')]),
        (select_std, gen_ref, [(('resolution', _is_native), 'keep_native')]),
        (select_tpl, gen_ref, [('out', 'fixed_image')]),
        (mask_merge_tfms, mask_std_tfm, [('out', 'transforms')]),
        (gen_ref, mask_std_tfm, [('out_file', 'reference_image')]),
    ])

    nxforms = 4 if use_fieldwarp else 3
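    # Transform order handed to ApplyTransforms: in1 = anat2std_xfm,
    # in2 = itk_bold_to_t1, in3 = fieldwarp (only when use_fieldwarp is True),
    # in<nxforms> = hmc_xforms.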
    merge_xforms = pe.Node(niu.Merge(nxforms),
                           name='merge_xforms',
                           run_without_submitting=True,
                           mem_gb=DEFAULT_MEMORY_MIN_GB)
    workflow.connect([(inputnode, merge_xforms, [('hmc_xforms',
                                                  'in%d' % nxforms)])])

    if use_fieldwarp:
        workflow.connect([(inputnode, merge_xforms, [('fieldwarp', 'in3')])])

    bold_to_std_transform = pe.Node(MultiApplyTransforms(
        interpolation="LanczosWindowedSinc", float=True, copy_dtype=True),
                                    name='bold_to_std_transform',
                                    mem_gb=mem_gb * 3 * omp_nthreads,
                                    n_procs=omp_nthreads)

    merge = pe.Node(Merge(compress=use_compression),
                    name='merge',
                    mem_gb=mem_gb * 3)

    # Generate a reference in the target standard space
    gen_final_ref = init_bold_reference_wf(omp_nthreads=omp_nthreads,
                                           pre_mask=True)

    workflow.connect([
        (inputnode, merge_xforms, [(('itk_bold_to_t1', _aslist), 'in2')]),
        (inputnode, merge, [('name_source', 'header_source')]),
        (inputnode, bold_to_std_transform, [('bold_split', 'input_image')]),
        (select_std, merge_xforms, [('anat2std_xfm', 'in1')]),
        (merge_xforms, bold_to_std_transform, [('out', 'transforms')]),
        (gen_ref, bold_to_std_transform, [('out_file', 'reference_image')]),
        (bold_to_std_transform, merge, [('out_files', 'in_files')]),
        (merge, gen_final_ref, [('out_file', 'inputnode.bold_file')]),
        (mask_std_tfm, gen_final_ref, [('output_image', 'inputnode.bold_mask')
                                       ]),
    ])

    # Connect output nodes
    output_names = ['bold_std', 'bold_std_ref', 'bold_mask_std', 'templates']
    if freesurfer:
        output_names += ['bold_aseg_std', 'bold_aparc_std']

    # poutputnode - parametric output node
    poutputnode = pe.Node(niu.IdentityInterface(fields=output_names),
                          name='poutputnode')

    workflow.connect([
        (gen_final_ref, poutputnode, [('outputnode.ref_image', 'bold_std_ref')
                                      ]),
        (merge, poutputnode, [('out_file', 'bold_std')]),
        (mask_std_tfm, poutputnode, [('output_image', 'bold_mask_std')]),
        (select_std, poutputnode, [('key', 'templates')]),
    ])

    if freesurfer:
        # Sample the parcellation files to functional space
        aseg_std_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                               float=True),
                               name='aseg_std_tfm',
                               mem_gb=1)
        aparc_std_tfm = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                float=True),
                                name='aparc_std_tfm',
                                mem_gb=1)

        workflow.connect([
            (inputnode, aseg_std_tfm, [('bold_aseg', 'input_image')]),
            (inputnode, aparc_std_tfm, [('bold_aparc', 'input_image')]),
            (select_std, aseg_std_tfm, [('anat2std_xfm', 'transforms')]),
            (select_std, aparc_std_tfm, [('anat2std_xfm', 'transforms')]),
            (gen_ref, aseg_std_tfm, [('out_file', 'reference_image')]),
            (gen_ref, aparc_std_tfm, [('out_file', 'reference_image')]),
            (aseg_std_tfm, poutputnode, [('output_image', 'bold_aseg_std')]),
            (aparc_std_tfm, poutputnode, [('output_image', 'bold_aparc_std')]),
        ])

    # Connect outputnode to the parameterized outputnode
    outputnode = pe.JoinNode(niu.IdentityInterface(fields=output_names),
                             name='outputnode',
                             joinsource='select_std')
    workflow.connect([(poutputnode, outputnode, [(f, f)
                                                 for f in output_names])])

    return workflow
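The ``poutputnode``/``outputnode`` pairing above is a reusable pattern for
collapsing iterable-parameterized results into synchronized lists. A
self-contained toy sketch of the same pattern (all names and values
illustrative):

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu

def _double(x):
    return 2 * x

wf = pe.Workflow(name='collapse_demo')
src = pe.Node(niu.IdentityInterface(fields=['x']), name='src')
src.iterables = [('x', [1, 2, 3])]
work = pe.Node(niu.Function(function=_double), name='work')
# Parametric output node: one replicate per value of the 'x' iterable
pnode = pe.Node(niu.IdentityInterface(fields=['y']), name='pnode')
# JoinNode collapses the replicates into a single list, here [2, 4, 6]
jnode = pe.JoinNode(niu.IdentityInterface(fields=['y']),
                    joinsource='src', name='jnode')
wf.connect([(src, work, [('x', 'x')]),
            (work, pnode, [('out', 'y')]),
            (pnode, jnode, [('y', 'y')])])
wf.run()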
Example #29
0
def init_bold_surf_wf(mem_gb,
                      output_spaces,
                      medial_surface_nan,
                      name='bold_surf_wf'):
    """
    This workflow samples functional images to FreeSurfer surfaces.

    For each vertex, the cortical ribbon is sampled at six points (spaced 20% of thickness apart)
    and averaged.

    Outputs are in GIFTI format.

    .. workflow::
        :graph2use: colored
        :simple_form: yes

        from fmriprep.workflows.bold import init_bold_surf_wf
        wf = init_bold_surf_wf(mem_gb=0.1,
                               output_spaces=['T1w', 'fsnative',
                                             'template', 'fsaverage5'],
                               medial_surface_nan=False)

    **Parameters**

        output_spaces : list
            List of output spaces functional images are to be resampled to.
            Target spaces beginning with ``fs`` will be selected for resampling,
            such as ``fsaverage`` or related template spaces.
            If the list contains ``fsnative``, images will be resampled to the
            individual subject's native surface.
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files

    **Inputs**

        source_file
            Motion-corrected BOLD series in T1 space
        t1_preproc
            Bias-corrected structural template image
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID
        t1_2_fsnative_forward_transform
            LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space

    **Outputs**

        surfaces
            BOLD series, resampled to FreeSurfer surfaces

    """
    # Ensure volumetric spaces do not sneak into this workflow
    spaces = [space for space in output_spaces if space.startswith('fs')]
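    # e.g. ['T1w', 'fsnative', 'template', 'fsaverage5'] -> ['fsnative', 'fsaverage5']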

    workflow = Workflow(name=name)

    if spaces:
        workflow.__desc__ = """\
The BOLD time-series were resampled to surfaces on the following
spaces: {out_spaces}.
""".format(out_spaces=', '.join(['*%s*' % s for s in spaces]))
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'source_file', 't1_preproc', 'subject_id', 'subjects_dir',
        't1_2_fsnative_forward_transform'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=['surfaces']),
                         name='outputnode')

    def select_target(subject_id, space):
        """ Given a source subject ID and a target space, get the target subject ID """
        return subject_id if space == 'fsnative' else space

    targets = pe.MapNode(niu.Function(function=select_target),
                         iterfield=['space'],
                         name='targets',
                         mem_gb=DEFAULT_MEMORY_MIN_GB)
    targets.inputs.space = spaces

    # Rename the source file to the output space to simplify naming later
    rename_src = pe.MapNode(niu.Rename(format_string='%(subject)s',
                                       keep_ext=True),
                            iterfield='subject',
                            name='rename_src',
                            run_without_submitting=True,
                            mem_gb=DEFAULT_MEMORY_MIN_GB)
    rename_src.inputs.subject = spaces

    resampling_xfm = pe.Node(LTAConvert(in_lta='identity.nofile',
                                        out_lta=True),
                             name='resampling_xfm')
    set_xfm_source = pe.Node(ConcatenateLTA(out_type='RAS2RAS'),
                             name='set_xfm_source')

    sampler = pe.MapNode(fs.SampleToSurface(sampling_method='average',
                                            sampling_range=(0, 1, 0.2),
                                            sampling_units='frac',
                                            interp_method='trilinear',
                                            cortex_mask=True,
                                            override_reg_subj=True,
                                            out_type='gii'),
                         iterfield=['source_file', 'target_subject'],
                         iterables=('hemi', ['lh', 'rh']),
                         name='sampler',
                         mem_gb=mem_gb * 3)
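    # sampling_range=(0, 1, 0.2) with 'frac' units samples the cortical ribbon
    # at 0%, 20%, ..., 100% of thickness (six points per vertex), and
    # sampling_method='average' averages them, as described in the docstring.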

    medial_nans = pe.MapNode(MedialNaNs(),
                             iterfield=['in_file', 'target_subject'],
                             name='medial_nans',
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

    merger = pe.JoinNode(niu.Merge(1, ravel_inputs=True),
                         name='merger',
                         joinsource='sampler',
                         joinfield=['in1'],
                         run_without_submitting=True,
                         mem_gb=DEFAULT_MEMORY_MIN_GB)

    update_metadata = pe.MapNode(GiftiSetAnatomicalStructure(),
                                 iterfield='in_file',
                                 name='update_metadata',
                                 mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (inputnode, targets, [('subject_id', 'subject_id')]),
        (inputnode, rename_src, [('source_file', 'in_file')]),
        (inputnode, resampling_xfm, [('source_file', 'source_file'),
                                     ('t1_preproc', 'target_file')]),
        (inputnode, set_xfm_source, [('t1_2_fsnative_forward_transform',
                                      'in_lta2')]),
        (resampling_xfm, set_xfm_source, [('out_lta', 'in_lta1')]),
        (inputnode, sampler, [('subjects_dir', 'subjects_dir'),
                              ('subject_id', 'subject_id')]),
        (set_xfm_source, sampler, [('out_file', 'reg_file')]),
        (targets, sampler, [('out', 'target_subject')]),
        (rename_src, sampler, [('out_file', 'source_file')]),
        (merger, update_metadata, [('out', 'in_file')]),
        (update_metadata, outputnode, [('out_file', 'surfaces')]),
    ])

    if medial_surface_nan:
        workflow.connect([
            (inputnode, medial_nans, [('subjects_dir', 'subjects_dir')]),
            (sampler, medial_nans, [('out_file', 'in_file')]),
            (targets, medial_nans, [('out', 'target_subject')]),
            (medial_nans, merger, [('out_file', 'in1')]),
        ])
    else:
        workflow.connect(sampler, 'out_file', merger, 'in1')

    return workflow
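The ``sampler``/``merger`` pair above joins the two ``hemi`` iterations back
into one flat file list via ``Merge(..., ravel_inputs=True)``. A toy sketch of
that ravel-join (all names and values illustrative):

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu

def _tag(prefix, items):
    return ['%s-%s' % (prefix, i) for i in items]

wf = pe.Workflow(name='ravel_demo')
tagger = pe.Node(niu.Function(function=_tag), name='tagger')
tagger.iterables = ('prefix', ['lh', 'rh'])
tagger.inputs.items = ['a', 'b']
# Joining yields a list of lists; ravel_inputs=True flattens it into
# ['lh-a', 'lh-b', 'rh-a', 'rh-b']
merger = pe.JoinNode(niu.Merge(1, ravel_inputs=True),
                     joinsource='tagger', joinfield=['in1'], name='merger')
wf.connect(tagger, 'out', merger, 'in1')
wf.run()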
Example #30
0
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipelines.
        """

        import os
        import nipype.interfaces.spm as spm
        import nipype.interfaces.matlab as mlab
        import nipype.pipeline.engine as npe
        import nipype.interfaces.utility as nutil
        from clinica.utils.io import unzip_nii
        from clinica.pipelines.t1_volume_dartel2mni.t1_volume_dartel2mni_utils import (
            prepare_flowfields, join_smoothed_files, atlas_statistics,
            select_gm_images)

        spm_home = os.getenv("SPM_HOME")
        mlab_home = os.getenv("MATLABCMD")
        mlab.MatlabCommand.set_default_matlab_cmd(mlab_home)
        mlab.MatlabCommand.set_default_paths(spm_home)
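        # Illustrative values only, e.g. SPM_HOME=/opt/spm12 and
        # MATLABCMD=/usr/local/MATLAB/bin/matlab; both are read from the
        # environment above, not hard-coded.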

        if 'SPMSTANDALONE_HOME' in os.environ:
            if 'MCR_HOME' in os.environ:
                matlab_cmd = os.path.join(os.environ['SPMSTANDALONE_HOME'],
                                          'run_spm12.sh') \
                             + ' ' + os.environ['MCR_HOME'] \
                             + ' script'
                spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd,
                                              use_mcr=True)
                version = spm.SPMCommand().version
            else:
                raise EnvironmentError(
                    'MCR_HOME variable not in environment, although ' +
                    'SPMSTANDALONE_HOME has been found.')
        else:
            version = spm.Info.getinfo()

        if version:
            if isinstance(version, dict):
                spm_path = version['path']
                if version['name'] == 'SPM8':
                    print(
                        'You are using SPM version 8. The recommended version to use with Clinica is SPM 12. '
                        + 'Please upgrade your SPM toolbox.')
                    tissue_map = os.path.join(spm_path, 'toolbox/Seg/TPM.nii')
                elif version['name'] == 'SPM12':
                    tissue_map = os.path.join(spm_path, 'tpm/TPM.nii')
                else:
                    raise RuntimeError(
                        'Neither SPM8 nor SPM12 could be found. '
                        'Please upgrade your SPM toolbox.')
            if isinstance(version, str):
                if float(version) >= 12.7169:
                    tissue_map = os.path.join(
                        str(spm_home), 'spm12_mcr/spm/spm12/tpm/TPM.nii')
                else:
                    raise RuntimeError(
                        'SPM standalone version not supported. Please upgrade SPM standalone.'
                    )
        else:
            raise RuntimeError(
                'SPM could not be found. Please verify your SPM_HOME environment variable.'
            )

        # Unzipping
        # =========
        unzip_tissues_node = npe.MapNode(nutil.Function(
            input_names=['in_file'],
            output_names=['out_file'],
            function=unzip_nii),
                                         name='unzip_tissues_node',
                                         iterfield=['in_file'])
        unzip_flowfields_node = npe.MapNode(nutil.Function(
            input_names=['in_file'],
            output_names=['out_file'],
            function=unzip_nii),
                                            name='unzip_flowfields_node',
                                            iterfield=['in_file'])
        unzip_template_node = npe.Node(nutil.Function(
            input_names=['in_file'],
            output_names=['out_file'],
            function=unzip_nii),
                                       name='unzip_template_node')

        # DARTEL2MNI Registration
        # =======================
        dartel2mni_node = npe.MapNode(
            spm.DARTELNorm2MNI(),
            name='dartel2MNI',
            iterfield=['apply_to_files', 'flowfield_files'])

        if self.parameters['bounding_box'] is not None:
            dartel2mni_node.inputs.bounding_box = self.parameters[
                'bounding_box']
        if self.parameters['voxel_size'] is not None:
            dartel2mni_node.inputs.voxel_size = self.parameters['voxel_size']
        dartel2mni_node.inputs.modulate = self.parameters['modulation']
        dartel2mni_node.inputs.fwhm = 0

        # Smoothing
        # =========
        if self.parameters['fwhm'] is not None and len(
                self.parameters['fwhm']) > 0:
            smoothing_node = npe.MapNode(spm.Smooth(),
                                         name='smoothing_node',
                                         iterfield=['in_files'])

            smoothing_node.iterables = [
                ('fwhm', [[x, x, x] for x in self.parameters['fwhm']]),
                ('out_prefix',
                 ['fwhm-' + str(x) + 'mm_' for x in self.parameters['fwhm']])
            ]
            smoothing_node.synchronize = True
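            # synchronize=True pairs the two iterables instead of crossing
            # them: e.g. fwhm=[4, 8] yields ([4, 4, 4], 'fwhm-4mm_') and
            # ([8, 8, 8], 'fwhm-8mm_'), one smoothing run per FWHM value.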

            join_smoothing_node = npe.JoinNode(
                interface=nutil.Function(
                    input_names=['smoothed_normalized_files'],
                    output_names=['smoothed_normalized_files'],
                    function=join_smoothed_files),
                joinsource='smoothing_node',
                joinfield='smoothed_normalized_files',
                name='join_smoothing_node')
            self.connect([(dartel2mni_node, smoothing_node,
                           [('normalized_files', 'in_files')]),
                          (smoothing_node, join_smoothing_node,
                           [('smoothed_files', 'smoothed_normalized_files')]),
                          (join_smoothing_node, self.output_node,
                           [('smoothed_normalized_files',
                             'smoothed_normalized_files')])])
        else:
            self.output_node.inputs.smoothed_normalized_files = []

        # Atlas Statistics
        # ================
        atlas_stats_node = npe.MapNode(nutil.Function(
            input_names=['in_image', 'in_atlas_list'],
            output_names=['atlas_statistics'],
            function=atlas_statistics),
                                       name='atlas_stats_node',
                                       iterfield=['in_image'])
        atlas_stats_node.inputs.in_atlas_list = self.parameters['atlas_list']

        # Connection
        # ==========
        self.connect([(self.input_node, unzip_tissues_node, [('apply_to_files',
                                                              'in_file')]),
                      (self.input_node, unzip_flowfields_node,
                       [('flowfield_files', 'in_file')]),
                      (self.input_node, unzip_template_node, [('template_file',
                                                               'in_file')]),
                      (unzip_tissues_node, dartel2mni_node,
                       [('out_file', 'apply_to_files')]),
                      (unzip_flowfields_node, dartel2mni_node,
                       [(('out_file', prepare_flowfields,
                          self.parameters['tissues']), 'flowfield_files')]),
                      (unzip_template_node, dartel2mni_node,
                       [('out_file', 'template_file')]),
                      (dartel2mni_node, self.output_node,
                       [('normalized_files', 'normalized_files')]),
                      (dartel2mni_node, atlas_stats_node,
                       [(('normalized_files', select_gm_images), 'in_image')]),
                      (atlas_stats_node, self.output_node,
                       [('atlas_statistics', 'atlas_statistics')])])
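        # Note the three-element source tuples above, e.g.
        # (('out_file', prepare_flowfields, self.parameters['tissues']), ...):
        # nipype calls prepare_flowfields(out_file, tissues) on the fly and
        # forwards the return value to dartel2mni_node.flowfield_files.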