Example #1
def ms_func(settings=None, subject_id=None, session_id=None, run_id=None):
    """ Multi-subject functional workflow wrapper """
    # Run single subject mode if only one subject id is provided
    if subject_id is not None and isinstance(subject_id, string_types):
        subject_id = [subject_id]

    sub_list = gather_bids_data(settings['bids_dir'],
                                subject_inclusion=subject_id,
                                include_types=['func'])

    if session_id is not None:
        sub_list = [s for s in sub_list if s[1] == session_id]
    if run_id is not None:
        sub_list = [s for s in sub_list if s[2] == run_id]

    if not sub_list:
        return None

    inputnode = pe.Node(niu.IdentityInterface(fields=['data']),
                        name='inputnode')
    inputnode.iterables = [('data', [list(s) for s in sub_list])]
    func_qc = fmri_qc_workflow(settings=settings)
    func_qc.inputs.inputnode.bids_dir = settings['bids_dir']
    func_qc.inputs.inputnode.start_idx = settings.get('start_idx', 0)
    func_qc.inputs.inputnode.stop_idx = settings.get('stop_idx', None)

    dsplit = pe.Node(niu.Split(splits=[1, 1, 1], squeeze=True),
                     name='datasplit')
    workflow = pe.Workflow(name='funcMRIQC')
    workflow.connect([(inputnode, dsplit, [('data', 'inlist')]),
                      (dsplit, func_qc, [('out1', 'inputnode.subject_id'),
                                         ('out2', 'inputnode.session_id'),
                                         ('out3', 'inputnode.run_id')])])

    return workflow

def ms_anat(settings=None, subject_id=None, session_id=None, run_id=None):
    """ Multi-subject anatomical workflow wrapper """
    # Run single subject mode if only one subject id is provided
    if subject_id is not None and isinstance(subject_id, string_types):
        subject_id = [subject_id]

    sub_list = gather_bids_data(settings['bids_root'],
                                subject_inclusion=subject_id,
                                include_types=['anat'])

    if session_id is not None:
        sub_list = [s for s in sub_list if s[1] == session_id]
    if run_id is not None:
        sub_list = [s for s in sub_list if s[2] == run_id]

    if not sub_list:
        raise RuntimeError('No scans found in %s' % settings['bids_root'])

    inputnode = pe.Node(niu.IdentityInterface(fields=['data']),
                        name='inputnode')
    inputnode.iterables = [('data', [list(s) for s in sub_list])]
    anat_qc = anat_qc_workflow(settings=settings)
    anat_qc.inputs.inputnode.bids_root = settings['bids_root']

    dsplit = pe.Node(niu.Split(splits=[1, 1, 1], squeeze=True),
                     name='datasplit')
    workflow = pe.Workflow(name='anatMRIQC')
    workflow.connect([
        (inputnode, dsplit, [('data', 'inlist')]),
        (dsplit, anat_qc, [('out1', 'inputnode.subject_id'),
                           ('out2', 'inputnode.session_id'),
                           ('out3', 'inputnode.run_id')])
    ])

    return workflow
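A minimal usage sketch for these wrappers; the paths and settings keys are assumptions read off the code above (`ms_func` expects `settings['bids_dir']`, while `ms_anat` reads `settings['bids_root']`):

# Hypothetical invocation; directory paths are placeholders.
settings = {'bids_dir': '/data/bids', 'bids_root': '/data/bids'}

func_wf = ms_func(settings=settings, subject_id='01')
if func_wf is not None:  # ms_func returns None when no functional scans match
    func_wf.run()

anat_wf = ms_anat(settings=settings, subject_id='01')  # raises RuntimeError instead
anat_wf.run()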
Example #3
def test_split(tmpdir, args, expected):
    os.chdir(str(tmpdir))

    node = pe.Node(utility.Split(inlist=list(range(4)), splits=[1, 3], **args),
                   name='split_squeeze')
    res = node.run()
    assert res.outputs.out1 == expected[0]
    assert res.outputs.out2 == expected[1]
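The `args`/`expected` arguments imply a `pytest.mark.parametrize` decorator that this excerpt omits. A plausible reconstruction, covering the squeezed and unsqueezed behaviors that Example #5 below exercises explicitly:

import pytest

# Assumed parametrization; the excerpt above omits the decorator.
@pytest.mark.parametrize("args, expected", [
    ({}, ([0], [1, 2, 3])),               # default: out1 stays a one-element list
    ({"squeeze": True}, (0, [1, 2, 3])),  # squeeze=True unwraps it to a scalar
])
def test_split(tmpdir, args, expected):
    ...  # body as above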
Example #4
def test_aux_connect_function():
    """ This tests excution nodes with multiple inputs and auxiliary
    function inside the Workflow connect function.
    """
    tempdir = os.path.realpath(mkdtemp())
    origdir = os.getcwd()
    os.chdir(tempdir)

    wf = pe.Workflow(name="test_workflow")

    def _gen_tuple(size):
        return [
            1,
        ] * size

    def _sum_and_sub_mul(a, b, c):
        return (a + b) * c, (a - b) * c

    def _inc(x):
        return x + 1

    params = pe.Node(utility.IdentityInterface(fields=['size', 'num']),
                     name='params')
    params.inputs.num = 42
    params.inputs.size = 1

    gen_tuple = pe.Node(utility.Function(input_names=['size'],
                                         output_names=['tuple'],
                                         function=_gen_tuple),
                        name='gen_tuple')

    ssm = pe.Node(utility.Function(input_names=['a', 'b', 'c'],
                                   output_names=['sum', 'sub'],
                                   function=_sum_and_sub_mul),
                  name='sum_and_sub_mul')

    split = pe.Node(utility.Split(splits=[1, 1], squeeze=True), name='split')

    wf.connect([
        (params, gen_tuple, [(("size", _inc), "size")]),
        (params, ssm, [(("num", _inc), "c")]),
        (gen_tuple, split, [("tuple", "inlist")]),
        (split, ssm, [
            (("out1", _inc), "a"),
            ("out2", "b"),
        ]),
    ])

    wf.run()

    # Clean up
    os.chdir(origdir)
    shutil.rmtree(tempdir)
Example #5
def test_split():
    tempdir = os.path.realpath(mkdtemp())
    origdir = os.getcwd()
    os.chdir(tempdir)

    try:
        node = pe.Node(utility.Split(inlist=list(range(4)), splits=[1, 3]),
                       name='split_squeeze')
        res = node.run()
        yield assert_equal, res.outputs.out1, [0]
        yield assert_equal, res.outputs.out2, [1, 2, 3]

        node = pe.Node(utility.Split(inlist=list(range(4)),
                                     splits=[1, 3],
                                     squeeze=True),
                       name='split_squeeze')
        res = node.run()
        yield assert_equal, res.outputs.out1, 0
        yield assert_equal, res.outputs.out2, [1, 2, 3]
    finally:
        os.chdir(origdir)
        shutil.rmtree(tempdir)
Example #6
def test_aux_connect_function(tmpdir):
    """ This tests execution of nodes with multiple inputs and an auxiliary
    function inside the Workflow connect function.
    """
    tmpdir.chdir()

    wf = pe.Workflow(name="test_workflow")

    def _gen_tuple(size):
        return [1,] * size

    def _sum_and_sub_mul(a, b, c):
        return (a + b) * c, (a - b) * c

    def _inc(x):
        return x + 1

    params = pe.Node(utility.IdentityInterface(fields=["size", "num"]), name="params")
    params.inputs.num = 42
    params.inputs.size = 1

    gen_tuple = pe.Node(
        utility.Function(
            input_names=["size"], output_names=["tuple"], function=_gen_tuple
        ),
        name="gen_tuple",
    )

    ssm = pe.Node(
        utility.Function(
            input_names=["a", "b", "c"],
            output_names=["sum", "sub"],
            function=_sum_and_sub_mul,
        ),
        name="sum_and_sub_mul",
    )

    split = pe.Node(utility.Split(splits=[1, 1], squeeze=True), name="split")

    wf.connect(
        [
            (params, gen_tuple, [(("size", _inc), "size")]),
            (params, ssm, [(("num", _inc), "c")]),
            (gen_tuple, split, [("tuple", "inlist")]),
            (split, ssm, [(("out1", _inc), "a"), ("out2", "b"),]),
        ]
    )

    wf.run()
Example #7
def identity_wf(name='Identity', n_tissues=3):
    """
    An identity workflow to check how the ideal inverse transform
    affects the final evaluation scores.
    """
    wf = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(
                        fields=['in_fixed', 'in_tpms', 'in_surf',
                                'in_mask', 'in_field', 'grid_size']),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
                         fields=['out_corr', 'out_tpms',
                                 'out_surf', 'out_field', 'out_mask']),
                         name='outputnode')

    # Invert field
    inv = pe.Node(InverseField(), name='InvertField')

    # Compute corrected images
    merge = pe.Node(niu.Merge(2), name='Merge')
    split = pe.Node(niu.Split(splits=[2, n_tissues]), name='Split')

    # Apply tfm to tpms
    applytfm = pe.Node(FieldBasedWarp(), name="ApplyWarp")

    # Connect
    wf.connect([
        (inputnode,       inv, [('in_field', 'in_field')]),
        (inputnode,     merge, [('in_fixed', 'in1'),
                                ('in_tpms', 'in2')]),
        (inputnode,  applytfm, [('in_mask', 'in_mask'),
                                ('in_surf', 'in_surf'),
                                ('grid_size', 'grid_size')]),
        (merge,      applytfm, [('out', 'in_file')]),
        (inv,        applytfm, [('out_field', 'in_field')]),
        (applytfm,      split, [('out_file', 'inlist')]),
        (split,    outputnode, [('out1', 'out_corr'),
                                ('out2', 'out_tpms')]),
        (inv,      outputnode, [('out_field', 'out_field')]),
        (applytfm, outputnode, [('out_surf', 'out_surf'),
                                ('out_mask', 'out_mask')])
    ])

    return wf
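A usage sketch, assuming `InverseField` and `FieldBasedWarp` are custom interfaces defined elsewhere in the package; note that `in_fixed` must carry two images to match `Merge(2)` feeding `Split(splits=[2, n_tissues])`:

# Hypothetical inputs; file names are placeholders.
wf = identity_wf(n_tissues=3)
wf.inputs.inputnode.in_fixed = ['t1.nii.gz', 't2.nii.gz']
wf.inputs.inputnode.in_tpms = ['csf.nii.gz', 'gm.nii.gz', 'wm.nii.gz']
wf.inputs.inputnode.in_mask = 'mask.nii.gz'
wf.inputs.inputnode.in_field = 'field.nii.gz'
wf.run()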
Example #8
def init_transform_to_first_image_wf(name='transform_images', n_images=2):

    wf = pe.Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=['in_files', 'transforms']),
        name='inputnode')

    split = pe.Node(niu.Split(splits=[1, n_images - 1]), name='split')
    wf.connect(inputnode, 'in_files', split, 'inlist')

    apply_sinc = pe.MapNode(
        ants.ApplyTransforms(interpolation='LanczosWindowedSinc'),
        iterfield=['input_image'],
        name='apply_sinc')
    wf.connect(inputnode, 'transforms', apply_sinc, 'transforms')
    wf.connect(split, ('out1', _pickone), apply_sinc, 'reference_image')
    wf.connect(split, 'out2', apply_sinc, 'input_image')

    merge_lists = pe.Node(niu.Merge(2), name='merge_lists')
    wf.connect(split, 'out1', merge_lists, 'in1')
    wf.connect(apply_sinc, 'output_image', merge_lists, 'in2')

    merge_niftis = pe.Node(fsl.Merge(dimension='t'), name='merge_niftis')
    wf.connect(merge_lists, 'out', merge_niftis, 'in_files')

    mean_image = pe.Node(fsl.MeanImage(dimension='T'), name='mean_image')
    wf.connect(merge_niftis, 'merged_file', mean_image, 'in_file')

    outputnode = pe.Node(
        niu.IdentityInterface(fields=['mean_image', 'transformed_images']),
        name='outputnode')
    wf.connect(mean_image, 'out_file', outputnode, 'mean_image')
    wf.connect(merge_lists, 'out', outputnode, 'transformed_images')

    return wf
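`_pickone` is referenced here (and again in Example #14) but not shown in the excerpt. Since `Split(splits=[1, n_images - 1])` is used without `squeeze`, `out1` arrives as a one-element list, so the helper presumably just unwraps it:

def _pickone(inlist):
    # Assumed helper: without squeeze=True, Split leaves out1 as a
    # one-element list; return its only element.
    return inlist[0]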
Example #9
def init_seedconnectivity_wf(seeds, use_mov_pars, name="firstlevel"):
    """
    create workflow to calculate seed connectivity maps
    for resting state functional scans

    :param seeds: dictionary of filenames by user-defined names 
        of binary masks that define the seed regions
    :param use_mov_pars: if true, regress out movement parameters when 
        calculating seed connectivity
    :param name: workflow name (Default value = "firstlevel")

    """
    workflow = pe.Workflow(name=name)

    # inputs are the bold file, the mask file and the confounds file
    # that contains the movement parameters
    inputnode = pe.Node(niu.IdentityInterface(
        fields=["bold_file", "mask_file", "confounds_file"]),
                        name="inputnode")

    # make two (ordered) lists from (unordered) dictionary of seeds
    seednames = list(seeds.keys())  # contains the keys (seed names)
    seed_paths = [seeds[k]
                  for k in seednames]  # contains the values (filenames)

    # calculate the mean time series of the region defined by each mask
    meants = pe.MapNode(interface=fsl.ImageMeants(),
                        name="meants",
                        iterfield=["mask"])
    meants.inputs.mask = seed_paths

    # calculate the regression of the mean time series onto the functional image
    # the result is the seed connectivity map
    glm = pe.MapNode(interface=fsl.GLM(out_file="beta.nii.gz",
                                       out_cope="cope.nii.gz",
                                       out_varcb_name="varcope.nii.gz",
                                       out_z_name="zstat.nii.gz",
                                       demean=True),
                     name="glm",
                     iterfield=["design"])

    # generate dof text file
    gendoffile = pe.Node(interface=Dof(num_regressors=1), name="gendoffile")

    # split regression outputs by name
    splitimgs = pe.Node(
        interface=niu.Split(splits=[1 for seedname in seednames]),
        name="splitimgs")
    splitvarcopes = pe.Node(
        interface=niu.Split(splits=[1 for seedname in seednames]),
        name="splitvarcopes")
    splitzstats = pe.Node(
        interface=niu.Split(splits=[1 for seedname in seednames]),
        name="splitzstats")

    # outputs are cope, varcope and zstat for each seed region and a dof_file
    outputnode = pe.Node(niu.IdentityInterface(fields=sum(
        [["%s_img" % seedname,
          "%s_varcope" % seedname,
          "%s_zstat" % seedname]
         for seedname in seednames], []) + ["dof_file"]),
                         name="outputnode")

    workflow.connect([
        (inputnode, meants, [("bold_file", "in_file")]),
        (inputnode, glm, [("bold_file", "in_file"), ("mask_file", "mask")]),
        (meants, glm, [("out_file", "design")]),
        (glm, splitimgs, [
            ("out_cope", "inlist"),
        ]),
        (glm, splitvarcopes, [
            ("out_varcb", "inlist"),
        ]),
        (glm, splitzstats, [
            ("out_z", "inlist"),
        ]),
        (inputnode, gendoffile, [
            ("bold_file", "in_file"),
        ]),
        (gendoffile, outputnode, [
            ("out_file", "dof_file"),
        ]),
    ])

    # connect outputs named for the seeds
    for i, seedname in enumerate(seednames):
        workflow.connect(splitimgs, "out%i" % (i + 1), outputnode,
                         "%s_img" % seedname)
        workflow.connect(splitvarcopes, "out%i" % (i + 1), outputnode,
                         "%s_varcope" % seedname)
        workflow.connect(splitzstats, "out%i" % (i + 1), outputnode,
                         "%s_zstat" % seedname)

    return workflow, seednames
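A usage sketch showing the expected shape of the `seeds` dictionary (seed names and mask paths are placeholders):

# Hypothetical invocation.
seeds = {
    'pcc': '/masks/pcc_bin.nii.gz',
    'dlpfc': '/masks/dlpfc_bin.nii.gz',
}
wf, seednames = init_seedconnectivity_wf(seeds, use_mov_pars=True)
wf.inputs.inputnode.bold_file = 'sub-01_task-rest_bold.nii.gz'
wf.inputs.inputnode.mask_file = 'sub-01_task-rest_mask.nii.gz'
wf.inputs.inputnode.confounds_file = 'sub-01_task-rest_confounds.tsv'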
Example #10
def init_dualregression_wf(componentsfile, use_mov_pars, name="firstlevel"):
    """
    create a workflow to calculate dual regression for ICA seeds

    :param componentsfile: 4d image file with ica components
    :param use_mov_pars: if true, regress out movement parameters when 
        calculating dual regression
    :param name: workflow name (Default value = "firstlevel")

    """
    workflow = pe.Workflow(name=name)

    # inputs are the bold file, the mask file and the confounds file
    # that contains the movement parameters
    inputnode = pe.Node(niu.IdentityInterface(
        fields=["bold_file", "mask_file", "confounds_file"]),
                        name="inputnode")

    # extract number of ICA components from 4d image and name them
    ncomponents = nib.load(componentsfile).shape[3]
    fname, _ = _splitext(os.path.basename(componentsfile))
    componentnames = ["%s_%d" % (fname, i) for i in range(ncomponents)]

    # first step: calculate the spatial regression of the ICA components
    # onto the bold file
    glm0 = pe.Node(interface=fsl.GLM(out_file="beta", demean=True),
                   name="glm0")
    glm0.inputs.design = componentsfile

    # second step, calculate the temporal regression of the time series
    # from the first step onto the bold file
    glm1 = pe.Node(interface=fsl.GLM(out_file="beta.nii.gz",
                                     out_cope="cope.nii.gz",
                                     out_varcb_name="varcope.nii.gz",
                                     out_z_name="zstat.nii.gz",
                                     demean=True),
                   name="glm1")

    # split regression outputs into individual images
    splitimgsimage = pe.Node(interface=fsl.Split(dimension="t"),
                             name="splitimgsimage")
    splitvarcopesimage = pe.Node(interface=fsl.Split(dimension="t"),
                                 name="splitvarcopesimage")
    splitzstatsimage = pe.Node(interface=fsl.Split(dimension="t"),
                               name="splitzstatsimage")

    # generate dof text file
    gendoffile = pe.Node(interface=Dof(num_regressors=1), name="gendoffile")

    # outputs are cope, varcope and zstat for each ICA component and a dof_file
    outputnode = pe.Node(niu.IdentityInterface(fields=sum([[
        "%s_img" % componentname,
        "%s_varcope" % componentname,
        "%s_zstat" % componentname
    ] for componentname in componentnames], []) + ["dof_file"]),
                         name="outputnode")

    # split regression outputs by name
    splitimgs = pe.Node(
        interface=niu.Split(splits=[1 for componentname in componentnames]),
        name="splitimgs")
    splitvarcopes = pe.Node(
        interface=niu.Split(splits=[1 for componentname in componentnames]),
        name="splitvarcopes")
    splitzstats = pe.Node(
        interface=niu.Split(splits=[1 for componentname in componentnames]),
        name="splitzstats")

    workflow.connect([
        (inputnode, glm0, [("bold_file", "in_file"), ("mask_file", "mask")]),
        (inputnode, glm1, [("bold_file", "in_file"), ("mask_file", "mask")]),
        (glm0, glm1, [("out_file", "design")]),
        (glm1, splitimgsimage, [
            ("out_cope", "in_file"),
        ]),
        (glm1, splitvarcopesimage, [
            ("out_varcb", "in_file"),
        ]),
        (glm1, splitzstatsimage, [
            ("out_z", "in_file"),
        ]),
        (splitimgsimage, splitimgs, [
            ("out_files", "inlist"),
        ]),
        (splitvarcopesimage, splitvarcopes, [
            ("out_files", "inlist"),
        ]),
        (splitzstatsimage, splitzstats, [
            ("out_files", "inlist"),
        ]),
        (inputnode, gendoffile, [
            ("bold_file", "in_file"),
        ]),
        (gendoffile, outputnode, [
            ("out_file", "dof_file"),
        ]),
    ])

    # connect outputs named for the ICA components
    for i, componentname in enumerate(componentnames):
        workflow.connect(splitimgs, "out%i" % (i + 1), outputnode,
                         "%s_img" % componentname)
        workflow.connect(splitvarcopes, "out%i" % (i + 1), outputnode,
                         "%s_varcope" % componentname)
        workflow.connect(splitzstats, "out%i" % (i + 1), outputnode,
                         "%s_zstat" % componentname)

    return workflow, componentnames
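A matching sketch for the dual-regression variant; note that the components file is read with `nibabel` at workflow-build time, so it must exist when the function is called (file names are placeholders):

# Hypothetical invocation.
wf, componentnames = init_dualregression_wf('melodic_IC.nii.gz',
                                            use_mov_pars=False)
wf.inputs.inputnode.bold_file = 'sub-01_task-rest_bold.nii.gz'
wf.inputs.inputnode.mask_file = 'sub-01_task-rest_mask.nii.gz'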
Example #11
def init_bidirectional_b0_unwarping_wf(template_plus_pe, omp_nthreads=1,
                                       name="bidirectional_pepolar_unwarping_wf"):
    """
    This workflow takes in a set of b0 files with opposite phase encoding
    direction and calculates displacement fields
    (in other words, an ANTs-compatible warp file). This is intended to be run
    in the case where there are two dwi series in the same session with reverse
    phase encoding directions.

    The warp field correcting for the distortions is estimated using AFNI's
    3dQwarp, with displacement estimation limited to the target file phase
    encoding direction.

    It also calculates a new mask for the input dataset that takes into
    account the distortions.

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from qsiprep.workflows.fieldmap.pepolar import init_bidirectional_b0_unwarping_wf
        wf = init_bidirectional_b0_unwarping_wf(
            template_plus_pe='j',
            omp_nthreads=8)


    Inputs

        template_plus
            b0 template in one PE
        template_minus
            b0_template in the other PE

    Outputs

        out_reference
            the ``in_reference`` after unwarping
        out_reference_brain
            the ``in_reference`` after unwarping and skullstripping
        out_warp_plus
            the corresponding :abbr:`DFM (displacements field map)` to correct
            ``template_plus``
        out_warp_minus
            the corresponding :abbr:`DFM (displacements field map)` to correct
            ``template_minus``
        out_mask
            mask of the unwarped input file

    """
    args = '-noXdis -noYdis -noZdis'
    rm_arg = {'i': '-noXdis',
              'j': '-noYdis',
              'k': '-noZdis'}[template_plus_pe[0]]
    args = args.replace(rm_arg, '')

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
A deformation field to correct for susceptibility distortions was estimated
based on two b0 templates created from dwi series with opposing phase-encoding
directions, using `3dQwarp` @afni (AFNI {afni_ver}).
""".format(afni_ver=''.join(['%02d' % v for v in afni.Info().version() or []]))

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['template_plus', 'template_minus', 't1w_brain']),
        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_reference', 'out_reference_brain', 'out_affine_plus', 'out_warp_plus',
                'out_affine_minus', 'out_warp_minus', 'out_mask']), name='outputnode')
    # Create high-contrast ref images
    plus_ref_wf = init_dwi_reference_wf(name='plus_ref_wf')
    minus_ref_wf = init_dwi_reference_wf(name='minus_ref_wf')

    # Align the two reference images to the midpoint
    inputs_to_list = pe.Node(niu.Merge(2), name='inputs_to_list')
    align_reverse_pe_wf = init_b0_hmc_wf(align_to='iterative', transform='Rigid')
    get_midpoint_transforms = pe.Node(niu.Split(splits=[1, 1], squeeze=True),
                                      name="get_midpoint_transforms")
    plus_to_midpoint = pe.Node(ants.ApplyTransforms(float=True,
                                                    interpolation='LanczosWindowedSinc',
                                                    dimension=3),
                               name='plus_to_midpoint')
    minus_to_midpoint = pe.Node(ants.ApplyTransforms(float=True,
                                                     interpolation='LanczosWindowedSinc',
                                                     dimension=3),
                                name='minus_to_midpoint')

    qwarp = pe.Node(afni.QwarpPlusMinus(pblur=[0.05, 0.05],
                                        blur=[-1, -1],
                                        noweight=True,
                                        minpatch=9,
                                        nopadWARP=True,
                                        environ={'OMP_NUM_THREADS': '%d' % omp_nthreads},
                                        args=args),
                    name='qwarp', n_procs=omp_nthreads)

    to_ants_plus = pe.Node(niu.Function(function=_fix_hdr), name='to_ants_plus',
                           mem_gb=0.01)
    to_ants_minus = pe.Node(niu.Function(function=_fix_hdr), name='to_ants_minus',
                            mem_gb=0.01)

    cphdr_plus_warp = pe.Node(CopyHeader(), name='cphdr_plus_warp', mem_gb=0.01)
    cphdr_minus_warp = pe.Node(CopyHeader(), name='cphdr_minus_warp', mem_gb=0.01)

    unwarp_plus_reference = pe.Node(ants.ApplyTransforms(dimension=3,
                                                         float=True,
                                                         interpolation='LanczosWindowedSinc'),
                                    name='unwarp_plus_reference')
    unwarp_minus_reference = pe.Node(ants.ApplyTransforms(dimension=3,
                                                          float=True,
                                                          interpolation='LanczosWindowedSinc'),
                                     name='unwarp_minus_reference')
    unwarped_to_list = pe.Node(niu.Merge(2), name="unwarped_to_list")
    merge_unwarped = pe.Node(ants.AverageImages(dimension=3, normalize=True),
                             name="merge_unwarped")

    final_ref = init_dwi_reference_wf(name="final_ref")

    workflow.connect([
        (inputnode, plus_ref_wf, [('template_plus', 'inputnode.b0_template')]),
        (plus_ref_wf, inputs_to_list, [('outputnode.ref_image', 'in1')]),
        (inputnode, minus_ref_wf, [('template_minus', 'inputnode.b0_template')]),
        (minus_ref_wf, inputs_to_list, [('outputnode.ref_image', 'in2')]),
        (inputs_to_list, align_reverse_pe_wf, [('out', 'inputnode.b0_images')]),
        (align_reverse_pe_wf, get_midpoint_transforms, [('outputnode.forward_transforms',
                                                         'inlist')]),
        (get_midpoint_transforms, outputnode, [('out1', 'out_affine_plus'),
                                               ('out2', 'out_affine_minus')]),
        (plus_ref_wf, plus_to_midpoint, [('outputnode.ref_image', 'input_image')]),
        (minus_ref_wf, minus_to_midpoint, [('outputnode.ref_image', 'input_image')]),
        (get_midpoint_transforms, plus_to_midpoint, [('out1', 'transforms')]),
        (align_reverse_pe_wf, plus_to_midpoint, [('outputnode.final_template',
                                                  'reference_image')]),
        (get_midpoint_transforms, minus_to_midpoint, [('out2', 'transforms')]),
        (align_reverse_pe_wf, minus_to_midpoint, [('outputnode.final_template',
                                                  'reference_image')]),
        (plus_to_midpoint, qwarp, [('output_image', 'in_file')]),
        (minus_to_midpoint, qwarp, [('output_image', 'base_file')]),
        (align_reverse_pe_wf, cphdr_plus_warp, [('outputnode.final_template', 'hdr_file')]),
        (align_reverse_pe_wf, cphdr_minus_warp, [('outputnode.final_template', 'hdr_file')]),
        (qwarp, cphdr_plus_warp, [('source_warp', 'in_file')]),
        (qwarp, cphdr_minus_warp, [('base_warp', 'in_file')]),
        (cphdr_plus_warp, to_ants_plus, [('out_file', 'in_file')]),
        (cphdr_minus_warp, to_ants_minus, [('out_file', 'in_file')]),

        (to_ants_minus, unwarp_minus_reference, [('out', 'transforms')]),
        (minus_to_midpoint, unwarp_minus_reference, [('output_image', 'reference_image'),
                                                     ('output_image', 'input_image')]),
        (to_ants_minus, outputnode, [('out', 'out_warp_minus')]),

        (to_ants_plus, unwarp_plus_reference, [('out', 'transforms')]),
        (plus_to_midpoint, unwarp_plus_reference, [('output_image', 'reference_image'),
                                                   ('output_image', 'input_image')]),
        (to_ants_plus, outputnode, [('out', 'out_warp_plus')]),

        (unwarp_plus_reference, unwarped_to_list, [('output_image', 'in1')]),
        (unwarp_minus_reference, unwarped_to_list, [('output_image', 'in2')]),
        (unwarped_to_list, merge_unwarped, [('out', 'images')]),

        (merge_unwarped, final_ref, [('output_average_image', 'inputnode.b0_template')]),
        (final_ref, outputnode, [('outputnode.ref_image', 'out_reference'),
                                 ('outputnode.ref_image_brain', 'out_reference_brain'),
                                 ('outputnode.dwi_mask', 'out_mask')])
    ])

    return workflow
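The `_fix_hdr` helper wrapped in the `niu.Function` nodes is not part of this excerpt. In similar fMRIPrep/QSIPrep code it rewrites 3dQwarp's warp so ANTs will accept it as a displacement field; a sketch under that assumption (imports sit inside the function because nipype `Function` nodes serialize the body alone):

def _fix_hdr(in_file, newpath=None):
    """Assumed helper: force float32 vector data and a clean header on the warp."""
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix

    nii = nb.load(in_file)
    hdr = nii.header.copy()
    hdr.set_data_dtype('<f4')
    hdr.set_intent('vector', (), '')  # mark as a displacement-vector image
    out_file = fname_presuffix(in_file, suffix='_warpfield', newpath=newpath)
    nb.Nifti1Image(nii.get_fdata(dtype='float32'), nii.affine,
                   hdr).to_filename(out_file)
    return out_file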
Example #12
def init_glm_wf(conditions,
                contrasts,
                repetition_time,
                use_mov_pars,
                name="glm"):
    """
    create workflow to calculate a first level glm for task functional data

    :param conditions: dictionary of conditions with onsets and durations 
        by condition names
    :param contrasts: dictionary of contrasts by names
    :param repetition_time: repetition time
    :param use_mov_pars: if true, regress out movement parameters when 
        calculating the glm
    :param name: workflow name (Default value = "glm")

    """
    workflow = pe.Workflow(name=name)

    # inputs are the bold file, the mask file and the confounds file
    # that contains the movement parameters
    inputnode = pe.Node(niu.IdentityInterface(
        fields=["bold_file", "mask_file", "confounds_file"]),
                        name="inputnode")

    # transform (unordered) conditions dictionary into three (ordered) lists

    names = list(conditions.keys())
    onsets = [conditions[k]["onsets"] for k in names]
    durations = [conditions[k]["durations"] for k in names]

    # first level model specification
    modelspec = pe.Node(interface=model.SpecifyModel(
        input_units="secs",
        high_pass_filter_cutoff=128.,
        time_repetition=repetition_time,
        subject_info=Bunch(conditions=names,
                           onsets=onsets,
                           durations=durations)),
                        name="modelspec")

    # transform contrasts dictionary to nipype list data structure
    contrasts_ = [[k, "T"] +
                  [list(i) for i in zip(*[(n, val) for n, val in v.items()])]
                  for k, v in contrasts.items()]

    connames = [k[0] for k in contrasts_]

    # outputs are cope, varcope and zstat for each contrast and a dof_file
    outputnode = pe.Node(niu.IdentityInterface(fields=sum(
        [["%s_img" % conname,
          "%s_varcope" % conname,
          "%s_zstat" % conname] for conname in connames], []) + ["dof_file"]),
                         name="outputnode")

    outputnode._interface.names = connames

    # generate design from first level specification
    level1design = pe.Node(interface=fsl.Level1Design(
        contrasts=contrasts_,
        interscan_interval=repetition_time,
        model_serial_correlations=True,
        bases={"dgamma": {
            "derivs": False
        }}),
                           name="level1design")

    # generate required input files for FILMGLS from design
    modelgen = pe.Node(interface=fsl.FEATModel(),
                       name="modelgen",
                       iterfield=["fsf_file", "ev_files"])

    # calculate range of image values to determine cutoff value
    # for FILMGLS
    stats = pe.Node(interface=fsl.ImageStats(op_string="-R"), name="stats")

    # actually estimate the first level model
    modelestimate = pe.Node(interface=fsl.FILMGLS(smooth_autocorr=True,
                                                  mask_size=5),
                            name="modelestimate",
                            iterfield=["design_file", "in_file", "tcon_file"])

    # mask regression outputs
    maskimgs = pe.MapNode(interface=fsl.ApplyMask(),
                          name="maskimgs",
                          iterfield=["in_file"])
    maskvarcopes = pe.MapNode(interface=fsl.ApplyMask(),
                              name="maskvarcopes",
                              iterfield=["in_file"])
    maskzstats = pe.MapNode(interface=fsl.ApplyMask(),
                            name="maskzstats",
                            iterfield=["in_file"])

    # split regression outputs by name
    splitimgs = pe.Node(interface=niu.Split(splits=[1
                                                    for conname in connames]),
                        name="splitimgs")
    splitvarcopes = pe.Node(
        interface=niu.Split(splits=[1 for conname in connames]),
        name="splitvarcopes")
    splitzstats = pe.Node(
        interface=niu.Split(splits=[1 for conname in connames]),
        name="splitzstats")

    # pass movement parameters to glm model specification if requested
    c = [("bold_file", "functional_runs")]
    if use_mov_pars:
        c.append(("confounds_file", "realignment_parameters"))

    workflow.connect([
        (inputnode, modelspec, c),
        (inputnode, modelestimate, [("bold_file", "in_file")]),
        (modelspec, level1design, [("session_info", "session_info")]),
        (level1design, modelgen, [("fsf_files", "fsf_file"),
                                  ("ev_files", "ev_files")]),
        (inputnode, stats, [("bold_file", "in_file")]),
        (stats, modelestimate, [(("out_stat", get_float), "threshold")]),
        (modelgen, modelestimate, [("design_file", "design_file"),
                                   ("con_file", "tcon_file")]),
        (inputnode, maskimgs, [("mask_file", "mask_file")]),
        (inputnode, maskvarcopes, [("mask_file", "mask_file")]),
        (inputnode, maskzstats, [("mask_file", "mask_file")]),
        (modelestimate, maskimgs, [
            (("copes", flatten), "in_file"),
        ]),
        (modelestimate, maskvarcopes, [
            (("varcopes", flatten), "in_file"),
        ]),
        (modelestimate, maskzstats, [
            (("zstats", flatten), "in_file"),
        ]),
        (modelestimate, outputnode, [("dof_file", "dof_file")]),
        (maskimgs, splitimgs, [
            ("out_file", "inlist"),
        ]),
        (maskvarcopes, splitvarcopes, [
            ("out_file", "inlist"),
        ]),
        (maskzstats, splitzstats, [
            ("out_file", "inlist"),
        ]),
    ])

    # connect outputs named for the contrasts
    for i, conname in enumerate(connames):
        workflow.connect(splitimgs, "out%i" % (i + 1), outputnode,
                         "%s_img" % conname)
        workflow.connect(splitvarcopes, "out%i" % (i + 1), outputnode,
                         "%s_varcope" % conname)
        workflow.connect(splitzstats, "out%i" % (i + 1), outputnode,
                         "%s_zstat" % conname)

    return workflow, connames
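A usage sketch illustrating the dictionary shapes the docstring describes (condition names, timings, and contrast weights are placeholders):

# Hypothetical invocation.
conditions = {
    'go':   {'onsets': [0.0, 30.0], 'durations': [15.0, 15.0]},
    'stop': {'onsets': [15.0, 45.0], 'durations': [15.0, 15.0]},
}
contrasts = {'go_gt_stop': {'go': 1.0, 'stop': -1.0}}
wf, connames = init_glm_wf(conditions, contrasts,
                           repetition_time=2.0, use_mov_pars=True)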
Example #13
def create_mtr_workflow(scan_directory: str,
                        patient_id: str = None,
                        scan_id: str = None,
                        reorient: str = 'RAI',
                        split_mton_flag=False,
                        use_iacl_struct=False) -> pe.Workflow:
    '''
    Registers the MT-on/MT-off images to a target and estimates an MTR map
    :param scan_directory:
    :param patient_id:
    :param scan_id:
    :param reorient:
    :param split_mton_flag:
    :param use_iacl_struct:
    :return: A :class:`nipype.pipeline.engine.Workflow` object
    :rtype: nipype.pipeline.engine.Workflow
    '''

    name = 'mtr'

    if patient_id is not None and scan_id is not None:
        scan_directory = os.path.join(scan_directory, patient_id, 'pipeline')
        name += '_' + scan_id

    wf = pe.Workflow(name, scan_directory)

    input_node = pe.Node(util.IdentityInterface(
        fields=['mton_file', 'mtoff_file', 'target_file', 'brainmask_file'],
        mandatory_inputs=False),
                         name='input_node')

    mtfile_node = pe.Node(
        util.IdentityInterface(fields=['mton_file', 'mtoff_file']),
        name='mtfile_node')

    if split_mton_flag:
        split_mt = pe.Node(fsl.Split(), 'split_mt')
        split_mt.inputs.dimension = 't'
        wf.connect(input_node, 'mton_file', split_mt, 'in_file')

        split_mt_files = pe.Node(util.Split(), 'split_mt_files')
        split_mt_files.inputs.splits = [1, 1]
        split_mt_files.inputs.squeeze = True
        wf.connect(split_mt, 'out_files', split_mt_files, 'inlist')

        wf.connect(split_mt_files, 'out1', mtfile_node,
                   'mton_file')  # TODO: Check if MTON is first
        wf.connect(split_mt_files, 'out2', mtfile_node, 'mtoff_file')
    else:
        wf.connect(input_node, 'mton_file', mtfile_node, 'mton_file')
        wf.connect(input_node, 'mtoff_file', mtfile_node, 'mtoff_file')

    #wf.connect(input_node, 'mton_file', mtfile_node, 'mton_file')
    #wf.connect(input_node, 'mtoff_file', mtfile_node, 'mtoff_file')

    # Reorient
    if reorient is not None:
        reorient_mton_to_target = pe.Node(image.Reorient(),
                                          iterfield=['in_file'],
                                          name='reorient_mton_to_target')
        reorient_mton_to_target.inputs.orientation = reorient
        wf.connect(mtfile_node, 'mton_file', reorient_mton_to_target,
                   'in_file')

        reorient_mtoff_to_target = pe.Node(image.Reorient(),
                                           iterfield=['in_file'],
                                           name='reorient_mtoff_to_target')
        reorient_mtoff_to_target.inputs.orientation = reorient
        wf.connect(mtfile_node, 'mtoff_file', reorient_mtoff_to_target,
                   'in_file')

    #select_first_t2star = pe.Node(util.Split(), name='get_first_t2star')
    #select_first_t2star.inputs.splits = [1, num_t2star_files - 1]
    #select_first_t2star.inputs.squeeze = True

    #if reorient is not None:
    #    wf.connect(reorient_to_target, 'out_file', select_first_t2star, 'inlist')
    #else:
    #    wf.connect(input_node, 't2star_files', select_first_t2star, 'inlist')

    affine_reg_to_target = pe.Node(ants.Registration(),
                                   name='affine_reg_to_target')
    affine_reg_to_target.inputs.dimension = 3
    affine_reg_to_target.inputs.interpolation = 'Linear'
    affine_reg_to_target.inputs.metric = ['MI', 'MI']
    affine_reg_to_target.inputs.metric_weight = [1.0, 1.0]
    affine_reg_to_target.inputs.radius_or_number_of_bins = [32, 32]
    affine_reg_to_target.inputs.sampling_strategy = ['Regular', 'Regular']
    affine_reg_to_target.inputs.sampling_percentage = [0.25, 0.25]
    affine_reg_to_target.inputs.transforms = ['Rigid', 'Affine']
    affine_reg_to_target.inputs.transform_parameters = [(0.1, ), (0.1, )]
    affine_reg_to_target.inputs.number_of_iterations = [[100, 50, 25],
                                                        [100, 50, 25]]
    affine_reg_to_target.inputs.convergence_threshold = [1e-6, 1e-6]
    affine_reg_to_target.inputs.convergence_window_size = [10, 10]
    affine_reg_to_target.inputs.smoothing_sigmas = [[4, 2, 1], [4, 2, 1]]
    affine_reg_to_target.inputs.sigma_units = ['vox', 'vox']
    affine_reg_to_target.inputs.shrink_factors = [[4, 2, 1], [4, 2, 1]]
    affine_reg_to_target.inputs.write_composite_transform = True
    affine_reg_to_target.inputs.initial_moving_transform_com = 1
    affine_reg_to_target.inputs.output_warped_image = True
    # wf.connect(select_first_t2star, 'out1', affine_reg_to_target, 'moving_image') #TODO: Check mtoff is registered?
    wf.connect(input_node, 'target_file', affine_reg_to_target, 'fixed_image')
    if reorient is not None:
        wf.connect(reorient_mtoff_to_target, 'out_file', affine_reg_to_target,
                   'moving_image')
    else:
        wf.connect(mtfile_node, 'mtoff_file', affine_reg_to_target,
                   'moving_image')

    transform_mton = pe.Node(ants.ApplyTransforms(), name='transform_mton')
    transform_mton.inputs.input_image_type = 3
    wf.connect(input_node, 'target_file', transform_mton, 'reference_image')
    wf.connect(affine_reg_to_target, 'composite_transform', transform_mton,
               'transforms')
    if reorient is not None:
        wf.connect(reorient_mton_to_target, 'out_file', transform_mton,
                   'input_image')
    else:
        wf.connect(mtfile_node, 'mton_file', transform_mton, 'input_image')

    estimate = pe.Node(EstimateMTR(), name='estimate_mtr')
    wf.connect(transform_mton, 'output_image', estimate, 'mton_file')
    wf.connect(affine_reg_to_target, 'warped_image', estimate, 'mtoff_file')
    wf.connect(input_node, 'brainmask_file', estimate, 'brainmask_file')

    #TODO: Copy output to a final folder
    # Set up base filename for copying outputs
    if use_iacl_struct:
        out_file_base = os.path.join(scan_directory, patient_id, scan_id,
                                     patient_id + '_' + scan_id)
    else:
        if patient_id is not None:
            out_file_base = patient_id + '_' + scan_id if scan_id is not None else patient_id
        else:
            out_file_base = 'out'
        out_file_base = os.path.join(scan_directory, out_file_base)

    export_mtr = pe.Node(io.ExportFile(), name='export_mtr')
    export_mtr.inputs.check_extension = True
    export_mtr.inputs.clobber = True
    export_mtr.inputs.out_file = out_file_base + '_MTR.nii.gz'
    wf.connect(estimate, 'mtr_file', export_mtr, 'in_file')

    return wf
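A usage sketch (directory layout and file names are placeholders):

# Hypothetical invocation.
wf = create_mtr_workflow('/data/study', patient_id='P001', scan_id='01')
wf.inputs.input_node.mton_file = 'mton.nii.gz'
wf.inputs.input_node.mtoff_file = 'mtoff.nii.gz'
wf.inputs.input_node.target_file = 't1.nii.gz'
wf.inputs.input_node.brainmask_file = 'brainmask.nii.gz'
wf.run()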
Example #14
def init_combine_mp2rage_wf(sourcedata,
                            derivatives,
                            name='combine_mp2rages',
                            n_mp2rages=2):

    wf = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'sourcedata', 'derivatives', 'subject', 'session', 'acquisition'
    ]),
                        name='inputnode')

    inputnode.inputs.sourcedata = sourcedata
    inputnode.inputs.derivatives = derivatives

    get_parameters = pe.MapNode(niu.Function(
        function=get_mp2rage_pars,
        input_names=['sourcedata', 'subject', 'session', 'acquisition'],
        output_names=['mp2rage_parameters']),
                                iterfield=['acquisition'],
                                name='get_mp2rage_pars')

    wf.connect([(inputnode, get_parameters, [('sourcedata', 'sourcedata'),
                                             ('subject', 'subject'),
                                             ('session', 'session'),
                                             ('acquisition', 'acquisition')])])

    make_t1w = pe.MapNode(niu.Function(function=fit_mp2rage,
                                       input_names=['mp2rage_parameters'],
                                       output_names=['t1w_uni', 't1map']),
                          iterfield=['mp2rage_parameters'],
                          name='make_t1w')

    wf.connect([(get_parameters, make_t1w, [('mp2rage_parameters',
                                             'mp2rage_parameters')])])

    get_first_inversion = pe.MapNode(niu.Function(
        function=get_inv,
        input_names=['mp2rage_parameters', 'inv', 'echo'],
        output_names='inv1'),
                                     iterfield=['mp2rage_parameters'],
                                     name='get_first_inversion')

    get_first_inversion.inputs.inv = 1
    get_first_inversion.inputs.echo = 1
    wf.connect(get_parameters, 'mp2rage_parameters', get_first_inversion,
               'mp2rage_parameters')

    split = pe.Node(niu.Split(splits=[1, n_mp2rages - 1]), name='split')
    wf.connect(get_first_inversion, 'inv1', split, 'inlist')

    flirt = pe.MapNode(fsl.FLIRT(dof=6), iterfield=['in_file'], name='flirt')

    wf.connect(split, ('out1', _pickone), flirt, 'reference')
    wf.connect(split, 'out2', flirt, 'in_file')

    convert2itk = pe.MapNode(C3dAffineTool(),
                             iterfield=['source_file', 'transform_file'],
                             name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True

    wf.connect(flirt, 'out_matrix_file', convert2itk, 'transform_file')
    wf.connect(split, ('out1', _pickone), convert2itk, 'reference_file')
    wf.connect(split, 'out2', convert2itk, 'source_file')

    transform_t1w_wf = init_transform_to_first_image_wf('transforms_t1w',
                                                        n_images=n_mp2rages)

    wf.connect(make_t1w, 't1w_uni', transform_t1w_wf, 'inputnode.in_files')
    wf.connect(convert2itk, 'itk_transform', transform_t1w_wf,
               'inputnode.transforms')

    get_second_inversion = pe.MapNode(niu.Function(
        function=get_inv,
        input_names=['mp2rage_parameters', 'inv', 'echo'],
        output_names='inv2'),
                                      iterfield=['mp2rage_parameters'],
                                      name='get_second_inversion')
    get_second_inversion.inputs.inv = 2

    transform_inv2_wf = init_transform_to_first_image_wf('transforms_inv2',
                                                         n_images=n_mp2rages)
    wf.connect(get_parameters, 'mp2rage_parameters', get_second_inversion,
               'mp2rage_parameters')
    wf.connect(get_second_inversion, 'inv2', transform_inv2_wf,
               'inputnode.in_files')
    wf.connect(convert2itk, 'itk_transform', transform_inv2_wf,
               'inputnode.transforms')

    transform_t1map_wf = init_transform_to_first_image_wf('transform_t1map',
                                                          n_images=n_mp2rages)

    wf.connect(make_t1w, 't1map', transform_t1map_wf, 'inputnode.in_files')
    wf.connect(convert2itk, 'itk_transform', transform_t1map_wf,
               'inputnode.transforms')

    ds_t1w = pe.MapNode(DerivativesDataSink(base_directory=derivatives,
                                            keep_dtype=False,
                                            out_path_base='t1w',
                                            suffix='T1w'),
                        iterfield=['in_file', 'source_file'],
                        name='ds_t1w')

    reorient_t1w = pe.MapNode(Reorient(),
                              iterfield=['in_file'],
                              name='reorient_t1w')

    wf.connect(make_t1w, 't1w_uni', reorient_t1w, 'in_file')
    wf.connect(reorient_t1w, 'out_file', ds_t1w, 'in_file')
    wf.connect(get_first_inversion, 'inv1', ds_t1w, 'source_file')

    ds_t1map = pe.MapNode(DerivativesDataSink(base_directory=derivatives,
                                              keep_dtype=False,
                                              out_path_base='t1map',
                                              suffix='T1w'),
                          iterfield=['in_file', 'source_file'],
                          name='ds_t1map')

    reorient_t1map = pe.MapNode(Reorient(),
                                iterfield=['in_file'],
                                name='reorient_t1map')

    wf.connect(make_t1w, 't1map', reorient_t1map, 'in_file')
    wf.connect(reorient_t1map, 'out_file', ds_t1map, 'in_file')
    wf.connect(get_first_inversion, 'inv1', ds_t1map, 'source_file')

    ds_t1w_average = pe.Node(DerivativesDataSink(
        base_directory=derivatives,
        keep_dtype=False,
        out_path_base='averaged_mp2rages',
        suffix='T1w',
        space='average'),
                             name='ds_t1w_average')

    rename = pe.Node(niu.Rename(use_fullpath=True), name='rename')
    rename.inputs.format_string = '%(path)s/sub-%(subject_id)s_ses-%(session)s_MPRAGE.nii.gz'
    rename.inputs.parse_string = '(?P<path>.+)/sub-(?P<subject_id>.+)_ses-(?P<session>.+)_acq-.+_MPRAGE.nii(.gz)?'

    wf.connect(get_first_inversion, ('inv1', _pickone), rename, 'in_file')
    reorient_average_t1w = pe.Node(Reorient(), name='reorient_average_t1w')
    wf.connect(transform_t1w_wf, 'outputnode.mean_image', reorient_average_t1w,
               'in_file')
    wf.connect(reorient_average_t1w, 'out_file', ds_t1w_average, 'in_file')
    wf.connect(rename, 'out_file', ds_t1w_average, 'source_file')

    ds_t1map_average = pe.Node(DerivativesDataSink(
        base_directory=derivatives,
        keep_dtype=False,
        out_path_base='averaged_mp2rages',
        suffix='T1map',
        space='average'),
                               name='ds_t1map_average')

    reorient_t1map_average = pe.Node(Reorient(), name='reorient_t1map_average')
    wf.connect(rename, 'out_file', ds_t1map_average, 'source_file')
    wf.connect(transform_t1map_wf, 'outputnode.mean_image',
               reorient_t1map_average, 'in_file')
    wf.connect(reorient_t1map_average, 'out_file', ds_t1map_average, 'in_file')

    ds_inv2 = pe.Node(DerivativesDataSink(base_directory=derivatives,
                                          keep_dtype=False,
                                          out_path_base='averaged_mp2rages',
                                          suffix='INV2',
                                          space='average'),
                      name='ds_inv2')

    reorient_inv2 = pe.Node(Reorient(), name='reorient_inv2')

    wf.connect(rename, 'out_file', ds_inv2, 'source_file')
    wf.connect(transform_inv2_wf, 'outputnode.mean_image', reorient_inv2,
               'in_file')
    wf.connect(reorient_inv2, 'out_file', ds_inv2, 'in_file')

    return wf
Example #15
def init_qsiprep_intramodal_template_wf(
    inputs_list, transform="Rigid", num_iterations=2,
    mem_gb=3, omp_nthreads=1, name="intramodal_template_wf"):
    """Create an unbiased intramodal template for a subject. This aligns the b=0 references
    from all the scans of a subject. Can be rigid, affine or nonlinear (BSplineSyN).

    **Parameters**
        inputs_list: list of inputs
            List of identifiers for the input b=0 images.
        transform: 'Rigid', 'Affine', 'BSplineSyN'
            Which transform to ultimately use. If 'BSplineSyN', first 2 iterations of Affine will
            be run.
        num_iterations: int
            Default: 2.

    **Inputs**

        [workflow_name]_image...
            One input for each input image. There is no input called inputs_list
        t1w_image

    **Outputs**
        [workflow_name]_transform
            transform files to the intramodal template

        intramodal_template_to_t1w_transform
            Transform from the intramodal template to the T1w image.

    """
    workflow = Workflow(name=name)
    input_names = [name + '_b0_template' for name in inputs_list]
    output_names = [name + '_transform' for name in inputs_list]

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=input_names + ['t1w_brain']),
        name='inputnode')
    merge_inputs = pe.Node(niu.Merge(len(input_names)), name='merge_inputs')
    for input_num, input_name in enumerate(input_names):
        workflow.connect(inputnode, input_name, merge_inputs, 'in%d' % (input_num + 1))

    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=output_names + ["intramodal_template",
                                   "intramodal_template_mask",
                                   "intramodal_template_to_t1w_transform"]),
        name='outputnode')
    split_outputs = pe.Node(niu.Split(splits=[1] * len(input_names), squeeze=True),
                            name='split_outputs')
    for output_num, output_name in enumerate(output_names):
        workflow.connect(split_outputs, 'out%d' % (output_num + 1), outputnode, output_name)

    # N4 correct
    n4_correct = pe.MapNode(
        ants.N4BiasFieldCorrection(
            dimension=3,
            copy_header=True,
            n_iterations=[50, 50, 40, 30],
            shrink_factor=2,
            convergence_threshold=0.00000001,
            bspline_fitting_distance=200,
            bspline_order=3),
        name='n4_correct',
        iterfield=['input_image'])

    # Should we add nonlinear iterations?
    do_nonlinear = transform not in ('Rigid', 'Affine')

    # Align the b=0 images from all runs (Linear)
    initial_transform = 'Affine' if do_nonlinear else transform
    intramodal_b0_affine_template = init_b0_hmc_wf(
        align_to='iterative',
        num_iters=2,
        transform=initial_transform,
        spatial_bias_correct=True,
        name='intramodal_b0_affine_template')

    intramodal_template_mask = init_skullstrip_b0_wf(name="intramodal_template_mask")

    workflow.connect([
        (merge_inputs, n4_correct, [('out', 'input_image')]),
        (n4_correct, intramodal_b0_affine_template, [
            ('output_image', 'inputnode.b0_images')]),
        (intramodal_template_mask, outputnode, [
            ('outputnode.mask_file', 'intramodal_template_mask')])
    ])
    if not do_nonlinear:
        workflow.connect([
            (intramodal_b0_affine_template, intramodal_template_mask, [
                ('outputnode.final_template', 'inputnode.in_file')]),
            (intramodal_b0_affine_template, split_outputs, [
                (('outputnode.forward_transforms', _list_squeeze), 'inlist')])
        ])
    else:
        nonlinear_alignment_wf = init_nonlinear_alignment_wf(num_iters=num_iterations)
        workflow.connect([
            (n4_correct, nonlinear_alignment_wf, [('output_image', 'inputnode.images')]),
            (nonlinear_alignment_wf, intramodal_template_mask, [
                ('outputnode.final_template', 'inputnode.in_file')]),
            (intramodal_b0_affine_template, nonlinear_alignment_wf, [
                ('outputnode.final_template', 'inputnode.initial_template')]),
            (nonlinear_alignment_wf, split_outputs, [
                ('outputnode.forward_transforms', 'inlist')])
        ])

    return workflow
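A usage sketch; the identifiers in `inputs_list` drive the generated field names (`<name>_b0_template` in, `<name>_transform` out), so the run names below are placeholders:

# Hypothetical invocation with two scans.
wf = init_qsiprep_intramodal_template_wf(['run1', 'run2'], transform='Rigid')
wf.inputs.inputnode.run1_b0_template = 'run1_b0.nii.gz'
wf.inputs.inputnode.run2_b0_template = 'run2_b0.nii.gz'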
Example #16
def init_intramodal_template_wf(inputs_list, t1w_source_file, reportlets_dir, transform="Rigid",
                                num_iterations=2, mem_gb=3, omp_nthreads=1,
                                name="intramodal_template_wf"):
    """Create an unbiased intramodal template for a subject. This aligns the b=0 references
    from all the scans of a subject. Can be rigid, affine or nonlinear (BSplineSyN).

    **Parameters**
        inputs_list: list of inputs
            List of identifiers for the input b=0 images.
        transform: 'Rigid', 'Affine', 'BSplineSyN'
            Which transform to ultimately use. If 'BSplineSyN', first 2 iterations of Affine will
            be run.
        num_iterations: int
            Default: 2.

    **Inputs**

        [workflow_name]_image...
            One input for each input image. There is no input called inputs_list
        t1w_image

    **Outputs**
        [workflow_name]_transform
            transform files to the intramodal template

        intramodal_template_to_t1w_transform
            Transform from the intramodal template to the T1w image.

    """
    workflow = Workflow(name=name)
    input_names = [name.replace('-', '_') + '_b0_template' for name in inputs_list]
    output_names = [name.replace('-', '_') + '_transform' for name in inputs_list]

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=input_names + [
                't1_brain', 't1_preproc', 't1_mask', 't1_seg', 'subjects_dir', 'subject_id',
                't1_aseg', 't1_aparc', 't1_tpms', 't1_2_mni_forward_transform',
                'dwi_sampling_grid', 't1_2_fsnative_forward_transform',
                't1_2_fsnative_reverse_transform', 't1_2_mni_reverse_transform']),
        name='inputnode')
    merge_inputs = pe.Node(niu.Merge(len(input_names)), name='merge_inputs')
    rename_inputs = pe.MapNode(
        niu.Rename(keep_ext=True),
        iterfield=['in_file', 'format_string'],
        name='rename_inputs')
    rename_inputs.inputs.format_string = input_names
    rename_inputs.synchronize = True
    for input_num, input_name in enumerate(input_names):
        workflow.connect(inputnode, input_name, merge_inputs, 'in%d' % (input_num + 1))

    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=output_names + ["intramodal_template",
                                   "intramodal_template_mask",
                                   "intramodal_template_to_t1_affine",
                                   "intramodal_template_to_t1_warp"]),
        name='outputnode')
    split_outputs = pe.Node(niu.Split(splits=[1] * len(input_names), squeeze=True),
                            name='split_outputs')
    for output_num, output_name in enumerate(output_names):
        workflow.connect(split_outputs, 'out%d' % (output_num + 1), outputnode, output_name)

    runtime_opts = {'num_cores': 1, 'parallel_control': 0}
    if omp_nthreads > 1:
        runtime_opts = {'num_cores': omp_nthreads, 'parallel_control': 2}
    ants_mvtc2 = pe.Node(MultivariateTemplateConstruction2(dimension=3, **runtime_opts),
                         name='ants_mvtc2')
    intramodal_template_mask = init_skullstrip_b0_wf(name="intramodal_template_mask")

    workflow.connect([
        (merge_inputs, rename_inputs, [('out', 'in_file')]),
        (rename_inputs, ants_mvtc2, [('out_file', 'input_images')]),
        (intramodal_template_mask, outputnode, [
            ('outputnode.mask_file', 'intramodal_template_mask')]),
        (ants_mvtc2, intramodal_template_mask, [
            ('templates', 'inputnode.in_file')]),
        (ants_mvtc2, split_outputs, [
            ('forward_transforms', 'inlist')]),
        (ants_mvtc2, outputnode, [
            ('templates', 'intramodal_template')])
    ])

    # calculate dwi registration to T1w
    b0_coreg_wf = init_b0_to_anat_registration_wf(omp_nthreads=omp_nthreads,
                                                  mem_gb=mem_gb,
                                                  write_report=True)

    workflow.connect([
        (inputnode, b0_coreg_wf, [
            ('t1_brain', 'inputnode.t1_brain'),
            ('t1_seg', 'inputnode.t1_seg'),
            ('subjects_dir', 'inputnode.subjects_dir'),
            ('subject_id', 'inputnode.subject_id'),
            ('t1_2_fsnative_reverse_transform',
             'inputnode.t1_2_fsnative_reverse_transform')]),
        (ants_mvtc2, b0_coreg_wf, [
            ('templates', 'inputnode.ref_b0_brain')]),
        (b0_coreg_wf, outputnode, [
            ('outputnode.itk_b0_to_t1', 'intramodal_template_to_t1_affine')])
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = reportlets_dir
            workflow.get_node(node).inputs.source_file = t1w_source_file

    return workflow
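A minimal sketch (added; not part of the original example) of the fan-in/fan-out pattern above: niu.Merge collects N named inputs into one ordered list, and niu.Split with splits=[1] * N and squeeze=True fans the per-input results back out as out1..outN. Node and file names are illustrative only.

def fanin_fanout_sketch():
    from nipype.pipeline import engine as pe
    from nipype.interfaces import utility as niu

    names = ['dwi_1', 'dwi_2', 'dwi_3']

    inputnode = pe.Node(niu.IdentityInterface(fields=names), name='inputnode')
    inputnode.inputs.dwi_1 = 'a.nii'
    inputnode.inputs.dwi_2 = 'b.nii'
    inputnode.inputs.dwi_3 = 'c.nii'

    # fan-in: one list holding all inputs in a fixed order
    merge = pe.Node(niu.Merge(len(names)), name='merge')
    # fan-out: squeeze=True unwraps each length-1 sublist into a plain value
    split = pe.Node(niu.Split(splits=[1] * len(names), squeeze=True),
                    name='split')

    wf = pe.Workflow(name='fanin_fanout')
    wf.connect([(inputnode, merge, [(name, 'in%d' % (i + 1))
                                    for i, name in enumerate(names)]),
                (merge, split, [('out', 'inlist')])])
    return wf  # after running: split.out1 == 'a.nii', split.out2 == 'b.nii', ...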
Example #17
merge_mbRef = pe.Node(interface=fsl.Merge(dimension='t'), name='merge_mbRef')
preproc.connect(join_mbRef, ('files', flatten), merge_mbRef, 'in_files')

# calculate how well the normalized mbRef images overlap
calc_overlap_mbRef = pe.Node(interface=fsl.ImageMaths(op_string='-Tmean'),
                             name='calc_overlap_mbRef')
preproc.connect(merge_mbRef, 'merged_file', calc_overlap_mbRef, 'in_file')

# create a mask where at least 95% of the normalized mbRef images overlap
mask_overlap_mbRef = pe.Node(interface=fsl.ImageMaths(op_string='-thr .95 -bin'),
                             name='mask_overlap_mbRef')
preproc.connect(calc_overlap_mbRef, 'out_file', mask_overlap_mbRef, 'in_file')

# determine the ROI: fslstats -w gives the smallest box containing the mask
get_roi = pe.Node(interface=fsl.ImageStats(op_string='-w'), name='get_roi')
preproc.connect(mask_overlap_mbRef, 'out_file', get_roi, 'in_file')

# split the eight bounding-box values into individual outputs
split_roi_coords = pe.Node(interface=util.Split(splits=[1] * 8),
                           name='split_roi_coords')
preproc.connect(get_roi, 'out_stat', split_roi_coords, 'inlist')


def unlist_long(mylist):
    """ returns the first element of a list as an integer

    Arguments:
        mylist {list} -- list of numbers

    Returns:
        int -- first element of the list
    """
    # ``long`` existed only in Python 2; in Python 3 ``int`` covers it
    r = mylist[0]
    return int(r)
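A short added sketch (values are made up) of what the get_roi -> split_roi_coords -> unlist_long chain produces: fslstats -w reports the smallest ROI containing all nonzero voxels as eight numbers (xmin xsize ymin ysize zmin zsize tmin tsize), util.Split with splits=[1] * 8 wraps each one in a single-element list, and unlist_long unwraps it again.

roi_stats = [12, 52, 9, 63, 4, 30, 0, 1]  # hypothetical fslstats -w output

# util.Split(splits=[1] * 8) hands each downstream node a one-element list ...
coords = [[v] for v in roi_stats]

# ... which unlist_long collapses back to a plain number
x_min, x_size = unlist_long(coords[0]), unlist_long(coords[1])
assert (x_min, x_size) == (12, 52)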
Example #18
def init_falff_wf(workdir: str | Path,
                  feature=None,
                  fwhm=None,
                  memcalc=MemoryCalculator.default()):
    """
    Calculate amplitude of low-frequency fluctuations (ALFF) and
    fractional ALFF (fALFF) maps

    Returns
    -------
    workflow : workflow object
        ALFF workflow

    Notes
    -----
    Adapted from
    <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/alff/alff.py>

    """
    if feature is not None:
        name = f"{format_workflow(feature.name)}"
    else:
        name = "falff"
    if fwhm is not None:
        name = f"{name}_{int(float(fwhm) * 1e3):d}"
    name = f"{name}_wf"
    workflow = pe.Workflow(name=name)

    # input
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=["tags", "vals", "metadata", "bold", "mask", "fwhm"]),
        name="inputnode",
    )
    unfiltered_inputnode = pe.Node(
        niu.IdentityInterface(fields=["bold", "mask"]),
        name="unfiltered_inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["resultdicts"]),
                         name="outputnode")

    if fwhm is not None:
        inputnode.inputs.fwhm = float(fwhm)
    elif feature is not None and hasattr(feature, "smoothing"):
        inputnode.inputs.fwhm = feature.smoothing.get("fwhm")

    # assemble result dictionaries for the computed maps
    make_resultdicts = pe.Node(
        MakeResultdicts(tagkeys=["feature"],
                        imagekeys=["alff", "falff", "mask"]),
        name="make_resultdicts",
    )
    if feature is not None:
        make_resultdicts.inputs.feature = feature.name
    workflow.connect(inputnode, "tags", make_resultdicts, "tags")
    workflow.connect(inputnode, "vals", make_resultdicts, "vals")
    workflow.connect(inputnode, "metadata", make_resultdicts, "metadata")
    workflow.connect(inputnode, "mask", make_resultdicts, "mask")

    workflow.connect(make_resultdicts, "resultdicts", outputnode,
                     "resultdicts")

    # write the result dictionaries to the working directory
    resultdict_datasink = pe.Node(ResultdictDatasink(base_directory=workdir),
                                  name="resultdict_datasink")
    workflow.connect(make_resultdicts, "resultdicts", resultdict_datasink,
                     "indicts")

    # standard deviation of the filtered image (this is the ALFF map)
    stddev_filtered = pe.Node(afni.TStat(),
                              name="stddev_filtered",
                              mem_gb=memcalc.series_std_gb)
    stddev_filtered.inputs.outputtype = "NIFTI_GZ"
    stddev_filtered.inputs.options = "-stdev"
    workflow.connect(inputnode, "bold", stddev_filtered, "in_file")
    workflow.connect(inputnode, "mask", stddev_filtered, "mask")

    # standard deviation of the unfiltered image
    stddev_unfiltered = pe.Node(afni.TStat(),
                                name="stddev_unfiltered",
                                mem_gb=memcalc.series_std_gb)
    stddev_unfiltered.inputs.outputtype = "NIFTI_GZ"
    stddev_unfiltered.inputs.options = "-stdev"
    workflow.connect(unfiltered_inputnode, "bold", stddev_unfiltered,
                     "in_file")
    workflow.connect(unfiltered_inputnode, "mask", stddev_unfiltered, "mask")

    falff = pe.Node(afni.Calc(), name="falff", mem_gb=memcalc.volume_std_gb)
    falff.inputs.args = "-float"
    falff.inputs.expr = "(1.0*bool(a))*((1.0*b)/(1.0*c))"
    falff.inputs.outputtype = "NIFTI_GZ"
    workflow.connect(inputnode, "mask", falff, "in_file_a")
    workflow.connect(stddev_filtered, "out_file", falff, "in_file_b")
    workflow.connect(stddev_unfiltered, "out_file", falff, "in_file_c")

    # smooth and z-score ALFF and fALFF together
    merge = pe.Node(niu.Merge(2), name="merge")
    workflow.connect(stddev_filtered, "out_file", merge, "in1")
    workflow.connect(falff, "out_file", merge, "in2")

    smooth = pe.MapNode(LazyBlurToFWHM(outputtype="NIFTI_GZ"),
                        iterfield="in_file",
                        name="smooth")
    workflow.connect(merge, "out", smooth, "in_file")
    workflow.connect(inputnode, "mask", smooth, "mask")
    workflow.connect(inputnode, "fwhm", smooth, "fwhm")

    zscore = pe.MapNode(ZScore(),
                        iterfield="in_file",
                        name="zscore",
                        mem_gb=memcalc.volume_std_gb)
    workflow.connect(smooth, "out_file", zscore, "in_file")
    workflow.connect(inputnode, "mask", zscore, "mask")

    split = pe.Node(niu.Split(splits=[1, 1]), name="split")
    workflow.connect(zscore, "out_file", split, "inlist")

    workflow.connect(split, "out1", make_resultdicts, "alff")
    workflow.connect(split, "out2", make_resultdicts, "falff")

    return workflow
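A pure-numpy restatement (added for illustration; the workflow itself uses afni.TStat and afni.Calc) of the falff node's expression "(1.0*bool(a))*((1.0*b)/(1.0*c))", where a is the mask, b the standard deviation of the filtered series, and c that of the unfiltered series. The toy data are constructed so the expected ratio is known.

import numpy as np

rng = np.random.default_rng(0)
unfiltered = rng.normal(size=(4, 4, 4, 100))  # toy BOLD series
filtered = unfiltered * 0.5                   # stand-in for band-pass filtering
mask = np.ones((4, 4, 4), dtype=bool)

alff = filtered.std(axis=-1)                  # std of the filtered series (ALFF)
falff = np.where(mask, alff / unfiltered.std(axis=-1), 0.0)

assert np.allclose(falff[mask], 0.5)          # half the std, by construction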
Example #19
def hcp_workflow(name='Evaluation_HCP',
                 settings=None,
                 map_metric=False,
                 compute_fmb=False):
    """
    The regseg evaluation workflow for the Human Connectome Project (HCP)
    """
    if settings is None:  # avoid a mutable default argument
        settings = {}
    import os.path as op

    from nipype.pipeline import engine as pe
    from nipype.interfaces import utility as niu
    from nipype.interfaces import io as nio
    from nipype.algorithms.mesh import ComputeMeshWarp, WarpPoints
    from nipype.algorithms.misc import AddCSVRow
    from nipype.workflows.dmri.fsl.artifacts import sdc_fmb

    from .. import data
    from ..interfaces.utility import (ExportSlices, TileSlicesGrid,
                                      SlicesGridplot)
    from .registration import regseg_wf, sdc_t2b
    from .preprocess import preprocess
    from .fieldmap import process_vsm
    from .dti import mrtrix_dti
    # explicit relative import; the original bare ``import evaluation``
    # relied on Python 2's implicit relative imports
    from . import evaluation as ev

    wf = pe.Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=['subject_id', 'data_dir']),
        name='inputnode')
    inputnode.inputs.data_dir = settings['data_dir']
    inputnode.iterables = [('subject_id', settings['subject_id'])]

    # Generate the distorted set, including surfaces
    pre = preprocess()
    rdti = mrtrix_dti('ReferenceDTI')
    wdti = mrtrix_dti('WarpedDTI')
    mdti = pe.Node(niu.Merge(2), name='MergeDTI')

    wf.connect([
        (inputnode, pre, [('subject_id', 'inputnode.subject_id'),
                          ('data_dir', 'inputnode.data_dir')]),
        (pre, rdti, [('outputnode.dwi', 'inputnode.in_dwi'),
                     ('outputnode.dwi_mask', 'inputnode.in_mask'),
                     ('outputnode.bvec', 'inputnode.in_bvec'),
                     ('outputnode.bval', 'inputnode.in_bval')]),
        (pre, wdti, [('outputnode.warped_dwi', 'inputnode.in_dwi'),
                     ('outputnode.warped_msk', 'inputnode.in_mask'),
                     ('outputnode.bvec', 'inputnode.in_bvec'),
                     ('outputnode.bval', 'inputnode.in_bval')]),
        (wdti, mdti, [('outputnode.fa', 'in1'), ('outputnode.md', 'in2')]),
    ])

    regseg = regseg_wf(usemask=True)
    regseg.inputs.inputnode.options = data.get('regseg_hcp')
    exprs = pe.Node(ExportSlices(slices=[38, 48, 57, 67, 76, 86],
                                 axis=['axial', 'sagittal']),
                    name='ExportREGSEG')
    gridrs = pe.Node(SlicesGridplot(label=['regseg', 'regseg'],
                                    slices=[38, 48, 57, 67, 76, 86],
                                    view=['axial', 'sagittal']),
                     name='GridPlotREGSEG')
    meshrs = pe.MapNode(ComputeMeshWarp(),
                        iterfield=['surface1', 'surface2'],
                        name='REGSEGSurfDistance')
    csvrs = pe.Node(AddCSVRow(in_file=settings['out_csv']),
                    name="REGSEGAddRow")
    csvrs.inputs.method = 'REGSEG'

    wf.connect([(mdti, regseg, [('out', 'inputnode.in_fixed')]),
                (pre, regseg, [('outputnode.surf', 'inputnode.in_surf'),
                               ('outputnode.warped_msk', 'inputnode.in_mask')]),
                (pre, exprs, [('outputnode.warped_surf', 'sgreen')]),
                (regseg, exprs, [('outputnode.out_surf', 'syellow')]),
                (wdti, exprs, [('outputnode.fa', 'reference')]),
                (exprs, gridrs, [('out_files', 'in_files')]),
                (pre, meshrs, [('outputnode.warped_surf', 'surface1')]),
                (regseg, meshrs, [('outputnode.out_surf', 'surface2')]),
                (inputnode, csvrs, [('subject_id', 'subject_id')]),
                (meshrs, csvrs, [('distance', 'surf_dist')])])

    if compute_fmb:
        cmethod0 = sdc_fmb()
        selbmap = pe.Node(niu.Split(splits=[1, 1], squeeze=True),
                          name='SelectBmap')
        dfm = process_vsm()
        dfm.inputs.inputnode.scaling = 1.0
        dfm.inputs.inputnode.enc_dir = 'y-'
        wrpsurf = pe.MapNode(WarpPoints(),
                             iterfield=['points'],
                             name='UnwarpSurfs')
        export0 = pe.Node(ExportSlices(slices=[38, 48, 57, 67, 76, 86],
                                       axis=['axial', 'sagittal']),
                          name='ExportFMB')
        mesh0 = pe.MapNode(ComputeMeshWarp(),
                           iterfield=['surface1', 'surface2'],
                           name='FMBSurfDistance')
        grid0 = pe.Node(SlicesGridplot(label=['FMB'] * 2,
                                       slices=[38, 48, 57, 67, 76, 86],
                                       view=['axial', 'sagittal']),
                        name='GridPlotFMB')
        csv0 = pe.Node(AddCSVRow(in_file=settings['out_csv']),
                       name="FMBAddRow")
        csv0.inputs.method = 'FMB'

        wf.connect([
            (pre, cmethod0, [('outputnode.warped_dwi', 'inputnode.in_file'),
                             ('outputnode.warped_msk', 'inputnode.in_mask'),
                             ('outputnode.bval', 'inputnode.in_bval'),
                             ('outputnode.mr_param', 'inputnode.settings')]),
            (pre, selbmap, [('outputnode.bmap_wrapped', 'inlist')]),
            (selbmap, cmethod0, [('out1', 'inputnode.bmap_mag'),
                                 ('out2', 'inputnode.bmap_pha')]),
            (cmethod0, dfm, [('outputnode.out_vsm', 'inputnode.vsm')]),
            (pre, dfm, [('outputnode.warped_msk', 'inputnode.reference')]),
            (dfm, wrpsurf, [('outputnode.dfm', 'warp')]),
            (pre, wrpsurf, [('outputnode.surf', 'points')]),
            (wrpsurf, export0, [('out_points', 'syellow')]),
            (pre, export0, [('outputnode.warped_surf', 'sgreen')]),
            (wdti, export0, [('outputnode.fa', 'reference')]),
            (export0, grid0, [('out_files', 'in_files')]),
            (pre, mesh0, [('outputnode.warped_surf', 'surface1')]),
            (wrpsurf, mesh0, [('out_points', 'surface2')]),
            (inputnode, csv0, [('subject_id', 'subject_id')]),
            (mesh0, csv0, [('distance', 'surf_dist')])
        ])

    cmethod1 = sdc_t2b(num_threads=settings['nthreads'])
    export1 = pe.Node(ExportSlices(slices=[38, 48, 57, 67, 76, 86],
                                   axis=['axial', 'sagittal']),
                      name='ExportT2B')
    grid1 = pe.Node(SlicesGridplot(label=['T2B'] * 2,
                                   slices=[38, 48, 57, 67, 76, 86],
                                   view=['axial', 'sagittal']),
                    name='GridPlotT2B')
    mesh1 = pe.MapNode(ComputeMeshWarp(),
                       iterfield=['surface1', 'surface2'],
                       name='T2BSurfDistance')
    csv1 = pe.Node(AddCSVRow(in_file=settings['out_csv']), name="T2BAddRow")
    csv1.inputs.method = 'T2B'

    wf.connect([(pre, cmethod1,
                 [('outputnode.warped_dwi', 'inputnode.in_dwi'),
                  ('outputnode.warped_msk', 'inputnode.dwi_mask'),
                  ('outputnode.t2w_brain', 'inputnode.in_t2w'),
                  ('outputnode.t1w_mask', 'inputnode.t2w_mask'),
                  ('outputnode.surf', 'inputnode.in_surf'),
                  ('outputnode.bval', 'inputnode.in_bval'),
                  ('outputnode.mr_param', 'inputnode.in_param')]),
                (cmethod1, export1, [('outputnode.out_surf', 'syellow')]),
                (pre, export1, [('outputnode.warped_surf', 'sgreen')]),
                (wdti, export1, [('outputnode.fa', 'reference')]),
                (export1, grid1, [('out_files', 'in_files')]),
                (pre, mesh1, [('outputnode.warped_surf', 'surface1')]),
                (cmethod1, mesh1, [('outputnode.out_surf', 'surface2')]),
                (inputnode, csv1, [('subject_id', 'subject_id')]),
                (mesh1, csv1, [('distance', 'surf_dist')])])

    tile = pe.Node(TileSlicesGrid(), name='TileGridplots')
    csvtile = pe.Node(AddCSVRow(
        in_file=op.join(op.dirname(settings['out_csv']), 'tiles.csv')),
                      name="TileAddRow")

    wf.connect([(inputnode, tile, [('subject_id', 'out_file')]),
                (gridrs, tile, [('out_file', 'in_reference')]),
                (grid1, tile, [('out_file', 'in_competing')]),
                (tile, csvtile, [('out_file', 'names')])])

    if map_metric:
        out_csv = op.abspath(op.join(name, 'energiesmapping.csv'))
        mapen = ev.map_energy(out_csv=out_csv)
        wf.connect([
            (inputnode, mapen, [('subject_id', 'inputnode.subject_id')]),
            (regseg, mapen, [('outputnode.out_enh', 'inputnode.reference'),
                             ('outputnode.reg_msk', 'inputnode.in_mask')]),
            (pre, mapen, [('outputnode.warped_surf', 'inputnode.surfaces0'),
                          ('outputnode.surf', 'inputnode.surfaces1')])
        ])

    return wf
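A hedged usage sketch (added; all paths and subject IDs are placeholders): the settings keys below are exactly the ones hcp_workflow reads above (data_dir, subject_id, out_csv, nthreads).

if __name__ == '__main__':
    settings = {
        'data_dir': '/data/hcp',             # hypothetical dataset root
        'subject_id': ['100307', '100408'],  # iterated by the inputnode
        'out_csv': '/data/out/distances.csv',
        'nthreads': 4,                       # forwarded to sdc_t2b
    }
    wf = hcp_workflow(settings=settings, map_metric=False, compute_fmb=False)
    wf.base_dir = '/data/work'
    wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})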