Code Example #1
def airmsk_wf(name='AirMaskWorkflow', save_memory=False, ants_settings=None):
    """Implements the Step 1 of [Mortamet2009]_."""
    import pkg_resources as pkgr
    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_file', 'in_noinu', 'in_mask', 'head_mask']),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['out_file', 'artifact_msk']),
        name='outputnode')

    antsparms = pe.Node(nio.JSONFileGrabber(), name='ants_settings')
    antsparms.inputs.in_file = (ants_settings if ants_settings is not None else
                                pkgr.resource_filename(
                                    'mriqc', 'data/ants_settings.json'))

    def _invt_flags(transforms):
        # Request inversion of every transform in the list
        return [True] * len(transforms)

    # Spatial normalization, using ANTs
    norm = pe.Node(ants.Registration(dimension=3), name='normalize')

    if save_memory:
        norm.inputs.fixed_image = op.join(get_mni_template(),
                                          'MNI152_T1_2mm.nii.gz')
        norm.inputs.fixed_image_mask = op.join(
            get_mni_template(), 'MNI152_T1_2mm_brain_mask.nii.gz')
    else:
        norm.inputs.fixed_image = op.join(get_mni_template(),
                                          'MNI152_T1_1mm.nii.gz')
        norm.inputs.fixed_image_mask = op.join(
            get_mni_template(), 'MNI152_T1_1mm_brain_mask.nii.gz')

    invt = pe.Node(ants.ApplyTransforms(dimension=3,
                                        default_value=1,
                                        interpolation='NearestNeighbor'),
                   name='invert_xfm')
    invt.inputs.input_image = op.join(get_mni_template(),
                                      'MNI152_T1_1mm_brain_bottom.nii.gz')

    # Combine and invert mask
    combine = pe.Node(niu.Function(input_names=['head_mask', 'artifact_msk'],
                                   output_names=['out_file'],
                                   function=combine_masks),
                      name='combine_masks')

    qi1 = pe.Node(ArtifactMask(), name='ArtifactMask')

    workflow.connect([
        (antsparms, norm, [
            ('initial_moving_transform_com', 'initial_moving_transform_com'),
            ('winsorize_lower_quantile', 'winsorize_lower_quantile'),
            ('winsorize_upper_quantile', 'winsorize_upper_quantile'),
            ('float', 'float'),
            ('transforms', 'transforms'),
            ('transform_parameters', 'transform_parameters'),
            ('number_of_iterations', 'number_of_iterations'),
            ('convergence_window_size', 'convergence_window_size'),
            ('metric', 'metric'),
            ('metric_weight', 'metric_weight'),
            ('radius_or_number_of_bins', 'radius_or_number_of_bins'),
            ('sampling_strategy', 'sampling_strategy'),
            ('sampling_percentage', 'sampling_percentage'),
            ('smoothing_sigmas', 'smoothing_sigmas'),
            ('shrink_factors', 'shrink_factors'),
            ('convergence_threshold', 'convergence_threshold'),
            ('sigma_units', 'sigma_units'),
            ('use_estimate_learning_rate_once',
             'use_estimate_learning_rate_once'),
            ('use_histogram_matching', 'use_histogram_matching')]),
        (inputnode, qi1, [('in_file', 'in_file')]),
        (inputnode, norm, [('in_noinu', 'moving_image'),
                           ('in_mask', 'moving_image_mask')]),
        (norm, invt, [('forward_transforms', 'transforms'),
                      (('forward_transforms', _invt_flags),
                       'invert_transform_flags')]),
        (inputnode, invt, [('in_mask', 'reference_image')]),
        (inputnode, combine, [('head_mask', 'head_mask')]),
        (invt, combine, [('output_image', 'artifact_msk')]),
        (combine, qi1, [('out_file', 'air_msk')]),
        (qi1, outputnode, [('out_air_msk', 'out_file'),
                           ('out_art_msk', 'artifact_msk')]),
    ])
    return workflow
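
A minimal usage sketch (not from the original source; all input paths are hypothetical and the snippet assumes the module-level imports the function relies on, e.g. nipype.pipeline.engine as pe and nipype.interfaces.utility as niu, are in scope):

# Build the air-mask workflow and feed it hypothetical input files.
wf = airmsk_wf(name='AirMaskWorkflow')
wf.inputs.inputnode.in_file = 'sub-01_T1w.nii.gz'             # raw T1w
wf.inputs.inputnode.in_noinu = 'sub-01_T1w_inu.nii.gz'        # INU-corrected T1w
wf.inputs.inputnode.in_mask = 'sub-01_T1w_brainmask.nii.gz'   # brain mask
wf.inputs.inputnode.head_mask = 'sub-01_T1w_headmask.nii.gz'  # head mask
wf.base_dir = '/tmp/work'
wf.run()
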
Code Example #2
File: syn.py  Project: ccharpen/fmriprep
def init_syn_sdc_wf(omp_nthreads,
                    bold_pe=None,
                    atlas_threshold=3,
                    name='syn_sdc_wf'):
    """
    This workflow takes a skull-stripped T1w image and reference BOLD image and
    estimates a susceptibility distortion correction warp, using ANTs symmetric
    normalization (SyN) and the average fieldmap atlas described in
    [Treiber2016]_.

    SyN deformation is restricted to the phase-encoding (PE) direction.
    If no PE direction is specified, anterior-posterior PE is assumed.

    SyN deformation is also restricted to regions that are expected to have a
    >3mm (approximately 1 voxel) warp, based on the fieldmap atlas.

    This technique is a variation on those developed in [Huntenburg2014]_ and
    [Wang2017]_.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.fieldmap.syn import init_syn_sdc_wf
        wf = init_syn_sdc_wf(
            bold_pe='j',
            omp_nthreads=8)

    **Inputs**

        bold_ref
            reference image
        bold_ref_brain
            skull-stripped reference image
        template : str
            Name of template targeted by ``template`` output space
        t1_brain
            skull-stripped, bias-corrected structural image
        t1_2_mni_reverse_transform
            inverse registration transform of T1w image to MNI template

    **Outputs**

        out_reference
            the ``bold_ref`` image after unwarping
        out_reference_brain
            the ``bold_ref_brain`` image after unwarping
        out_warp
            the corresponding :abbr:`DFM (displacements field map)` compatible with
            ANTs
        out_mask
            mask of the unwarped input file

    """
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface([
        'bold_ref', 'bold_ref_brain', 'template', 't1_brain',
        't1_2_mni_reverse_transform'
    ]),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        ['out_reference', 'out_reference_brain', 'out_mask', 'out_warp']),
                         name='outputnode')

    if bold_pe is None or bold_pe[0] not in ['i', 'j']:
        LOGGER.warning(
            'Incorrect phase-encoding direction, assuming PA (posterior-to-anterior).'
        )
        bold_pe = 'j'

    # Collect predefined data
    # Atlas image and registration affine
    atlas_img = pkgr.resource_filename('fmriprep', 'data/fmap_atlas.nii.gz')
    # Registration specifications
    affine_transform = pkgr.resource_filename('fmriprep', 'data/affine.json')
    syn_transform = pkgr.resource_filename('fmriprep',
                                           'data/susceptibility_syn.json')

    invert_t1w = pe.Node(InvertT1w(), name='invert_t1w', mem_gb=0.3)

    ref_2_t1 = pe.Node(Registration(from_file=affine_transform),
                       name='ref_2_t1',
                       n_procs=omp_nthreads)
    t1_2_ref = pe.Node(ApplyTransforms(invert_transform_flags=[True]),
                       name='t1_2_ref',
                       n_procs=omp_nthreads)

    # 1) BOLD -> T1; 2) MNI -> T1; 3) ATLAS -> MNI
    transform_list = pe.Node(niu.Merge(3),
                             name='transform_list',
                             mem_gb=DEFAULT_MEMORY_MIN_GB)

    # Inverting (1), then applying in reverse order:
    #
    # ATLAS -> MNI -> T1 -> BOLD
    atlas_2_ref = pe.Node(
        ApplyTransforms(invert_transform_flags=[True, False, False]),
        name='atlas_2_ref',
        n_procs=omp_nthreads,
        mem_gb=0.3)
    atlas_2_ref.inputs.input_image = atlas_img

    threshold_atlas = pe.Node(fsl.maths.MathsCommand(
        args='-thr {:.8g} -bin'.format(atlas_threshold),
        output_datatype='char'),
                              name='threshold_atlas',
                              mem_gb=0.3)

    fixed_image_masks = pe.Node(niu.Merge(2),
                                name='fixed_image_masks',
                                mem_gb=DEFAULT_MEMORY_MIN_GB)
    fixed_image_masks.inputs.in1 = 'NULL'

    restrict = [[int(bold_pe[0] == 'i'), int(bold_pe[0] == 'j'), 0]] * 2
    syn = pe.Node(Registration(from_file=syn_transform,
                               restrict_deformation=restrict),
                  name='syn',
                  n_procs=omp_nthreads)

    unwarp_ref = pe.Node(ApplyTransforms(dimension=3,
                                         float=True,
                                         interpolation='LanczosWindowedSinc'),
                         name='unwarp_ref')

    skullstrip_bold_wf = init_skullstrip_bold_wf()

    workflow.connect([
        (inputnode, invert_t1w, [('t1_brain', 'in_file'),
                                 ('bold_ref', 'ref_file')]),
        (inputnode, ref_2_t1, [('bold_ref_brain', 'moving_image')]),
        (invert_t1w, ref_2_t1, [('out_file', 'fixed_image')]),
        (inputnode, t1_2_ref, [('bold_ref', 'reference_image')]),
        (invert_t1w, t1_2_ref, [('out_file', 'input_image')]),
        (ref_2_t1, t1_2_ref, [('forward_transforms', 'transforms')]),
        (ref_2_t1, transform_list, [('forward_transforms', 'in1')]),
        (inputnode, transform_list, [('t1_2_mni_reverse_transform', 'in2'),
                                     (('template', _prior_path), 'in3')]),
        (inputnode, atlas_2_ref, [('bold_ref', 'reference_image')]),
        (transform_list, atlas_2_ref, [('out', 'transforms')]),
        (atlas_2_ref, threshold_atlas, [('output_image', 'in_file')]),
        (threshold_atlas, fixed_image_masks, [('out_file', 'in2')]),
        (inputnode, syn, [('bold_ref_brain', 'moving_image')]),
        (t1_2_ref, syn, [('output_image', 'fixed_image')]),
        (fixed_image_masks, syn, [('out', 'fixed_image_masks')]),
        (syn, outputnode, [('forward_transforms', 'out_warp')]),
        (syn, unwarp_ref, [('forward_transforms', 'transforms')]),
        (inputnode, unwarp_ref, [('bold_ref', 'reference_image'),
                                 ('bold_ref', 'input_image')]),
        (unwarp_ref, skullstrip_bold_wf, [('output_image', 'inputnode.in_file')
                                          ]),
        (unwarp_ref, outputnode, [('output_image', 'out_reference')]),
        (skullstrip_bold_wf, outputnode,
         [('outputnode.skull_stripped_file', 'out_reference_brain'),
          ('outputnode.mask_file', 'out_mask')]),
    ])

    return workflow
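
A minimal usage sketch (not from fmriprep itself; the paths and template name are hypothetical, and the snippet assumes the module-level imports used above are in scope):

wf = init_syn_sdc_wf(omp_nthreads=8, bold_pe='j')
wf.inputs.inputnode.bold_ref = 'sub-01_task-rest_boldref.nii.gz'
wf.inputs.inputnode.bold_ref_brain = 'sub-01_task-rest_boldref_brain.nii.gz'
wf.inputs.inputnode.t1_brain = 'sub-01_desc-brain_T1w.nii.gz'
wf.inputs.inputnode.template = 'MNI152NLin2009cAsym'  # hypothetical template name
wf.inputs.inputnode.t1_2_mni_reverse_transform = 'from-MNI_to-T1w_xfm.h5'
wf.base_dir = '/tmp/work'
wf.run()
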
Code Example #3
def run_func_preproc(functional_scan, start_idx=None, stop_idx=None,
                     out_dir=None, run=True):
    """Run the 'func_preproc_workflow' function to execute the modular
    workflow with the provided inputs.

    :type functional_scan: str
    :param functional_scan: Filepath to the raw functional timeseries image in
                             a NIFTI file.
    :type start_idx: int
    :param start_idx: (default: None) The timeseries timepoint/volume to start
                      with - i.e. will only include this timepoint and the
                      ones after it - setting this to None will include all
                      timepoints from the beginning.
    :type stop_idx: int
    :param stop_idx: (default: None) The timeseries timepoint/volume to end
                     with - i.e. will only include this timepoint and the
                     ones before it - setting this to None will include all
                     timepoints up until the end of the timeseries.
    :type out_dir: str
    :param out_dir: (default: None) The output directory to write the results
                    to; if left as None, will write to the current directory.
    :type run: bool
    :param run: (default: True) Will run the workflow; if set to False, will
                connect the Nipype workflow and return the workflow object
                instead.
    :rtype: str
    :return: (if run=True) The filepath of the generated func_reorient
             file.
    :rtype: Nipype workflow object
    :return: (if run=False) The connected Nipype workflow object.
    :rtype: str
    :return: (if run=False) The base directory of the workflow if it were to
             be run.
    """

    import os
    import glob

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    output = "func_preproc"

    workflow = pe.Workflow(name='%s_workflow' % output)

    if not out_dir:
        out_dir = os.getcwd()

    workflow_dir = os.path.join(out_dir, "workflow_output", output)
    workflow.base_dir = workflow_dir

    resource_pool = {}
    config = {}
    num_cores_per_subject = 1

    resource_pool["functional_scan"] = functional_scan

    # Compare explicitly against None: timepoint 0 is a valid start index
    if start_idx is not None:
        config["start_idx"] = start_idx
    if stop_idx is not None:
        config["stop_idx"] = stop_idx
    
    workflow, resource_pool = func_preproc_workflow(workflow, resource_pool,
                                                    config)

    ds = pe.Node(nio.DataSink(), name='datasink_func_motion_correct')
    ds.inputs.base_directory = workflow_dir
    
    node, out_file = resource_pool["func_reorient"]

    workflow.connect(node, out_file, ds, 'func_reorient')

    if run:
        workflow.run(plugin='MultiProc',
                     plugin_args={'n_procs': num_cores_per_subject})
        outpath = glob.glob(os.path.join(workflow_dir, "func_reorient",
                                         "*"))[0]
        return outpath
        
    else:
        return workflow, workflow.base_dir
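
A minimal usage sketch (hypothetical paths). Note that timepoint 0 is a valid start index, which is why the explicit None checks above matter:

out_path = run_func_preproc('/data/sub-01/func/rest.nii.gz',
                            start_idx=0, stop_idx=119,
                            out_dir='/tmp/preproc_out', run=True)
print(out_path)  # filepath of the reoriented functional image
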
Code Example #4
def create_target_angle(name='target_angle'):
    """
    Target Angle Calculation
    
    Parameters
    ----------
    name : string, optional
        Name of the workflow.
            
    Returns
    -------
    target_angle : nipype.pipeline.engine.Workflow
        Target angle workflow.
    
    Notes
    -----
    
    Workflow Inputs::
    
        inputspec.subjects : list (nifti files)
            List of subject paths.
    
    Workflow Outputs::
    
        outputspec.target_angle : float
            Target angle over the provided group of subjects.
            
    Target Angle procedure:
    
    1. Compute the median angle and mean bold amplitude of each subject in the group.
    2. Fit a linear model with median angle as the dependent variable and
       mean bold amplitude as the independent variable.
    3. Calculate the corresponding median_angle on the fitted model for the subject 
       with the smallest mean bold amplitude of the group.
    
    Workflow Graph:
    
    .. image:: ../images/target_angle.dot.png
        :width: 500
    
    Detailed Workflow Graph:
    
    .. image:: ../images/target_angle_detailed.dot.png
        :width: 500
        
    """
    target_angle = pe.Workflow(name=name)
    
    inputspec = pe.Node(util.IdentityInterface(fields=['subjects']),
                        name='inputspec')
    outputspec = pe.Node(util.IdentityInterface(fields=['target_angle']),
                         name='outputspec')
    
    cmap = pe.MapNode(util.Function(input_names=['subject'],
                                    output_names=['mean_bold',
                                                  'median_angle'],
                                    function=calc_median_angle_params),
                      name='median_angle_params',
                      iterfield=['subject'])
    
    cta = pe.Node(util.Function(input_names=['mean_bolds',
                                             'median_angles'],
                                output_names=['target_angle'],
                                function=calc_target_angle),
                  name='target_angle')
    
    target_angle.connect(inputspec, 'subjects',
                         cmap, 'subject')
    target_angle.connect(cmap, 'mean_bold',
                         cta, 'mean_bolds')
    target_angle.connect(cmap, 'median_angle',
                         cta, 'median_angles')
    target_angle.connect(cta, 'target_angle',
                         outputspec, 'target_angle')
    
    return target_angle
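
A minimal usage sketch (hypothetical subject paths; assumes the module-level imports used above, e.g. nipype.pipeline.engine as pe, are in scope):

wf = create_target_angle(name='target_angle')
wf.inputs.inputspec.subjects = ['/data/sub-01_rest.nii.gz',
                                '/data/sub-02_rest.nii.gz']
wf.base_dir = '/tmp/work'
wf.run()
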
    
Code Example #5
File: outputs.py  Project: HippocampusGirl/sdcflows
def init_sdc_unwarp_report_wf(name='sdc_unwarp_report_wf', forcedsyn=False):
    """
    Save a reportlet showing how SDC unwarping performed.

    This workflow generates and saves a reportlet showing the effect of fieldmap
    unwarping a BOLD image.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
            wf = init_sdc_unwarp_report_wf()

    Parameters
    ----------
    name : str, optional
        Workflow name (default: ``sdc_unwarp_report_wf``)
    forcedsyn : bool, optional
        Whether SyN-SDC was forced.

    Inputs
    ------
    in_pre
        Reference image, before unwarping
    in_post
        Reference image, after unwarping
    in_seg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    in_xfm
        Affine transform from T1 space to BOLD space (ITK format)

    """
    from niworkflows.interfaces import SimpleBeforeAfter
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from niworkflows.utils.images import dseg_label as _dseg_label

    DEFAULT_MEMORY_MIN_GB = 0.01

    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_pre', 'in_post', 'in_seg', 'in_xfm']),
                        name='inputnode')

    map_seg = pe.Node(ApplyTransforms(dimension=3,
                                      float=True,
                                      interpolation='MultiLabel'),
                      name='map_seg',
                      mem_gb=0.3)

    sel_wm = pe.Node(niu.Function(function=_dseg_label),
                     name='sel_wm',
                     mem_gb=DEFAULT_MEMORY_MIN_GB)
    sel_wm.inputs.label = 2

    bold_rpt = pe.Node(SimpleBeforeAfter(), name='bold_rpt', mem_gb=0.1)
    # Boolean indexing picks the description: 'forcedsyn' if forcedsyn else 'sdc'
    ds_report_sdc = pe.Node(DerivativesDataSink(desc=('sdc',
                                                      'forcedsyn')[forcedsyn],
                                                suffix='bold',
                                                datatype='figures'),
                            name='ds_report_sdc',
                            mem_gb=DEFAULT_MEMORY_MIN_GB,
                            run_without_submitting=True)

    workflow.connect([
        (inputnode, bold_rpt, [('in_post', 'after'), ('in_pre', 'before')]),
        (bold_rpt, ds_report_sdc, [('out_report', 'in_file')]),
        (inputnode, map_seg, [('in_post', 'reference_image'),
                              ('in_seg', 'input_image'),
                              ('in_xfm', 'transforms')]),
        (map_seg, sel_wm, [('output_image', 'in_seg')]),
        (sel_wm, bold_rpt, [('out', 'wm_seg')]),
    ])

    return workflow
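
A minimal usage sketch (hypothetical inputs; the segmentation and transform follow the input descriptions in the docstring):

wf = init_sdc_unwarp_report_wf()
wf.inputs.inputnode.in_pre = 'boldref_distorted.nii.gz'   # before unwarping
wf.inputs.inputnode.in_post = 'boldref_unwarped.nii.gz'   # after unwarping
wf.inputs.inputnode.in_seg = 'sub-01_dseg.nii.gz'         # structural segmentation
wf.inputs.inputnode.in_xfm = 'from-T1w_to-bold_xfm.txt'   # T1w-to-BOLD affine (ITK)
wf.base_dir = '/tmp/work'
wf.run()
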
Code Example #6
def create_freesurfer_pet_quantification_wf(name="fspetquant"):
    inputnode = pe.Node(interface=util.IdentityInterface(
        fields=["subject_id", "subjects_dir", "pet"]),
                        name="inputnode")

    FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(),
                               name='fssource')
    mri_convert_Brain = pe.Node(interface=fs.MRIConvert(),
                                name='mri_convert_Brain')
    mri_convert_Brain.inputs.out_type = 'niigz'
    mri_convert_Brain.inputs.no_change = True

    mri_convert_ROIs = mri_convert_Brain.clone("mri_convert_ROIs")
    mri_convert_T1 = mri_convert_Brain.clone("mri_convert_T1")

    fast_seg_T1 = pe.Node(interface=fsl.FAST(), name='fast_seg_T1')
    fast_seg_T1.inputs.segments = True
    fast_seg_T1.inputs.probability_maps = True

    coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister')
    coregister.inputs.cost = 'corratio'
    coregister.inputs.interp = 'trilinear'

    convertxfm = pe.Node(interface=fsl.ConvertXFM(), name='convertxfm')
    convertxfm.inputs.invert_xfm = True

    applyxfm_t1 = pe.Node(interface=fsl.ApplyXfm(), name='applyxfm_t1')
    applyxfm_t1.inputs.apply_xfm = True
    applyxfm_t1.inputs.interp = 'trilinear'

    applyxfm_gm = applyxfm_t1.clone("applyxfm_gm")
    applyxfm_gm.inputs.interp = 'nearestneighbour'
    applyxfm_wm = applyxfm_gm.clone("applyxfm_wm")
    applyxfm_csf = applyxfm_gm.clone("applyxfm_csf")

    applyxfm_rois = applyxfm_t1.clone("applyxfm_rois")
    applyxfm_rois.inputs.interp = 'nearestneighbour'

    applyxfm_CorrectedPET = pe.Node(interface=fsl.ApplyXfm(),
                                    name='applyxfm_CorrectedPET')
    applyxfm_CorrectedPET.inputs.apply_xfm = True
    applyxfm_CorrectedPET.inputs.interp = 'trilinear'

    pve_correction = pe.Node(interface=ci.PartialVolumeCorrection(),
                             name='pve_correction')
    pve_correction.inputs.skip_atlas = False
    pve_correction.inputs.use_fs_LUT = True

    workflow = pe.Workflow(name=name)
    workflow.base_output_dir = name

    workflow.connect([
        (inputnode, FreeSurferSource, [("subjects_dir", "subjects_dir"),
                                       ("subject_id", "subject_id")]),
        (FreeSurferSource, mri_convert_T1, [('T1', 'in_file')]),
        (FreeSurferSource, mri_convert_Brain, [('brain', 'in_file')]),
        (FreeSurferSource, mri_convert_ROIs,
         [(('aparc_aseg', select_aparc), 'in_file')]),
        (mri_convert_T1, coregister, [('out_file', 'reference')]),
        (mri_convert_Brain, fast_seg_T1, [('out_file', 'in_files')]),
        (inputnode, fast_seg_T1, [('subject_id', 'out_basename')]),
        (inputnode, coregister, [('pet', 'in_file')]),
        (coregister, convertxfm, [('out_matrix_file', 'in_file')]),
        (convertxfm, applyxfm_t1, [('out_file', 'in_matrix_file')]),
        (convertxfm, applyxfm_gm, [('out_file', 'in_matrix_file')]),
        (convertxfm, applyxfm_wm, [('out_file', 'in_matrix_file')]),
        (convertxfm, applyxfm_csf, [('out_file', 'in_matrix_file')]),
        (convertxfm, applyxfm_rois, [('out_file', 'in_matrix_file')]),
        (inputnode, applyxfm_t1, [('pet', 'reference')]),
        (mri_convert_T1, applyxfm_t1, [('out_file', 'in_file')]),
        (inputnode, applyxfm_gm, [('pet', 'reference')]),
        (fast_seg_T1, applyxfm_gm,
         [(('partial_volume_files', select_GM), 'in_file')]),
        (inputnode, applyxfm_wm, [('pet', 'reference')]),
        (fast_seg_T1, applyxfm_wm,
         [(('partial_volume_files', select_WM), 'in_file')]),
        (inputnode, applyxfm_csf, [('pet', 'reference')]),
        (fast_seg_T1, applyxfm_csf,
         [(('partial_volume_files', select_CSF), 'in_file')]),
        (inputnode, applyxfm_rois, [('pet', 'reference')]),
        (mri_convert_ROIs, applyxfm_rois, [('out_file', 'in_file')]),
        (applyxfm_t1, pve_correction, [('out_file', 't1_file')]),
        (inputnode, pve_correction, [('pet', 'pet_file')]),
        (applyxfm_gm, pve_correction, [('out_file', 'grey_matter_file')]),
        (applyxfm_wm, pve_correction, [('out_file', 'white_matter_file')]),
        (applyxfm_csf, pve_correction, [('out_file', 'csf_file')]),
        (applyxfm_rois, pve_correction, [('out_file', 'roi_file')]),
        (pve_correction, applyxfm_CorrectedPET,
         [('mueller_gartner_rousset', 'in_file')]),
        (mri_convert_T1, applyxfm_CorrectedPET, [('out_file', 'reference')]),
        (coregister, applyxfm_CorrectedPET,
         [('out_matrix_file', 'in_matrix_file')]),
    ])

    output_fields = [
        "out_files", "pet_to_t1", "corrected_pet_to_t1", "pet_results_npz",
        "pet_results_mat"
    ]

    outputnode = pe.Node(
        interface=util.IdentityInterface(fields=output_fields),
        name="outputnode")

    workflow.connect([
        (pve_correction, outputnode, [("out_files", "out_files"),
                                      ("results_numpy_npz",
                                       "pet_results_npz"),
                                      ("results_matlab_mat",
                                       "pet_results_mat")]),
        (applyxfm_CorrectedPET, outputnode, [("out_file",
                                              "corrected_pet_to_t1")]),
        (coregister, outputnode, [("out_file", "pet_to_t1")]),
    ])

    return workflow
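
A minimal usage sketch (hypothetical FreeSurfer subjects directory and PET volume; assumes the module-level imports used above are in scope):

wf = create_freesurfer_pet_quantification_wf(name='fspetquant')
wf.inputs.inputnode.subjects_dir = '/data/freesurfer'
wf.inputs.inputnode.subject_id = 'sub-01'
wf.inputs.inputnode.pet = '/data/pet/sub-01_pet.nii.gz'
wf.base_dir = '/tmp/work'
wf.run()
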
Code Example #7
File: outputs.py  Project: wayne009007/sdcflows
def init_fmap_derivatives_wf(
    *,
    output_dir,
    bids_fmap_id=None,
    custom_entities=None,
    name="fmap_derivatives_wf",
    write_coeff=False,
):
    """
    Set up datasinks to store derivatives in the right location.

    Parameters
    ----------
    output_dir : :obj:`str`
        Directory in which to save derivatives
    bids_fmap_id : :obj:`str`
        Sets the ``B0FieldIdentifier`` metadata into the outputs.
    custom_entities : :obj:`dict`
        Define extra entities that will be written out in filenames.
    name : :obj:`str`
        Workflow name (default: ``"fmap_derivatives_wf"``)
    write_coeff : :obj:`bool`
        Build the workflow path to map coefficients into target space.

    Inputs
    ------
    source_files
        One or more fieldmap file(s) of the BIDS dataset that will serve for naming reference.
    fieldmap
        The preprocessed fieldmap, in its original space with Hz units.
    fmap_coeff
        Field coefficient(s) file(s)
    fmap_ref
        An anatomical reference (e.g., magnitude file)

    """
    custom_entities = custom_entities or {}
    if bids_fmap_id:
        custom_entities["fmapid"] = bids_fmap_id.replace("_", "")

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "source_files", "fieldmap", "fmap_coeff", "fmap_ref", "fmap_meta"
        ]),
        name="inputnode",
    )

    ds_reference = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            compress=True,
            suffix="fieldmap",
            dismiss_entities=("fmap", ),
            allowed_entities=tuple(custom_entities.keys()),
        ),
        name="ds_reference",
    )

    ds_fieldmap = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            desc="preproc",
            suffix="fieldmap",
            compress=True,
            allowed_entities=tuple(custom_entities.keys()),
        ),
        name="ds_fieldmap",
    )
    ds_fieldmap.inputs.Units = "Hz"
    if bids_fmap_id:
        ds_fieldmap.inputs.B0FieldIdentifier = bids_fmap_id

    for k, v in custom_entities.items():
        setattr(ds_reference.inputs, k, v)
        setattr(ds_fieldmap.inputs, k, v)

    # fmt:off
    workflow.connect([
        (inputnode, ds_reference, [("source_files", "source_file"),
                                   ("fmap_ref", "in_file"),
                                   (("source_files", _getsourcetype), "desc")
                                   ]),
        (inputnode, ds_fieldmap, [("source_files", "source_file"),
                                  ("fieldmap", "in_file"),
                                  ("source_files", "RawSources")]),
        (ds_reference, ds_fieldmap, [
            (("out_file", _getname), "AnatomicalReference"),
        ]),
        (inputnode, ds_fieldmap, [(("fmap_meta", _selectintent), "IntendedFor")
                                  ]),
    ])
    # fmt:on

    if not write_coeff:
        return workflow

    ds_coeff = pe.MapNode(
        DerivativesDataSink(
            base_directory=output_dir,
            suffix="fieldmap",
            compress=True,
            allowed_entities=tuple(custom_entities.keys()),
        ),
        name="ds_coeff",
        iterfield=("in_file", "desc"),
    )

    gen_desc = pe.Node(niu.Function(function=_gendesc), name="gen_desc")

    for k, v in custom_entities.items():
        setattr(ds_coeff.inputs, k, v)

    # fmt:off
    workflow.connect([
        (inputnode, ds_coeff, [("source_files", "source_file"),
                               ("fmap_coeff", "in_file")]),
        (inputnode, gen_desc, [("fmap_coeff", "infiles")]),
        (gen_desc, ds_coeff, [("out", "desc")]),
        (ds_coeff, ds_fieldmap, [(("out_file", _getname),
                                  "AssociatedCoefficients")]),
    ])
    # fmt:on

    return workflow
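
A minimal usage sketch (hypothetical paths; the structure of fmap_meta is an assumption based on how it is reduced by _selectintent above):

wf = init_fmap_derivatives_wf(output_dir='/out/derivatives',
                              bids_fmap_id='auto_00000',
                              write_coeff=False)
wf.inputs.inputnode.source_files = ['sub-01_phasediff.nii.gz']
wf.inputs.inputnode.fieldmap = 'sub-01_fieldmap.nii.gz'   # preprocessed, in Hz
wf.inputs.inputnode.fmap_ref = 'sub-01_magnitude.nii.gz'  # anatomical reference
wf.inputs.inputnode.fmap_meta = [{'IntendedFor': 'func/sub-01_bold.nii.gz'}]  # assumed shape
wf.base_dir = '/tmp/work'
wf.run()
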
Code Example #8
    def test_repository_roundtrip(self):

        # Create working dirs
        # Create XnatSource node
        repository = XnatRepo(
            project_id=self.project,
            server=SERVER, cache_dir=self.cache_dir)
        study = DummyStudy(
            self.STUDY_NAME, repository, processor=SingleProc('a_dir'),
            inputs=[InputFilesets('source1', 'source1', text_format),
                    InputFilesets('source2', 'source2', text_format),
                    InputFilesets('source3', 'source3', text_format),
                    InputFilesets('source4', 'source4', text_format)])
        # TODO: Should test out other file formats as well.
        source_files = ['source1', 'source2', 'source3', 'source4']
        sink_files = ['sink1', 'sink3', 'sink4']
        inputnode = pe.Node(IdentityInterface(['subject_id',
                                               'visit_id']),
                            'inputnode')
        inputnode.inputs.subject_id = str(self.SUBJECT)
        inputnode.inputs.visit_id = str(self.VISIT)
        source = pe.Node(
            RepositorySource(
                study.bound_spec(f).collection for f in source_files),
            name='source')
        dummy_pipeline = study.dummy_pipeline()
        dummy_pipeline.cap()
        sink = pe.Node(
            RepositorySink(
                (study.bound_spec(f).collection for f in sink_files),
                dummy_pipeline),
            name='sink')
        sink.inputs.name = 'repository-roundtrip-unittest'
        sink.inputs.desc = (
            "A test session created by repository roundtrip unittest")
        # Create workflow connecting them together
        workflow = pe.Workflow('source-sink-unit-test',
                               base_dir=self.work_dir)
        workflow.add_nodes((source, sink))
        workflow.connect(inputnode, 'subject_id', source, 'subject_id')
        workflow.connect(inputnode, 'visit_id', source, 'visit_id')
        workflow.connect(inputnode, 'subject_id', sink, 'subject_id')
        workflow.connect(inputnode, 'visit_id', sink, 'visit_id')
        for source_name in source_files:
            if source_name != 'source2':
                sink_name = source_name.replace('source', 'sink')
                workflow.connect(
                    source, source_name + PATH_SUFFIX,
                    sink, sink_name + PATH_SUFFIX)
        workflow.run()
        # Check cache was created properly
        self.assertEqual(filter_scans(os.listdir(self.session_cache())),
                         ['source1', 'source2',
                          'source3', 'source4'])
        expected_sink_filesets = ['sink1', 'sink3', 'sink4']
        self.assertEqual(
            filter_scans(os.listdir(self.session_cache(
                from_study=self.STUDY_NAME))), expected_sink_filesets)
        with self._connect() as login:
            fileset_names = filter_scans(login.experiments[self.session_label(
                from_study=self.STUDY_NAME)].scans.keys())
        self.assertEqual(fileset_names, expected_sink_filesets)
Code Example #9
def CreateANTSRegistrationWorkflow(WFname,
                                   CLUSTER_QUEUE,
                                   CLUSTER_QUEUE_LONG,
                                   NumberOfThreads=-1):
    ANTSWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'fixedVolumesList', 'movingVolumesList', 'initial_moving_transform',
        'fixedBinaryVolume', 'movingBinaryVolume', 'warpFixedVolumesList'
    ]),
                         name='inputspec')

    print("""Run ANTS Registration""")

    BFitAtlasToSubject = pe.Node(interface=BRAINSFit(), name="bfA2S")
    BF_cpu_sge_options_dictionary = {
        'qsub_args': '-S /bin/bash -pe smp1 2-12 -l h_vmem=14G,mem_free=4G '
                     '-o /dev/null -e /dev/null ' + CLUSTER_QUEUE,
        'overwrite': True
    }
    BFitAtlasToSubject.plugin_args = BF_cpu_sge_options_dictionary
    BFitAtlasToSubject.inputs.costMetric = "MMI"
    BFitAtlasToSubject.inputs.numberOfSamples = 1000000
    BFitAtlasToSubject.inputs.numberOfIterations = [1500]
    BFitAtlasToSubject.inputs.numberOfHistogramBins = 50
    BFitAtlasToSubject.inputs.maximumStepLength = 0.2
    BFitAtlasToSubject.inputs.minimumStepLength = [0.000005]
    BFitAtlasToSubject.inputs.useAffine = True  # Using initial transform from BRAINSABC
    BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter = 65
    BFitAtlasToSubject.inputs.outputVolume = "Trial_Initializer_Output.nii.gz"
    # Bug in BRAINSFit PREDICTIMG-1379 BFitAtlasToSubject.inputs.outputFixedVolumeROI="FixedROI.nii.gz"
    # Bug in BRAINSFit PREDICTIMG-1379 BFitAtlasToSubject.inputs.outputMovingVolumeROI="MovingROI.nii.gz"
    BFitAtlasToSubject.inputs.outputTransform = "Trial_Initializer_Output.h5"
    BFitAtlasToSubject.inputs.maskProcessingMode = "ROIAUTO"
    BFitAtlasToSubject.inputs.ROIAutoDilateSize = 4
    # BFitAtlasToSubject.inputs.maskProcessingMode="ROI"
    # ANTSWF.connect(inputsSpec,'fixedBinaryVolume',BFitAtlasToSubject,'fixedBinaryVolume')
    # ANTSWF.connect(inputsSpec,'movingBinaryVolume',BFitAtlasToSubject,'movingBinaryVolume')
    ANTSWF.connect(inputsSpec, 'fixedVolumesList', BFitAtlasToSubject,
                   'fixedVolume')
    ANTSWF.connect(inputsSpec, 'movingVolumesList', BFitAtlasToSubject,
                   'movingVolume')
    ANTSWF.connect(inputsSpec, 'initial_moving_transform', BFitAtlasToSubject,
                   'initialTransform')

    ComputeAtlasToSubjectTransform = pe.Node(interface=antsRegistration(),
                                             name="antsA2S")
    many_cpu_sge_options_dictionary = {
        'qsub_args': '-S /bin/bash -pe smp1 5-12 -l h_vmem=17G,mem_free=9G '
                     '-o /dev/null -e /dev/null ' + CLUSTER_QUEUE,
        'overwrite': True
    }
    ComputeAtlasToSubjectTransform.plugin_args = many_cpu_sge_options_dictionary

    ComputeAtlasToSubjectTransform.inputs.dimension = 3
    ComputeAtlasToSubjectTransform.inputs.metric = 'CC'  # This is a family of interfaces, CC,MeanSquares,Demons,GC,MI,Mattes
    ComputeAtlasToSubjectTransform.inputs.transform = 'SyN[0.25,3.0,0.0]'
    ComputeAtlasToSubjectTransform.inputs.number_of_iterations = [250, 100, 20]
    ComputeAtlasToSubjectTransform.inputs.convergence_threshold = 1e-7
    ComputeAtlasToSubjectTransform.inputs.smoothing_sigmas = [0, 0, 0]
    ComputeAtlasToSubjectTransform.inputs.shrink_factors = [3, 2, 1]
    ComputeAtlasToSubjectTransform.inputs.use_estimate_learning_rate_once = True
    ComputeAtlasToSubjectTransform.inputs.use_histogram_matching = True
    ComputeAtlasToSubjectTransform.inputs.invert_initial_moving_transform = False
    ComputeAtlasToSubjectTransform.inputs.output_transform_prefix = 'antsRegPrefix_'
    ComputeAtlasToSubjectTransform.inputs.output_warped_image = 'moving_to_fixed.nii.gz'
    ComputeAtlasToSubjectTransform.inputs.output_inverse_warped_image = 'fixed_to_moving.nii.gz'
    # ComputeAtlasToSubjectTransform.inputs.num_threads=-1
    # if os.environ.has_key('NSLOTS'):
    #    ComputeAtlasToSubjectTransform.inputs.num_threads=int(os.environ.has_key('NSLOTS'))
    # else:
    #    ComputeAtlasToSubjectTransform.inputs.num_threads=NumberOfThreads
    # ComputeAtlasToSubjectTransform.inputs.fixedMask=SUBJ_A_small_T2_mask.nii.gz
    # ComputeAtlasToSubjectTransform.inputs.movingMask=SUBJ_B_small_T2_mask.nii.gz

    ANTSWF.connect(inputsSpec, 'fixedVolumesList',
                   ComputeAtlasToSubjectTransform, "fixed_image")
    ANTSWF.connect(inputsSpec, 'movingVolumesList',
                   ComputeAtlasToSubjectTransform, "moving_image")
    ANTSWF.connect(BFitAtlasToSubject, 'outputTransform',
                   ComputeAtlasToSubjectTransform, 'initial_moving_transform')

    if True:  # debugging block: warp the moving image through the merged transforms
        mergeAffineWarp = pe.Node(interface=Merge(2), name="Merge_AffineWarp")
        ANTSWF.connect(ComputeAtlasToSubjectTransform, 'warp_transform',
                       mergeAffineWarp, 'in1')
        ANTSWF.connect(BFitAtlasToSubject, 'outputTransform', mergeAffineWarp,
                       'in2')

        from nipype.interfaces.ants import WarpImageMultiTransform
        debugWarpTest = pe.Node(interface=WarpImageMultiTransform(),
                                name="dbgWarpTest")
        # Not allowed as an input debugWarpTest.inputs.output_image = 'debugWarpedMovingToFixed.nii.gz'

        ANTSWF.connect(inputsSpec, 'fixedVolumesList', debugWarpTest,
                       'reference_image')
        ANTSWF.connect(inputsSpec, 'movingVolumesList', debugWarpTest,
                       'moving_image')
        ANTSWF.connect(mergeAffineWarp, 'out', debugWarpTest,
                       'transformation_series')

    #############
    outputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'warped_image', 'inverse_warped_image', 'warp_transform',
        'inverse_warp_transform', 'affine_transform'
    ]),
                          name='outputspec')

    ANTSWF.connect(ComputeAtlasToSubjectTransform, 'warped_image', outputsSpec,
                   'warped_image')
    ANTSWF.connect(ComputeAtlasToSubjectTransform, 'inverse_warped_image',
                   outputsSpec, 'inverse_warped_image')
    ANTSWF.connect(ComputeAtlasToSubjectTransform, 'warp_transform',
                   outputsSpec, 'warp_transform')
    ANTSWF.connect(ComputeAtlasToSubjectTransform, 'inverse_warp_transform',
                   outputsSpec, 'inverse_warp_transform')
    ANTSWF.connect(BFitAtlasToSubject, 'outputTransform', outputsSpec,
                   'affine_transform')

    return ANTSWF
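
A minimal usage sketch (hypothetical queue strings and volumes; assumes the module-level imports used above are in scope):

wf = CreateANTSRegistrationWorkflow('ANTSRegWF', '-q all.q', '-q long.q')
wf.inputs.inputspec.fixedVolumesList = ['atlas_T1.nii.gz']
wf.inputs.inputspec.movingVolumesList = ['subject_T1.nii.gz']
wf.inputs.inputspec.initial_moving_transform = 'initial_affine.h5'
wf.base_dir = '/tmp/work'
wf.run()  # or wf.run(plugin='SGE') to honor the per-node qsub_args
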
Code Example #10
def register(warped_dir, atlas_image_brain, subject_T1ws_T2ws, subject_T2ws,
             n_jobs):

    input_spec = pe.Node(utility.IdentityInterface(
        fields=['subject_image_list', 'subject_image', 'atlas_image_brain']),
                         iterables=[('subject_image_list', subject_T1ws_T2ws),
                                    ('subject_image', subject_T2ws)],
                         synchronize=True,
                         name='input_spec')
    # set input_spec
    input_spec.inputs.subject_image_list = subject_T1ws_T2ws
    input_spec.inputs.subject_image = subject_T2ws
    input_spec.inputs.atlas_image_brain = atlas_image_brain
    '''
    CC[x, x, 1, 8]: [fixed, moving, weight, radius]
    -t SyN[0.25]: SyN transform with a gradient step of 0.25
    -r Gauss[3, 0]: sigma 0
    -I 30x50x20
    --use-Histogram-Matching
    --number-of-affine-iterations 10000x10000x10000x10000: 4-level image
    pyramid with 10000 iterations at each level
    --MI-option 32x16000: 32 bins, 16000 samples
    '''

    reg = pe.Node(
        ants.Registration(
            dimension=3,
            output_transform_prefix="output_",
            #interpolation='BSpline',
            transforms=['Affine', 'SyN'],
            transform_parameters=[(2.0, ), (0.25, )],  #default values syn
            shrink_factors=[[8, 4, 2, 1], [4, 2, 1]],
            smoothing_sigmas=[[3, 2, 1, 0], [2, 1, 0]],  #None for Syn?
            sigma_units=['vox'] * 2,
            sampling_percentage=[0.05, None],  #just use default?
            sampling_strategy=['Random', 'None'],
            number_of_iterations=[[10000, 10000, 10000, 10000], [30, 50, 20]],
            metric=['MI', 'CC'],
            metric_weight=[1, 1],
            radius_or_number_of_bins=[32, 8],
            #winsorize_lower_quantile=0.05,
            #winsorize_upper_quantile=0.95,
            verbose=True,
            use_histogram_matching=[True, True]),
        name='calc_registration')

    applytransforms = pe.Node(ants.ApplyTransforms(
        dimension=3, interpolation='NearestNeighbor'),
                              name='apply_warpfield')

    wf = pe.Workflow(name='wf', base_dir=warped_dir)

    wf.connect([
        (input_spec, reg, [('atlas_image_brain', 'moving_image'),
                           ('subject_image_list', 'fixed_image')
                           ]),  #create warp field to register atlas to subject
        (input_spec, applytransforms, [('atlas_image_brain', 'input_image'),
                                       ('subject_image', 'reference_image')]),
        (reg, applytransforms, [('forward_transforms', 'transforms')]
         )  #apply warpfield to register atlas brain to subject
    ])

    wf.config['execution']['parameterize_dirs'] = False

    wf.write_graph()
    output = wf.run(plugin='MultiProc', plugin_args={'n_procs': n_jobs})
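
A minimal usage sketch (hypothetical file lists; each entry of subject_T1ws_T2ws pairs with the corresponding entry of subject_T2ws because the input iterables are synchronized):

register(warped_dir='/tmp/warped',
         atlas_image_brain='atlas_brain.nii.gz',
         subject_T1ws_T2ws=[['sub-01_T1w.nii.gz', 'sub-01_T2w.nii.gz']],
         subject_T2ws=['sub-01_T2w.nii.gz'],
         n_jobs=4)
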
Code Example #11
 def test_summary(self):
     # Create working dirs
     # Create XnatSource node
     repository = XnatRepo(
         server=SERVER, cache_dir=self.cache_dir,
         project_id=self.project)
     study = DummyStudy(
         self.SUMMARY_STUDY_NAME, repository, SingleProc('ad'),
         inputs=[
             InputFilesets('source1', 'source1', text_format),
             InputFilesets('source2', 'source2', text_format),
             InputFilesets('source3', 'source3', text_format)])
     # TODO: Should test out other file formats as well.
     source_files = ['source1', 'source2', 'source3']
     inputnode = pe.Node(IdentityInterface(['subject_id', 'visit_id']),
                         'inputnode')
     inputnode.inputs.subject_id = self.SUBJECT
     inputnode.inputs.visit_id = self.VISIT
     source = pe.Node(
         RepositorySource(
             [study.bound_spec(f).collection for f in source_files]),
         name='source')
     subject_sink_files = ['subject_sink']
     dummy_pipeline = study.dummy_pipeline()
     dummy_pipeline.cap()
     subject_sink = pe.Node(
         RepositorySink(
             [study.bound_spec(f).collection for f in subject_sink_files],
             dummy_pipeline),
         name='subject_sink')
     subject_sink.inputs.name = 'subject_summary'
     subject_sink.inputs.desc = (
         "Tests the sinking of subject-wide filesets")
     # Test visit sink
     visit_sink_files = ['visit_sink']
     visit_sink = pe.Node(
         RepositorySink(
             [study.bound_spec(f).collection for f in visit_sink_files],
             dummy_pipeline),
         name='visit_sink')
     visit_sink.inputs.name = 'visit_summary'
     visit_sink.inputs.desc = (
         "Tests the sinking of visit-wide filesets")
     # Test project sink
     study_sink_files = ['study_sink']
     study_sink = pe.Node(
         RepositorySink(
             [study.bound_spec(f).collection for f in study_sink_files],
             dummy_pipeline),
         name='study_sink')
     study_sink.inputs.name = 'project_summary'
     study_sink.inputs.desc = (
         "Tests the sinking of project-wide filesets")
     # Create workflow connecting them together
     workflow = pe.Workflow('summary_unittest',
                            base_dir=self.work_dir)
     workflow.add_nodes((source, subject_sink, visit_sink,
                         study_sink))
     workflow.connect(inputnode, 'subject_id', source, 'subject_id')
     workflow.connect(inputnode, 'visit_id', source, 'visit_id')
     workflow.connect(inputnode, 'subject_id', subject_sink, 'subject_id')
     workflow.connect(inputnode, 'visit_id', visit_sink, 'visit_id')
     workflow.connect(
         source, 'source1' + PATH_SUFFIX,
         subject_sink, 'subject_sink' + PATH_SUFFIX)
     workflow.connect(
         source, 'source2' + PATH_SUFFIX,
         visit_sink, 'visit_sink' + PATH_SUFFIX)
     workflow.connect(
         source, 'source3' + PATH_SUFFIX,
         study_sink, 'study_sink' + PATH_SUFFIX)
     workflow.run()
     study.clear_caches()  # Refresh the cached repository tree object
     with self._connect() as login:
         # Check subject summary directories were created properly in cache
         expected_subj_filesets = ['subject_sink']
         subject_dir = self.session_cache(
             visit=XnatRepo.SUMMARY_NAME,
             from_study=self.SUMMARY_STUDY_NAME)
         self.assertEqual(filter_scans(os.listdir(subject_dir)),
                          expected_subj_filesets)
         # and on XNAT
         subject_fileset_names = filter_scans(login.projects[
             self.project].experiments[
                 self.session_label(
                     visit=XnatRepo.SUMMARY_NAME,
                     from_study=self.SUMMARY_STUDY_NAME)].scans.keys())
         self.assertEqual(expected_subj_filesets,
                          subject_fileset_names)
         # Check visit summary directories were created properly in
         # cache
         expected_visit_filesets = ['visit_sink']
         visit_dir = self.session_cache(
             subject=XnatRepo.SUMMARY_NAME,
             from_study=self.SUMMARY_STUDY_NAME)
         self.assertEqual(filter_scans(os.listdir(visit_dir)),
                          expected_visit_filesets)
         # and on XNAT
         visit_fileset_names = filter_scans(login.projects[
             self.project].experiments[
                 self.session_label(
                     subject=XnatRepo.SUMMARY_NAME,
                     from_study=self.SUMMARY_STUDY_NAME)].scans.keys())
         self.assertEqual(expected_visit_filesets, visit_fileset_names)
         # Check project summary directories were created properly in cache
         expected_proj_filesets = ['study_sink']
         project_dir = self.session_cache(
             subject=XnatRepo.SUMMARY_NAME,
             visit=XnatRepo.SUMMARY_NAME,
             from_study=self.SUMMARY_STUDY_NAME)
         self.assertEqual(filter_scans(os.listdir(project_dir)),
                          expected_proj_filesets)
         # and on XNAT
         project_fileset_names = filter_scans(login.projects[
             self.project].experiments[
                 self.session_label(
                     subject=XnatRepo.SUMMARY_NAME,
                     visit=XnatRepo.SUMMARY_NAME,
                     from_study=self.SUMMARY_STUDY_NAME)].scans.keys())
         self.assertEqual(expected_proj_filesets, project_fileset_names)
     # Reload the data from the summary directories
     reloadinputnode = pe.Node(IdentityInterface(['subject_id',
                                                  'visit_id']),
                               'reload_inputnode')
     reloadinputnode.inputs.subject_id = self.SUBJECT
     reloadinputnode.inputs.visit_id = self.VISIT
     reloadsource_per_subject = pe.Node(
         RepositorySource(
             study.bound_spec(f).collection for f in subject_sink_files),
         name='reload_source_per_subject')
     reloadsource_per_visit = pe.Node(
         RepositorySource(
             study.bound_spec(f).collection for f in visit_sink_files),
         name='reload_source_per_visit')
     reloadsource_per_study = pe.Node(
         RepositorySource(
             study.bound_spec(f).collection for f in study_sink_files),
         name='reload_source_per_study')
     reloadsink = pe.Node(
         RepositorySink(
             (study.bound_spec(f).collection
              for f in ['resink1', 'resink2', 'resink3']),
             dummy_pipeline),
         name='reload_sink')
     reloadsink.inputs.name = 'reload_summary'
     reloadsink.inputs.desc = (
         "Tests the reloading of subject and project summary filesets")
     reloadworkflow = pe.Workflow('reload_summary_unittest',
                                  base_dir=self.work_dir)
     for node in (reloadsource_per_subject, reloadsource_per_visit,
                  reloadsource_per_study, reloadsink):
         for iterator in ('subject_id', 'visit_id'):
             reloadworkflow.connect(reloadinputnode, iterator,
                                    node, iterator)
     reloadworkflow.connect(reloadsource_per_subject,
                            'subject_sink' + PATH_SUFFIX,
                            reloadsink,
                            'resink1' + PATH_SUFFIX)
     reloadworkflow.connect(reloadsource_per_visit,
                            'visit_sink' + PATH_SUFFIX,
                            reloadsink,
                            'resink2' + PATH_SUFFIX)
     reloadworkflow.connect(reloadsource_per_study,
                            'study_sink' + PATH_SUFFIX,
                            reloadsink,
                            'resink3' + PATH_SUFFIX)
     reloadworkflow.run()
     # Check that the filesets were resunk properly in the cache
     self.assertEqual(
         filter_scans(os.listdir(self.session_cache(
             from_study=self.SUMMARY_STUDY_NAME))),
         ['resink1', 'resink2', 'resink3'])
     # and on XNAT
     with self._connect() as login:
         resinked_fileset_names = filter_scans(login.projects[
             self.project].experiments[
                 self.session_label(
                     from_study=self.SUMMARY_STUDY_NAME)].scans.keys())
         self.assertEqual(sorted(resinked_fileset_names),
                          ['resink1', 'resink2', 'resink3'])
Code Example #12
def create_func_preproc(use_bet=False, wf_name='func_preproc'):
    """

    The main purpose of this workflow is to process functional data. The raw
    rest file is deobliqued and reoriented into RPI. The mean intensity over
    all time points is then computed for each voxel and used as the reference
    image for calculating motion parameters. The image is then skull-stripped
    and normalized, and a mask of the processed data is obtained for use in
    further image analysis.

    Parameters
    ----------

    use_bet : boolean
        Whether to use FSL BET for skull stripping instead of AFNI
        3dAutomask (default: False)

    wf_name : string
        Workflow name

    Returns
    -------
    func_preproc : workflow object
        Functional Preprocessing workflow object

    Notes
    -----

    `Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/func_preproc/func_preproc.py>`_

    Workflow Inputs::

        inputspec.rest : func/rest file or a list of func/rest nifti file
            User input functional(T2) Image, in any of the 8 orientations

        scan_params.tr : string
            Subject TR

        scan_params.acquisition : string
            Acquisition pattern (interleaved/sequential, ascending/descending)

        scan_params.ref_slice : integer
            Reference slice for slice timing correction

    Workflow Outputs::

        outputspec.refit : string (nifti file)
            Path to deobliqued anatomical data

        outputspec.reorient : string (nifti file)
            Path to RPI oriented anatomical data

        outputspec.motion_correct_ref : string (nifti file)
             Path to Mean intensity Motion corrected image
             (base reference image for the second motion correction run)

        outputspec.motion_correct : string (nifti file)
            Path to motion corrected output file

        outputspec.max_displacement : string (Mat file)
            Path to maximum displacement (in mm) for brain voxels in each volume

        outputspec.movement_parameters : string (Mat file)
            Path to 1D file containing six movement/motion parameters(3 Translation, 3 Rotations)
            in different columns (roll pitch yaw dS  dL  dP)

        outputspec.skullstrip : string (nifti file)
            Path to skull stripped Motion Corrected Image

        outputspec.mask : string (nifti file)
            Path to brain-only mask

        outputspec.example_func : string (nifti file)
            Mean, Skull Stripped, Motion Corrected output T2 Image path
            (Image with mean intensity values across voxels)

        outputspec.preprocessed : string (nifti file)
            output skull stripped, motion corrected T2 image
            with normalized intensity values

        outputspec.preprocessed_mask : string (nifti file)
           Mask obtained from normalized preprocessed image

    Order of commands:

    - Deobliquing the scans.  For details see `3drefit <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3drefit.html>`_::

        3drefit -deoblique rest_3dc.nii.gz

    - Re-orienting the Image into Right-to-Left Posterior-to-Anterior Inferior-to-Superior (RPI) orientation. For details see `3dresample <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dresample.html>`_::

        3dresample -orient RPI
                   -prefix rest_3dc_RPI.nii.gz
                   -inset rest_3dc.nii.gz

    - Calculate voxel wise statistics. Get the RPI Image with mean intensity values over all timepoints for each voxel. For details see `3dTstat <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTstat.html>`_::

        3dTstat -mean
                -prefix rest_3dc_RPI_3dT.nii.gz
                rest_3dc_RPI.nii.gz

    - Motion Correction. For details see `3dvolreg <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dvolreg.html>`_::

        3dvolreg -Fourier
                 -twopass
                 -base rest_3dc_RPI_3dT.nii.gz
                 -zpad 4
                 -maxdisp1D rest_3dc_RPI_3dvmd1D.1D
                 -1Dfile rest_3dc_RPI_3dv1D.1D
                 -prefix rest_3dc_RPI_3dv.nii.gz
                 rest_3dc_RPI.nii.gz

      The base image (the reference image) is the mean intensity RPI image obtained in the above step. For each volume
      in the RPI-oriented T2 image, the command aligns the image with the base mean image and calculates the motion,
      displacement and movement parameters. It also outputs the aligned 4D volume and the movement and displacement
      parameters for each volume.

    - Calculate voxel wise statistics. Get the motion corrected output Image from the above step, with mean intensity values over all timepoints for each voxel.
      For details see `3dTstat <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTstat.html>`_::

        3dTstat -mean
                -prefix rest_3dc_RPI_3dv_3dT.nii.gz
                rest_3dc_RPI_3dv.nii.gz

    - Motion Correction and get motion, movement and displacement parameters. For details see `3dvolreg <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dvolreg.html>`_::

        3dvolreg -Fourier
                 -twopass
                 -base rest_3dc_RPI_3dv_3dT.nii.gz
                 -zpad 4
                 -maxdisp1D rest_3dc_RPI_3dvmd1D.1D
                 -1Dfile rest_3dc_RPI_3dv1D.1D
                 -prefix rest_3dc_RPI_3dv.nii.gz
                 rest_3dc_RPI.nii.gz

      The base image (the reference image) is the mean intensity motion-corrected image obtained from the above step (the first 3dvolreg run).
      For each volume in the RPI-oriented T2 image, the command aligns the image with the base mean image and calculates the motion,
      displacement and movement parameters. It also outputs the aligned 4D volume and the movement and displacement parameters for each volume.

    - Create a brain-only mask. For details see `3dAutomask <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAutomask.html>`_::

        3dAutomask
                   -prefix rest_3dc_RPI_3dv_automask.nii.gz
                   rest_3dc_RPI_3dv.nii.gz

    - Edge detect (remove the skull) and keep only the brain. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::

        3dcalc -a rest_3dc_RPI_3dv.nii.gz
               -b rest_3dc_RPI_3dv_automask.nii.gz
               -expr 'a*b'
               -prefix rest_3dc_RPI_3dv_3dc.nii.gz

    - Normalizing the image intensity values. For details see `fslmaths <http://www.fmrib.ox.ac.uk/fsl/avwutils/index.html>`_::

        fslmaths rest_3dc_RPI_3dv_3dc.nii.gz
                 -ing 10000 rest_3dc_RPI_3dv_3dc_maths.nii.gz
                 -odt float

      Normalized intensity = (TrueValue * 10000) / global4Dmean (a numpy sketch of this scaling follows this code example)

    - Calculate mean of skull stripped image. For details see `3dTstat <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTstat.html>`_::

        3dTstat -mean -prefix rest_3dc_RPI_3dv_3dc_3dT.nii.gz rest_3dc_RPI_3dv_3dc.nii.gz

    - Create mask (generate a mask from the normalized data). For details see `fslmaths <http://www.fmrib.ox.ac.uk/fsl/avwutils/index.html>`_::

        fslmaths rest_3dc_RPI_3dv_3dc_maths.nii.gz
                 -Tmin -bin rest_3dc_RPI_3dv_3dc_maths_maths.nii.gz
                 -odt char

    High Level Workflow Graph:

    .. image:: ../images/func_preproc.dot.png
       :width: 1000


    Detailed Workflow Graph:

    .. image:: ../images/func_preproc_detailed.dot.png
       :width: 1000

    Examples
    --------

    >>> import func_preproc
    >>> preproc = create_func_preproc(bet=True)
    >>> preproc.inputs.inputspec.func='sub1/func/rest.nii.gz'
    >>> preproc.run() #doctest: +SKIP


    >>> import func_preproc
    >>> preproc = create_func_preproc(bet=False)
    >>> preproc.inputs.inputspec.func='sub1/func/rest.nii.gz'
    >>> preproc.run() #doctest: +SKIP

    """

    preproc = pe.Workflow(name=wf_name)
    inputNode = pe.Node(util.IdentityInterface(fields=['func']),
                        name='inputspec')

    outputNode = pe.Node(
        util.IdentityInterface(fields=[
            'refit',
            'reorient',
            'reorient_mean',
            'motion_correct',
            'motion_correct_ref',
            'movement_parameters',
            'max_displacement',
            #'xform_matrix',
            'mask',
            'skullstrip',
            'example_func',
            'preprocessed',
            'preprocessed_mask',
            'slice_time_corrected',
            'oned_matrix_save'
        ]),
        name='outputspec')

    func_deoblique = pe.Node(interface=preprocess.Refit(),
                             name='func_deoblique')
    func_deoblique.inputs.deoblique = True

    preproc.connect(inputNode, 'func', func_deoblique, 'in_file')

    func_reorient = pe.Node(interface=preprocess.Resample(),
                            name='func_reorient')
    func_reorient.inputs.orientation = 'RPI'
    func_reorient.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect(func_deoblique, 'out_file', func_reorient, 'in_file')

    preproc.connect(func_reorient, 'out_file', outputNode, 'reorient')

    func_get_mean_RPI = pe.Node(interface=preprocess.TStat(),
                                name='func_get_mean_RPI')
    func_get_mean_RPI.inputs.options = '-mean'
    func_get_mean_RPI.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect(func_reorient, 'out_file', func_get_mean_RPI, 'in_file')

    #calculate motion parameters
    func_motion_correct = pe.Node(interface=preprocess.Volreg(),
                                  name='func_motion_correct')
    func_motion_correct.inputs.args = '-Fourier -twopass'
    func_motion_correct.inputs.zpad = 4
    func_motion_correct.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect(func_reorient, 'out_file', func_motion_correct, 'in_file')
    preproc.connect(func_get_mean_RPI, 'out_file', func_motion_correct,
                    'basefile')

    func_get_mean_motion = func_get_mean_RPI.clone('func_get_mean_motion')
    preproc.connect(func_motion_correct, 'out_file', func_get_mean_motion,
                    'in_file')

    preproc.connect(func_get_mean_motion, 'out_file', outputNode,
                    'motion_correct_ref')

    func_motion_correct_A = func_motion_correct.clone('func_motion_correct_A')
    func_motion_correct_A.inputs.md1d_file = 'max_displacement.1D'

    preproc.connect(func_reorient, 'out_file', func_motion_correct_A,
                    'in_file')
    preproc.connect(func_get_mean_motion, 'out_file', func_motion_correct_A,
                    'basefile')

    preproc.connect(func_motion_correct_A, 'out_file', outputNode,
                    'motion_correct')
    preproc.connect(func_motion_correct_A, 'md1d_file', outputNode,
                    'max_displacement')
    preproc.connect(func_motion_correct_A, 'oned_file', outputNode,
                    'movement_parameters')
    preproc.connect(func_motion_correct_A, 'oned_matrix_save', outputNode,
                    'oned_matrix_save')

    if not use_bet:

        func_get_brain_mask = pe.Node(interface=preprocess.Automask(),
                                      name='func_get_brain_mask')

        func_get_brain_mask.inputs.outputtype = 'NIFTI_GZ'

        preproc.connect(func_motion_correct_A, 'out_file', func_get_brain_mask,
                        'in_file')

        preproc.connect(func_get_brain_mask, 'out_file', outputNode, 'mask')

    else:

        func_get_brain_mask = pe.Node(interface=fsl.BET(),
                                      name='func_get_brain_mask_BET')

        func_get_brain_mask.inputs.mask = True
        func_get_brain_mask.inputs.functional = True

        erode_one_voxel = pe.Node(interface=fsl.ErodeImage(),
                                  name='erode_one_voxel')

        erode_one_voxel.inputs.kernel_shape = 'box'
        erode_one_voxel.inputs.kernel_size = 1.0

        preproc.connect(func_motion_correct_A, 'out_file', func_get_brain_mask,
                        'in_file')

        preproc.connect(func_get_brain_mask, 'mask_file', erode_one_voxel,
                        'in_file')

        preproc.connect(erode_one_voxel, 'out_file', outputNode, 'mask')

    func_edge_detect = pe.Node(interface=preprocess.Calc(),
                               name='func_edge_detect')
    func_edge_detect.inputs.expr = 'a*b'
    func_edge_detect.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect(func_motion_correct_A, 'out_file', func_edge_detect,
                    'in_file_a')

    if not use_bet:

        preproc.connect(func_get_brain_mask, 'out_file', func_edge_detect,
                        'in_file_b')

    else:

        preproc.connect(erode_one_voxel, 'out_file', func_edge_detect,
                        'in_file_b')

    preproc.connect(func_edge_detect, 'out_file', outputNode, 'skullstrip')

    func_mean_skullstrip = pe.Node(interface=preprocess.TStat(),
                                   name='func_mean_skullstrip')
    func_mean_skullstrip.inputs.options = '-mean'
    func_mean_skullstrip.inputs.outputtype = 'NIFTI_GZ'

    preproc.connect(func_edge_detect, 'out_file', func_mean_skullstrip,
                    'in_file')

    preproc.connect(func_mean_skullstrip, 'out_file', outputNode,
                    'example_func')

    func_normalize = pe.Node(interface=fsl.ImageMaths(), name='func_normalize')
    func_normalize.inputs.op_string = '-ing 10000'
    func_normalize.inputs.out_data_type = 'float'

    preproc.connect(func_edge_detect, 'out_file', func_normalize, 'in_file')

    preproc.connect(func_normalize, 'out_file', outputNode, 'preprocessed')

    func_mask_normalize = pe.Node(interface=fsl.ImageMaths(),
                                  name='func_mask_normalize')
    func_mask_normalize.inputs.op_string = '-Tmin -bin'
    func_mask_normalize.inputs.out_data_type = 'char'

    preproc.connect(func_normalize, 'out_file', func_mask_normalize, 'in_file')

    preproc.connect(func_mask_normalize, 'out_file', outputNode,
                    'preprocessed_mask')

    return preproc
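
A minimal numpy sketch of the grand-mean scaling step above ("fslmaths -ing 10000"); it only restates Normalized = TrueValue * 10000 / global4Dmean from the docstring, and the file name is a placeholder taken from there, not part of the workflow:

import nibabel as nib
import numpy as np

img = nib.load('rest_3dc_RPI_3dv_3dc.nii.gz')  # placeholder input path
data = img.get_fdata()

# scale so that the global 4D mean becomes 10000
scaled = data * 10000.0 / data.mean()

nib.save(nib.Nifti1Image(scaled.astype(np.float32), img.affine, img.header),
         'rest_3dc_RPI_3dv_3dc_maths.nii.gz')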
Code Example #13
def create_wf_edit_func(wf_name="edit_func"):
    """
    Workflow Inputs::

        inputspec.func : func file or a list of func/rest nifti files
            User input functional(T2*) Image

        inputspec.start_idx : string
            Starting volume/slice of the functional image (optional)

        inputspec.stop_idx : string
            Last volume/slice of the functional image (optional)

    Workflow Outputs::

        outputspec.edited_func : string (nifti file)
            Path to Output image with the initial few slices dropped


    Order of commands:

    - Get the start and end volume indices of the functional run; if not defined by the user, the first and last volumes are used (a sketch of this helper follows this code example).

        get_idx(in_files, stop_idx, start_idx)

    - Dropping the initial TRs. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::

        3dcalc -a rest.nii.gz[4..299]
               -expr 'a'
               -prefix rest_3dc.nii.gz

    """

    # allocate a workflow object
    try:
        preproc = pe.Workflow(name=wf_name)
    except Exception:
        # dbg_file_lineno() is assumed to return a (filename, line number) tuple
        logger.info("Error allocating workflow %s. (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    # configure the workflow's input spec
    try:
        inputNode = pe.Node(
            util.IdentityInterface(fields=['func', 'start_idx', 'stop_idx']),
            name='inputspec')
    except Exception:
        logger.info("Error allocating inputspec (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    # configure the workflow's output spec
    try:
        outputNode = pe.Node(util.IdentityInterface(fields=['edited_func']),
                             name='outputspec')
    except Exception:
        logger.info("Error allocating output spec (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    # allocate a node to check that the requested edits are
    # reasonable given the data
    try:
        func_get_idx = pe.Node(util.Function(
            input_names=['in_files', 'stop_idx', 'start_idx'],
            output_names=['stopidx', 'startidx'],
            function=get_idx),
                               name='func_get_idx')
    except Exception:
        logger.info("Error allocating get_idx function node (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    # wire in the func_get_idx node
    try:
        preproc.connect(inputNode, 'func', func_get_idx, 'in_files')
    except Exception:
        logger.info("Error connecting 'in_files' input to get_idx function node (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    try:
        preproc.connect(inputNode, 'start_idx', func_get_idx, 'start_idx')
    except Exception:
        logger.info("Error connecting 'start_idx' input to get_idx function node (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    try:
        preproc.connect(inputNode, 'stop_idx', func_get_idx, 'stop_idx')
    except Exception:
        logger.info("Error connecting 'stop_idx' input to get_idx function node (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    try:
        # allocate a node to edit the functional file
        func_drop_trs = pe.Node(interface=preprocess.Calc(),
                                name='func_drop_trs')
        func_drop_trs.inputs.expr = 'a'
        func_drop_trs.inputs.outputtype = 'NIFTI_GZ'
    except Exception:
        logger.info("Error allocating afni Calc node (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    # wire in the inputs
    try:
        preproc.connect(inputNode, 'func', func_drop_trs, 'in_file_a')
    except Exception:
        logger.info("Error connecting 'in_file_a' input to afni Calc node (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    try:
        preproc.connect(func_get_idx, 'startidx', func_drop_trs, 'start_idx')
    except Exception:
        logger.info("Error connecting 'start_idx' input to afni Calc node (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    try:
        preproc.connect(func_get_idx, 'stopidx', func_drop_trs, 'stop_idx')
    except Exception:
        logger.info("Error connecting 'stop_idx' input to afni Calc node (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    try:
        # wire the output
        preproc.connect(func_drop_trs, 'out_file', outputNode, 'edited_func')
    except Exception:
        logger.info("Error connecting output (wflow %s). (%s:%d)" %
                    ((wf_name,) + tuple(dbg_file_lineno())))
        raise

    return preproc
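
The get_idx helper wired in above is defined elsewhere in the module. A minimal sketch of what it plausibly does, assuming nibabel is available and that missing or out-of-range indices fall back to the full run (the fallback rules here are assumptions):

def get_idx(in_files, stop_idx, start_idx):
    """Clamp the requested indices to the number of volumes actually
    present in the functional image (illustrative sketch only)."""
    import nibabel as nib

    nvols = nib.load(in_files).shape[3]

    # fall back to the first volume if no usable start index was given
    startidx = start_idx if isinstance(start_idx, int) and 0 <= start_idx < nvols else 0
    # fall back to the last volume if no usable stop index was given
    stopidx = stop_idx if isinstance(stop_idx, int) and startidx < stop_idx < nvols else nvols - 1

    # order matters: the Function node declares output_names=['stopidx', 'startidx']
    return stopidx, startidx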
Code Example #14
def anat_qc_workflow(name='MRIQC_Anat', settings=None):
    """
    One-subject-one-session-one-run pipeline to extract the NR-IQMs from
    anatomical images
    """
    if settings is None:
        settings = {}

    workflow = pe.Workflow(name=name)
    deriv_dir = op.abspath('./derivatives')
    if 'work_dir' in settings:
        deriv_dir = op.abspath(op.join(settings['work_dir'], 'derivatives'))

    if not op.exists(deriv_dir):
        os.makedirs(deriv_dir)
    # Define workflow, inputs and outputs
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bids_root', 'subject_id', 'session_id', 'run_id']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['out_json']),
                         name='outputnode')

    # 0. Get data
    datasource = pe.Node(niu.Function(input_names=[
        'bids_root', 'data_type', 'subject_id', 'session_id', 'run_id'
    ],
                                      output_names=['anatomical_scan'],
                                      function=bids_getfile),
                         name='datasource')
    datasource.inputs.data_type = 'anat'

    # 1a. Reorient anatomical image
    arw = mri_reorient_wf()
    # 1b. Estimate bias
    n4itk = pe.Node(ants.N4BiasFieldCorrection(dimension=3, save_bias=True),
                    name='Bias')
    # 2. Skull-stripping (afni)
    asw = skullstrip_wf()
    mask = pe.Node(fsl.ApplyMask(), name='MaskAnatomical')
    # 3. Head mask (including the nasal-cerebellum mask)
    hmsk = headmsk_wf()
    # 4. Air mask (with and without artifacts)
    amw = airmsk_wf(save_memory=settings.get('save_memory', False),
                    ants_settings=settings.get('ants_settings', None))

    # Brain tissue segmentation
    segment = pe.Node(fsl.FAST(img_type=1,
                               segments=True,
                               out_basename='segment'),
                      name='segmentation')

    # AFNI check smoothing
    fwhm = pe.Node(afp.FWHMx(combine=True, detrend=True), name='smoothness')
    # fwhm.inputs.acf = True  # add when AFNI >= 16

    # Compute python-coded measures
    measures = pe.Node(StructuralQC(), 'measures')

    # Plot mosaic
    plot = pe.Node(PlotMosaic(), name='plot_mosaic')
    merg = pe.Node(niu.Merge(3), name='plot_metadata')

    # Connect all nodes
    workflow.connect([
        (inputnode, datasource, [('bids_root', 'bids_root'),
                                 ('subject_id', 'subject_id'),
                                 ('session_id', 'session_id'),
                                 ('run_id', 'run_id')]),
        (datasource, arw, [('anatomical_scan', 'inputnode.in_file')]),
        (arw, asw, [('outputnode.out_file', 'inputnode.in_file')]),
        (arw, n4itk, [('outputnode.out_file', 'input_image')]),
        # (asw, n4itk, [('outputnode.out_mask', 'mask_image')]),
        (n4itk, mask, [('output_image', 'in_file')]),
        (asw, mask, [('outputnode.out_mask', 'mask_file')]),
        (mask, segment, [('out_file', 'in_files')]),
        (n4itk, hmsk, [('output_image', 'inputnode.in_file')]),
        (segment, hmsk, [('tissue_class_map', 'inputnode.in_segm')]),
        (n4itk, measures, [('output_image', 'in_noinu')]),
        (arw, measures, [('outputnode.out_file', 'in_file')]),
        (arw, fwhm, [('outputnode.out_file', 'in_file')]),
        (asw, fwhm, [('outputnode.out_mask', 'mask')]),
        (arw, amw, [('outputnode.out_file', 'inputnode.in_file')]),
        (n4itk, amw, [('output_image', 'inputnode.in_noinu')]),
        (asw, amw, [('outputnode.out_mask', 'inputnode.in_mask')]),
        (hmsk, amw, [('outputnode.out_file', 'inputnode.head_mask')]),
        (amw, measures, [('outputnode.out_file', 'air_msk')]),
        (amw, measures, [('outputnode.artifact_msk', 'artifact_msk')]),
        (segment, measures, [('tissue_class_map', 'in_segm'),
                             ('partial_volume_files', 'in_pvms')]),
        (n4itk, measures, [('bias_image', 'in_bias')]),
        (arw, plot, [('outputnode.out_file', 'in_file')]),
        (inputnode, plot, [('subject_id', 'subject')]),
        (inputnode, merg, [('session_id', 'in1'), ('run_id', 'in2')]),
        (merg, plot, [('out', 'metadata')])
    ])

    if settings.get('mask_mosaic', False):
        workflow.connect(asw, 'outputnode.out_file', plot, 'in_mask')

    # Save mosaic to well-formed path
    mvplot = pe.Node(niu.Rename(
        format_string='anatomical_%(subject_id)s_%(session_id)s_%(run_id)s',
        keep_ext=True),
                     name='rename_plot')
    dsplot = pe.Node(nio.DataSink(base_directory=settings['work_dir'],
                                  parameterization=False),
                     name='ds_plot')
    workflow.connect([(inputnode, mvplot, [('subject_id', 'subject_id'),
                                           ('session_id', 'session_id'),
                                           ('run_id', 'run_id')]),
                      (plot, mvplot, [('out_file', 'in_file')]),
                      (mvplot, dsplot, [('out_file', '@mosaic')])])

    # Save background-noise fitting plot
    mvbgplot = pe.Node(niu.Rename(
        format_string=
        'anatomical_bgplot_%(subject_id)s_%(session_id)s_%(run_id)s',
        keep_ext=True),
                       name='rename_bgplot')
    dsbgplot = pe.Node(nio.DataSink(base_directory=settings['work_dir'],
                                    parameterization=False),
                       name='ds_bgplot')
    workflow.connect([(inputnode, mvbgplot, [('subject_id', 'subject_id'),
                                             ('session_id', 'session_id'),
                                             ('run_id', 'run_id')]),
                      (measures, mvbgplot, [('out_noisefit', 'in_file')]),
                      (mvbgplot, dsbgplot, [('out_file', '@bg_fitting')])])

    # Format name
    out_name = pe.Node(niu.Function(
        input_names=['subid', 'sesid', 'runid', 'prefix', 'out_path'],
        output_names=['out_file'],
        function=bids_path),
                       name='FormatName')
    out_name.inputs.out_path = deriv_dir
    out_name.inputs.prefix = 'anat'

    # Save to JSON file
    jfs_if = nio.JSONFileSink()
    jfs_if._always_run = settings.get('force_run', False)
    datasink = pe.Node(jfs_if, name='datasink')
    datasink.inputs.qc_type = 'anat'

    workflow.connect([(inputnode, out_name, [('subject_id', 'subid'),
                                             ('session_id', 'sesid'),
                                             ('run_id', 'runid')]),
                      (inputnode, datasink, [('subject_id', 'subject_id'),
                                             ('session_id', 'session_id'),
                                             ('run_id', 'run_id')]),
                      (plot, datasink, [('out_file', 'mosaic_file')]),
                      (fwhm, datasink, [(('fwhm', fwhm_dict), 'fwhm')]),
                      (measures, datasink, [('summary', 'summary'),
                                            ('spacing', 'spacing'),
                                            ('size', 'size'), ('icvs', 'icvs'),
                                            ('rpve', 'rpve'), ('inu', 'inu'),
                                            ('snr', 'snr'), ('cnr', 'cnr'),
                                            ('fber', 'fber'), ('efc', 'efc'),
                                            ('qi1', 'qi1'), ('qi2', 'qi2'),
                                            ('cjv', 'cjv')]),
                      (out_name, datasink, [('out_file', 'out_file')]),
                      (datasink, outputnode, [('out_file', 'out_file')])])
    return workflow
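
A minimal sketch of driving this workflow; the paths and identifiers below are placeholders, and 'work_dir' is included because the DataSink nodes above read settings['work_dir'] directly:

wf = anat_qc_workflow(settings={'work_dir': '/tmp/mriqc_work'})
wf.inputs.inputnode.bids_root = '/data/bids_dataset'  # placeholder
wf.inputs.inputnode.subject_id = '01'
wf.inputs.inputnode.session_id = '01'
wf.inputs.inputnode.run_id = '01'
wf.run()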
Code Example #15
def main(derivatives, ds):

    if ds == 'ds-01':
        subjects = ['{:02d}'.format(s) for s in range(1, 20)]
    elif ds == 'ds-02':
        subjects = ['{:02d}'.format(s) for s in range(1, 16)]
        subjects.pop(3)  # drop subject '04' (index 3)

    wf_folder = '/tmp/workflow_folders'

    identity = pe.Node(niu.IdentityInterface(fields=['mask']), name='identity')

    templates = {
        'pca_map':
        op.join(derivatives, ds, 'pca_mni', '{mask}_pca.nii.gz'),
        't1w':
        op.join(derivatives, ds, 'fmriprep', 'sub-{subject}', 'anat',
                'sub-{subject}_desc-preproc_T1w.nii.gz'),
        'mni2t1w':
        op.join(
            derivatives, ds, 'fmriprep', 'sub-{subject}', 'anat',
            'sub-{subject}_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5')
    }

    if ds == 'ds-01':
        templates['individual_mask'] = op.join(
            derivatives, ds, 'conjunct_masks', 'sub-{subject}', 'anat',
            'sub-{subject}_space-FLASH_desc-{mask}_space-T1w.nii.gz')

    elif ds == 'ds-02':
        templates['individual_mask'] = op.join(
            derivatives, ds, 'conjunct_masks', 'sub-{subject}', 'anat',
            'sub-{subject}_desc-{mask}_mask.nii.gz')

    wf = pe.Workflow(name='make_pca_masks_{}'.format(ds), base_dir=wf_folder)

    selector = pe.Node(nio.SelectFiles(templates), name='selector')
    selector.iterables = [('mask', ['stnl', 'stnr']), ('subject', subjects)]

    individual_pca_map = pe.Node(ants.ApplyTransforms(num_threads=4),
                                 name='individual_pca_map')

    wf.connect(selector, 't1w', individual_pca_map, 'reference_image')
    wf.connect(selector, 'pca_map', individual_pca_map, 'input_image')
    wf.connect(selector, 'mni2t1w', individual_pca_map, 'transforms')

    def make_pca_mask(pca_map, mask):
        from nilearn import image
        from nipype.utils.filemanip import split_filename
        import os.path as op

        _, fn, ext = split_filename(mask)

        pca_map = image.load_img(pca_map)
        mask = image.load_img(mask)

        pca_map = image.resample_to_img(pca_map, mask, interpolation='nearest')

        # mask the PCA map with the binarized individual mask
        new_mask = image.math_img('pca_map * (mask > 0)',
                                  pca_map=pca_map,
                                  mask=mask)

        # rescale the nonzero voxels to (0, 1]: shift them so the minimum
        # sits just above zero, then divide by the maximum
        tmp = new_mask.get_data()
        tmp[tmp != 0] -= tmp[tmp != 0].min() - 1e-4
        tmp[tmp != 0] /= tmp[tmp != 0].max()

        new_mask = image.new_img_like(new_mask, tmp)

        new_mask.to_filename(op.abspath('{}_map{}'.format(fn, ext)))

        return new_mask.get_filename()

    make_mask = pe.Node(niu.Function(function=make_pca_mask,
                                     input_names=['pca_map', 'mask'],
                                     output_names=['mask']),
                        name='make_mask')

    wf.connect(individual_pca_map, 'output_image', make_mask, 'pca_map')
    wf.connect(selector, 'individual_mask', make_mask, 'mask')

    def make_submask(mask):
        from nilearn import image
        import numpy as np
        import os.path as op
        from nipype.utils.filemanip import split_filename

        _, fn, ext = split_filename(mask)

        im = image.load_img(mask)

        data = im.get_data()
        # split the nonzero intensities into tertiles (33rd/66th percentiles)
        percentiles = np.percentile(data[data != 0], [33, 66])

        mask1 = image.math_img('(im > 0) & (im < {})'.format(percentiles[0]),
                               im=im)
        mask2 = image.math_img('(im > {}) & (im < {})'.format(*percentiles),
                               im=im)
        mask3 = image.math_img('(im > {})'.format(percentiles[1]), im=im)

        fn1 = op.abspath('{}_maskA{}'.format(fn, ext))
        fn2 = op.abspath('{}_maskB{}'.format(fn, ext))
        fn3 = op.abspath('{}_maskC{}'.format(fn, ext))

        mask1.to_filename(fn1)
        mask2.to_filename(fn2)
        mask3.to_filename(fn3)

        return fn3, fn2, fn1

    make_submasksnode = pe.Node(niu.Function(function=make_submask,
                                             input_names=['mask'],
                                             output_names=['submasks']),
                                name='make_submasks')

    wf.connect(make_mask, 'mask', make_submasksnode, 'mask')

    datasink_whole_mask = pe.Node(DerivativesDataSink(
        base_directory=op.join(derivatives, ds),
        space='T1w',
        suffix='roi',
        out_path_base='pca_masks'),
                                  name='datasink_whole_mask')
    datasink_whole_mask.base_path = 'pca_masks'

    def remove_space(input):
        return input.replace('_space-FLASH', '')

    wf.connect(selector, ('individual_mask', remove_space),
               datasink_whole_mask, 'source_file')
    wf.connect(make_mask, 'mask', datasink_whole_mask, 'in_file')

    datasink_submasks = pe.MapNode(DerivativesDataSink(
        base_directory=op.join(derivatives, ds),
        space='T1w',
        out_path_base='pca_masks'),
                                   iterfield=['suffix', 'in_file'],
                                   name='datasink_submasks')
    datasink_submasks.base_path = 'pca_masks'
    datasink_submasks.inputs.suffix = [
        'subroi-A_roi', 'subroi-B_roi', 'subroi-C_roi'
    ]
    wf.connect(selector, ('individual_mask', remove_space), datasink_submasks,
               'source_file')
    wf.connect(make_submasksnode, 'submasks', datasink_submasks, 'in_file')

    wf.run(plugin='MultiProc', plugin_args={'n_procs': 8})
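
A plausible entry point for this script, assuming it is invoked with the derivatives folder and dataset name on the command line (the argparse wrapper is an assumption, not part of the original module):

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Build PCA-weighted masks')
    parser.add_argument('derivatives', help='path to the derivatives folder')
    parser.add_argument('ds', choices=['ds-01', 'ds-02'], help='dataset name')
    args = parser.parse_args()

    main(args.derivatives, args.ds)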
Code Example #16
def run_workflow(csv_file, fwhm, HighPass, undist):
    # Using the name "level1flow" should allow the workingdirs file to be used
    # by the fmri_workflow pipeline.
    workflow = pe.Workflow(name='level1flow')
    workflow.base_dir = os.path.abspath('./workingdirs')

    featpreproc = create_workflow(undist)

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
        'refsubject_id',
    ]),
                        name="input")

    if csv_file is not None:
        print('=== reading csv ===')
        # Read csv and use pandas to set-up image and ev-processing
        df = pd.read_csv(csv_file)
        # init lists
        sub_img = []
        ses_img = []
        run_img = []
        ref_img = []

        # fill lists to iterate mapnodes
        for index, row in df.iterrows():
            for r in row.run.strip("[]").split(" "):
                sub_img.append(row.subject)
                ses_img.append(row.session)
                run_img.append(r)
                if 'refsubject' in df.columns:
                    if row.refsubject == 'nan':
                        # empty field
                        ref_img.append(row.subject)
                    else:
                        # non-empty field
                        ref_img.append(row.refsubject)
                else:
                    ref_img.append(row.subject)

        inputnode.iterables = [
            ('subject_id', sub_img),
            ('session_id', ses_img),
            ('run_id', run_img),
            ('refsubject_id', ref_img),
        ]
        inputnode.synchronize = True
        print(sub_img)
        print(ref_img)
    else:
        print("No csv-file specified. Don't know what data to process.")

    # use undistorted EPIs if requested (these must be generated with the undistort workflow first)
    if undist:
        func_fld = 'undistort'
        func_flag = 'preproc_undistort'
    else:
        func_fld = 'resampled-isotropic-1mm'
        func_flag = 'preproc'

    templates = {
        'funcs':
        'derivatives/' + func_fld + '/'
        'sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}_bold_res-1x1x1_' +
        func_flag + '.nii.gz',
    }

    inputfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                         name="input_files")

    workflow.connect([(inputnode, inputfiles, [
        ('subject_id', 'subject_id'),
        ('refsubject_id', 'refsubject_id'),
        ('session_id', 'session_id'),
        ('run_id', 'run_id'),
    ]),
                      (inputnode, featpreproc, [
                          ('subject_id', 'inputspec.subject_id'),
                          ('refsubject_id', 'inputspec.refsubject_id'),
                          ('session_id', 'inputspec.session_id'),
                      ]),
                      (inputfiles, featpreproc, [
                          ('funcs', 'inputspec.funcs'),
                      ])])

    featpreproc.inputs.inputspec.fwhm = fwhm  # spatial smoothing (default=2)
    featpreproc.inputs.inputspec.highpass = HighPass  # FWHM in seconds (default=50)

    from nipype import config, logging
    config.update_config(
        {'logging': {
            'workflow_level': 'INFO',
            'interface_level': 'INFO',
        }})
    logging.update_logging(config)
    # config.enable_debug_mode()  # uncomment for verbose debugging output

    # redundant with enable_debug_mode() ...
    workflow.workflow_level = 'INFO'  # INFO/DEBUG
    # workflow.stop_on_first_crash = True
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = True
    workflow.write_graph()
    workflow.run()
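
The CSV loop above implies a file laid out roughly as follows; the column names come from the code, while the values are purely illustrative. The run column holds a bracketed, space-separated list, and refsubject may be left empty:

subject,session,run,refsubject
01,01,[1 2 3],
02,01,[1 2],01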
Code Example #17
File: masking.py  Project: llevitis/APPIAN
def get_workflow(name, infosource, opts):
    '''
        Create workflow to produce labeled images.

        1. Invert T1 native to MNI 152 transformation
        2. Transform brainmask from MNI 152 to T1 native
        3. Create PVC labeled image
        4. Create quantification labeled image
        5. Create results labeled image

        :param name: Name for workflow
        :param infosource: Infosource for basic variables like subject id (sid) and condition id (cid)
        :param opts: User options

        :returns: workflow
    '''
    workflow = pe.Workflow(name=name)
    out_list = [
        "pet_brainmask", "brain_mask", "results_label_img_t1",
        "results_label_img_mni"
    ]
    in_list = [
        "nativeT1", "mniT1", "brainmask", "pet_header_json", "pet_volume",
        "results_labels", "results_label_template", "results_label_img",
        'LinT1MNIXfm', 'LinMNIT1Xfm', "LinPETMNIXfm", "LinMNIPETXfm",
        "LinT1PETXfm", "LinPETT1Xfm", "surf_left", 'surf_right'
    ]
    if not opts.nopvc:
        out_list += ["pvc_label_img_t1", "pvc_label_img_mni"]
        in_list += [
            "pvc_labels", "pvc_label_space", "pvc_label_img",
            "pvc_label_template"
        ]
    if opts.tka_method is not None:
        out_list += ["tka_label_img_t1", "tka_label_img_mni"]
        in_list += [
            "tka_labels", "tka_label_space", "tka_label_template",
            "tka_label_img"
        ]
    #Define input node that will receive input from outside of workflow
    inputnode = pe.Node(niu.IdentityInterface(fields=in_list),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=out_list),
                         name='outputnode')
    #Define empty node for output

    #Create Identity Transform
    identity_transform = pe.Node(param2xfmCommand(), name="identity_transform")
    identity_transform.inputs.translation = "0 0 0"
    identity_transform.inputs.rotation = "0 0 0"
    identity_transform.inputs.scales = "1 1 1"

    if not opts.nopvc and opts.pvc_method is not None:
        pvc_tfm_node, pvc_tfm_file, pvc_target_file = get_transforms_for_stage(
            inputnode, opts.pvc_label_space, opts.analysis_space,
            identity_transform)

    if opts.tka_method is not None:
        tka_tfm_node, tka_tfm_file, tka_target_file = get_transforms_for_stage(
            inputnode, opts.tka_label_space, opts.analysis_space,
            identity_transform)

    results_tfm_node, results_tfm_file, results_target_file = get_transforms_for_stage(
        inputnode, opts.results_label_space, opts.analysis_space,
        identity_transform)

    ###################
    # Brain Mask Node #
    ###################
    if opts.analysis_space != "stereo":
        brain_mask_node = pe.Node(minc.Resample(), "brain_mask")
        brain_mask_node.inputs.nearest_neighbour_interpolation = True

        workflow.connect(inputnode, "brainmask", brain_mask_node, "input_file")
        if opts.analysis_space == "t1":
            workflow.connect(inputnode, "LinMNIT1Xfm", brain_mask_node,
                             "transformation")
            like_file = "nativeT1"
            workflow.connect(inputnode, "nativeT1", brain_mask_node, "like")
        elif opts.analysis_space == "pet":
            workflow.connect(inputnode, "LinMNIPETXfm", brain_mask_node,
                             "transformation")
            workflow.connect(inputnode, "pet_volume", brain_mask_node, "like")
            like_file = "pet_volume"
        else:
            print("Error: analysis space must be one of pet, stereo, t1 but is",
                  opts.analysis_space)
            exit(1)
    else:
        brain_mask_node = pe.Node(
            niu.IdentityInterface(fields=["output_file"]), "brain_mask")
        workflow.connect(inputnode, "brainmask", brain_mask_node,
                         "output_file")
        like_file = "mniT1"

    #################
    # Surface masks #
    #################
    if opts.use_surfaces:
        if opts.analysis_space != "stereo":
            surface_left_node = pe.Node(transform_objectCommand(),
                                        name="surface_left_node")
            surface_right_node = pe.Node(transform_objectCommand(),
                                         name="surface_right_node")
            workflow.connect(inputnode, 'surf_left', surface_left_node,
                             'in_file')
            workflow.connect(inputnode, 'surf_right', surface_right_node,
                             'in_file')
            if opts.analysis_space == "t1":
                workflow.connect(inputnode, "LinMNIT1Xfm", surface_left_node,
                                 'tfm_file')
                workflow.connect(inputnode, "LinMNIT1Xfm", surface_right_node,
                                 'tfm_file')
            elif opts.analysis_space == "pet":
                workflow.connect(inputnode, 'LinMNIPETXfm', surface_left_node,
                                 'tfm_file')
                workflow.connect(inputnode, 'LinMNIPETXfm', surface_right_node,
                                 'tfm_file')
        else:
            surface_left_node = pe.Node(
                niu.IdentityInterface(fields=["output_file"]),
                "surf_left_node")
            surface_right_node = pe.Node(
                niu.IdentityInterface(fields=["output_file"]),
                "surf_right_node")
            workflow.connect(inputnode, "surf_left", surface_left_node,
                             "output_file")
            workflow.connect(inputnode, "surf_right", surface_right_node,
                             "output_file")

    resultsLabels = pe.Node(interface=Labels(), name="resultsLabels")
    resultsLabels.inputs.analysis_space = opts.analysis_space
    resultsLabels.inputs.label_type = opts.results_label_type
    resultsLabels.inputs.space = opts.results_label_space
    resultsLabels.inputs.erode_times = opts.results_erode_times
    resultsLabels.inputs.brain_only = opts.results_labels_brain_only
    resultsLabels.inputs.ones_only = opts.results_labels_ones_only
    workflow.connect(inputnode, 'results_labels', resultsLabels, 'labels')
    workflow.connect(inputnode, 'results_label_img', resultsLabels,
                     'label_img')
    workflow.connect(inputnode, 'results_label_template', resultsLabels,
                     'label_template')
    workflow.connect(inputnode, like_file, resultsLabels, 'like_file')
    workflow.connect(brain_mask_node, "output_file", resultsLabels,
                     'brainmask')
    workflow.connect(results_tfm_node, results_tfm_file, resultsLabels,
                     "LinXfm")

    if not opts.nopvc and opts.pvc_method is not None:
        pvcLabels = pe.Node(interface=Labels(), name="pvcLabels")
        pvcLabels.inputs.analysis_space = opts.analysis_space
        pvcLabels.inputs.label_type = opts.pvc_label_type
        pvcLabels.inputs.space = opts.pvc_label_space
        pvcLabels.inputs.erode_times = opts.pvc_erode_times
        pvcLabels.inputs.brain_only = opts.pvc_labels_brain_only
        pvcLabels.inputs.ones_only = opts.pvc_labels_ones_only
        workflow.connect(inputnode, 'pvc_labels', pvcLabels, 'labels')
        workflow.connect(inputnode, 'pvc_label_img', pvcLabels, 'label_img')
        workflow.connect(inputnode, 'pvc_label_template', pvcLabels,
                         'label_template')
        workflow.connect(inputnode, like_file, pvcLabels, 'like_file')
        workflow.connect(brain_mask_node, "output_file", pvcLabels,
                         'brainmask')
        workflow.connect(pvc_tfm_node, pvc_tfm_file, pvcLabels, "LinXfm")
    if opts.tka_method is not None:
        tkaLabels = pe.Node(interface=Labels(), name="tkaLabels")
        tkaLabels.inputs.analysis_space = opts.analysis_space
        tkaLabels.inputs.label_type = opts.tka_label_type
        tkaLabels.inputs.space = opts.tka_label_space
        tkaLabels.inputs.erode_times = opts.tka_erode_times
        tkaLabels.inputs.brain_only = opts.tka_labels_brain_only
        tkaLabels.inputs.ones_only = opts.tka_labels_ones_only
        workflow.connect(inputnode, 'tka_labels', tkaLabels, 'labels')
        workflow.connect(inputnode, 'tka_label_img', tkaLabels, 'label_img')
        workflow.connect(inputnode, 'tka_label_template', tkaLabels,
                         'label_template')
        workflow.connect(inputnode, like_file, tkaLabels, 'like_file')
        workflow.connect(brain_mask_node, "output_file", tkaLabels,
                         'brainmask')
        workflow.connect(tka_tfm_node, tka_tfm_file, tkaLabels, "LinXfm")

    return workflow
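
opts is expected to expose the attributes read above. A minimal illustration with SimpleNamespace follows; the values are placeholders rather than a tested configuration, and infosource comes from the surrounding pipeline:

from types import SimpleNamespace

opts = SimpleNamespace(
    nopvc=True,                     # skip the PVC labeling stage
    pvc_method=None,
    tka_method=None,                # skip the quantification labeling stage
    analysis_space='t1',            # one of 'pet', 'stereo', 't1'
    use_surfaces=False,
    results_label_space='stereo',
    results_label_type='atlas',
    results_erode_times=0,
    results_labels_brain_only=False,
    results_labels_ones_only=False,
)
workflow = get_workflow('masking', infosource, opts)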
Code Example #18
def create_workflow(undist):
    featpreproc = pe.Workflow(name="featpreproc")

    featpreproc.base_dir = os.path.join(ds_root, 'workingdirs')

    # ===================================================================
    #                  _____                   _
    #                 |_   _|                 | |
    #                   | |  _ __  _ __  _   _| |_
    #                   | | | '_ \| '_ \| | | | __|
    #                  _| |_| | | | |_) | |_| | |_
    #                 |_____|_| |_| .__/ \__,_|\__|
    #                             | |
    #                             |_|
    # ===================================================================

    # ------------------ Specify variables
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'funcs',
            'subject_id',
            'session_id',
            'refsubject_id',
            'fwhm',  # smoothing
            'highpass'
        ]),
        name="inputspec")

    if undist:
        ud_flag = '_undist_PLUS'
    else:
        ud_flag = ''

    # SelectFiles
    templates = {
        # EPI ========
        'ref_func':
        'reference-vols/sub-{refsubject_id}/func/'
        'sub-{subject_id}_ref_func_res-1x1x1' + ud_flag + '.nii.gz',
        'ref_funcmask':
        'reference-vols/sub-{refsubject_id}/func/'
        'sub-{subject_id}_ref_func_mask_res-1x1x1.nii.gz',

        # T1 ========
        # 0.5 mm iso ---
        'ref_t1':
        'reference-vols/sub-{refsubject_id}/anat/'
        'sub-{subject_id}_ref_anat_res-0.5x0.5x0.5.nii.gz',
        'ref_t1mask':
        'reference-vols/sub-{refsubject_id}/anat/'
        'sub-{subject_id}_ref_anat_mask_res-0.5x0.5x0.5.nii.gz',
    }

    inputfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                         name="input_files")

    featpreproc.connect([(inputnode, inputfiles, [
        ('subject_id', 'subject_id'),
        ('session_id', 'session_id'),
        ('refsubject_id', 'refsubject_id'),
    ])])

    # ===================================================================
    #                   ____        _               _
    #                  / __ \      | |             | |
    #                 | |  | |_   _| |_ _ __  _   _| |_
    #                 | |  | | | | | __| '_ \| | | | __|
    #                 | |__| | |_| | |_| |_) | |_| | |_
    #                  \____/ \__,_|\__| .__/ \__,_|\__|
    #                                  | |
    #                                  |_|
    # ===================================================================

    # Datasink
    outputfiles = pe.Node(nio.DataSink(base_directory=ds_root,
                                       container='derivatives/featpreproc',
                                       parameterization=True),
                          name="output_files")

    # Use the following DataSink output substitutions
    # each tuple is only matched once per file
    outputfiles.inputs.substitutions = [
        ('/_mc_method_afni3dAllinSlices/', '/'),
        ('/_mc_method_afni3dAllinSlices/', '/'),  # needs to appear twice
        ('/oned_file/', '/'),
        ('/out_file/', '/'),
        ('/oned_matrix_save/', '/'),
        ('refsubject_id_', 'ref-'),
        ('subject_id_', 'sub-'),
        ('session_id_', 'ses-'),
    ]
    # Put result into a BIDS-like format
    outputfiles.inputs.regexp_substitutions = [
        (r'_ses-([a-zA-Z0-9]+)_sub-([a-zA-Z0-9]+)', r'sub-\2/ses-\1'),
        (r'/_addmean[0-9]+/', r'/func/'),
        (r'/_funcbrains[0-9]+/', r'/func/'),
        (r'/_maskfunc[0-9]+/', r'/func/'),
        (r'/_mc[0-9]+/', r'/func/'),
        (r'/_meanfunc[0-9]+/', r'/func/'),
        (r'/_outliers[0-9]+/', r'/func/'),
        (r'_ref-([a-zA-Z0-9]+)_run_id_[0-9][0-9]', r''),
    ]
    outputnode = pe.Node(interface=util.IdentityInterface(fields=[
        'motion_parameters',
        'motion_corrected',
        'motion_plots',
        'motion_outlier_files',
        'mask',
        'smoothed_files',
        'highpassed_files',
        'mean',
        'func_unwarp',
        'ref_func',
        'ref_funcmask',
        'ref_t1',
        'ref_t1mask',
    ]),
                         name='outputspec')

    # ===================================================================
    #                  _____ _            _ _
    #                 |  __ (_)          | (_)
    #                 | |__) | _ __   ___| |_ _ __   ___
    #                 |  ___/ | '_ \ / _ \ | | '_ \ / _ \
    #                 | |   | | |_) |  __/ | | | | |  __/
    #                 |_|   |_| .__/ \___|_|_|_| |_|\___|
    #                         | |
    #                         |_|
    # ===================================================================

    #  ~|~ _ _  _  _ |` _  _ _ _    _ _  _  _|  _
    #   | | (_|| |_\~|~(_)| | | |  | | |(_|_\|<_\
    #
    # Transform manual skull-stripped masks to multiple images
    # --------------------------------------------------------
    # should just be used as input to motion correction,
    # after mc, all functionals should be aligned to reference
    transmanmask_mc = transform_manualmask.create_workflow()

    # - - - - - - Connections - - - - - - -
    featpreproc.connect([(inputfiles, transmanmask_mc, [
        ('subject_id', 'in.subject_id'),
        ('session_id', 'in.session_id'),
        ('refsubject_id', 'in.refsubject_id'),
    ])])

    #featpreproc.connect(inputfiles, 'ref_funcmask',
    #                    transmanmask_mc, 'in.manualmask')
    featpreproc.connect(inputfiles, 'ref_funcmask', transmanmask_mc,
                        'in.ref_funcmask')
    featpreproc.connect(inputnode, 'funcs', transmanmask_mc, 'in.funcs')

    featpreproc.connect(inputfiles, 'ref_func', transmanmask_mc, 'in.ref_func')

    #  |\/| _ _|_. _  _    _ _  _ _ _  __|_. _  _
    #  |  |(_) | |(_)| |  (_(_)| | (/_(_ | |(_)| |
    #
    # Perform motion correction, using some pipeline
    # --------------------------------------------------------

    # Register an image from the functionals to the reference image
    median_func = pe.MapNode(
        interface=fsl.maths.MedianImage(dimension="T"),
        name='median_func',
        iterfield=('in_file'),
    )
    #     pre_mc = motioncorrection_workflow.create_workflow_allin_slices(
    #         name='premotioncorrection')

    pre_mc = create_workflow_allin_slices(name='premotioncorrection')

    featpreproc.connect([
        (inputnode, median_func, [
            ('funcs', 'in_file'),
        ]),
        (median_func, pre_mc, [
            ('out_file', 'in.funcs'),
        ]),
        (
            inputfiles,
            pre_mc,
            [
                # median func image will be used a reference / base
                ('ref_func', 'in.ref_func'),
                ('ref_funcmask', 'in.ref_func_weights'),
            ]),
        (
            transmanmask_mc,
            pre_mc,
            [
                # use mask as weights >>>> are we sure this is correct?
                ('funcreg.out_file', 'in.funcs_masks'),
            ]),
        (pre_mc, outputnode, [
            ('mc.out_file', 'pre_motion_corrected'),
            ('mc.oned_file', 'pre_motion_parameters.oned_file'),
            ('mc.oned_matrix_save', 'pre_motion_parameters.oned_matrix_save'),
        ]),
        (
            outputnode,
            outputfiles,
            [
                ('pre_motion_corrected', 'pre_motion_corrected.out_file'),
                ('pre_motion_parameters.oned_file',
                 'pre_motion_corrected.oned_file'),
                # warp parameters in ASCII (.1D)
                ('pre_motion_parameters.oned_matrix_save',
                 'pre_motion_corrected.oned_matrix_save'),
                # transformation matrices for each sub-brick
            ]),
    ])

    #     mc = motioncorrection_workflow.create_workflow_allin_slices(
    #         name='motioncorrection',
    #         iterfield=('in_file', 'ref_file', 'in_weight_file'))

    mc = create_workflow_allin_slices(name='motioncorrection',
                                      iterfield=('in_file', 'ref_file',
                                                 'in_weight_file'))

    # - - - - - - Connections - - - - - - -
    featpreproc.connect([
        (inputnode, mc, [
            ('funcs', 'in.funcs'),
        ]),
        (
            pre_mc,
            mc,
            [
                # the median image realigned to the reference functional
                # will serve as reference. This way motion correction is
                #  done to an image more similar to the functionals
                ('mc.out_file', 'in.ref_func'),
            ]),
        (
            inputfiles,
            mc,
            [
                # Check and make sure the ref func mask is close enough
                # to the registered median image.
                ('ref_funcmask', 'in.ref_func_weights'),
            ]),
        (
            transmanmask_mc,
            mc,
            [
                ('funcreg.out_file', 'in.funcs_masks'),  # use mask as weights
            ]),
        (mc, outputnode, [
            ('mc.out_file', 'motion_corrected'),
            ('mc.oned_file', 'motion_parameters.oned_file'),
            ('mc.oned_matrix_save', 'motion_parameters.oned_matrix_save'),
        ]),
        (
            outputnode,
            outputfiles,
            [
                ('motion_corrected', 'motion_corrected.out_file'),
                ('motion_parameters.oned_file', 'motion_corrected.oned_file'),
                # warp parameters in ASCII (.1D)
                ('motion_parameters.oned_matrix_save',
                 'motion_corrected.oned_matrix_save'),
                # transformation matrices for each sub-brick
            ]),
    ])

    #  |~. _ | _| _ _  _  _    _ _  _ _ _  __|_. _  _
    #  |~|(/_|(_|| | |(_||_)  (_(_)| | (/_(_ | |(_)| |
    #                    |
    # Unwarp EPI distortions
    # --------------------------------------------------------

    # Performing motion correction to a reference that is undistorted,
    # So b0_unwarp is currently not needed or used.

    # we have moved this to a separate workflow and use the blip-up/down
    # method now (reverse phase-encoding directions). This has also been
    # done for the new reference images.

    featpreproc.connect([
        (inputfiles, outputfiles, [
            ('ref_func', 'reference/func'),
            ('ref_funcmask', 'reference/func_mask'),
        ]),
        (inputfiles, outputnode, [
            ('ref_func', 'ref_func'),
            ('ref_funcmask', 'ref_funcmask'),
        ]),
    ])

    #  |\/| _ _|_. _  _    _   _|_|. _  _ _
    #  |  |(_) | |(_)| |  (_)|_|| ||(/_| _\
    #
    # --------------------------------------------------------

    # Apply brain masks to functionals
    # --------------------------------------------------------

    # Dilate mask
    dilatemask = pe.Node(interface=fsl.ImageMaths(suffix='_dil',
                                                  op_string='-dilF'),
                         name='dilatemask')
    featpreproc.connect(inputfiles, 'ref_funcmask', dilatemask, 'in_file')
    featpreproc.connect(dilatemask, 'out_file', outputfiles, 'dilate_mask')

    funcbrains = pe.MapNode(fsl.BinaryMaths(operation='mul'),
                            iterfield=('in_file', 'operand_file'),
                            name='funcbrains')

    featpreproc.connect([
        (mc, funcbrains, [
            ('mc.out_file', 'in_file'),
        ]),
        (dilatemask, funcbrains, [
            ('out_file', 'operand_file'),
        ]),
        (funcbrains, outputfiles, [
            ('out_file', 'funcbrains'),
        ]),
    ])
    # Detect motion outliers
    # --------------------------------------------------------

    import nipype.algorithms.rapidart as ra
    outliers = pe.MapNode(
        ra.ArtifactDetect(
            mask_type='file',
            use_norm=True,
            norm_threshold=10.0,  # combines translations in mm and rotations
            zintensity_threshold=3.0,  # z-score
            parameter_source='AFNI',
            save_plot=True),
        iterfield=('realigned_files', 'realignment_parameters', 'mask_file'),
        name='outliers')

    featpreproc.connect([
        (
            mc,
            outliers,
            [  # ('mc.par_file', 'realignment_parameters'),
                ('mc.oned_file', 'realignment_parameters'),
            ]),
        (funcbrains, outliers, [
            ('out_file', 'realigned_files'),
        ]),
        (dilatemask, outliers, [
            ('out_file', 'mask_file'),
        ]),
        (
            outliers,
            outputfiles,
            [
                ('outlier_files', 'motion_outliers.@outlier_files'),
                ('plot_files', 'motion_outliers.@plot_files'),
                ('displacement_files', 'motion_outliers.@displacement_files'),
                ('intensity_files', 'motion_outliers.@intensity_files'),
                ('mask_files', 'motion_outliers.@mask_files'),
                ('statistic_files', 'motion_outliers.@statistic_files'),
                # ('norm_files', 'outliers.@norm_files'),
            ]),
        (mc, outputnode, [
            ('mc.oned_file', 'motion_parameters'),
        ]),
        (
            outliers,
            outputnode,
            [
                ('outlier_files', 'motion_outlier_files'),
                ('plot_files', 'motion_plots.@plot_files'),
                ('displacement_files', 'motion_outliers.@displacement_files'),
                ('intensity_files', 'motion_outliers.@intensity_files'),
                ('mask_files', 'motion_outliers.@mask_files'),
                ('statistic_files', 'motion_outliers.@statistic_files'),
                # ('norm_files', 'outliers.@norm_files'),
            ])
    ])
    """
    Determine the 2nd and 98th percentile intensities of each functional run
    """
    getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
                           iterfield=['in_file'],
                           name='getthreshold')

    featpreproc.connect(mc, 'mc.out_file', getthresh, 'in_file')
    """
    Threshold the first run of functional data at 10% of the 98th percentile
    """

    threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char',
                                                    suffix='_thresh'),
                           iterfield=['in_file', 'op_string'],
                           name='threshold')

    featpreproc.connect(mc, 'mc.out_file', threshold, 'in_file')
    """
    Define a function to get 10% of the intensity
    """
    def getthreshop(thresh):
        return ['-thr %.10f -Tmin -bin' % (0.1 * val[1]) for val in thresh]

    featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold,
                        'op_string')
    """
    Determine the median value of the functional runs using the mask
    """
    medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
                           iterfield=['in_file', 'mask_file'],
                           name='medianval')

    featpreproc.connect(mc, 'mc.out_file', medianval, 'in_file')
    featpreproc.connect(threshold, 'out_file', medianval, 'mask_file')

    # (~ _  _ _|_. _ |  (~ _ _  _  _ _|_|_ . _  _
    # _)|_)(_| | |(_||  _)| | |(_)(_) | | ||| |(_|
    #   |                                       _|
    # Spatial smoothing (SUSAN)
    # --------------------------------------------------------

    # create_susan_smooth takes care of calculating the mean and median
    #   functional, applying mask to functional, and running the smoothing
    smooth = create_susan_smooth(separate_masks=False)

    featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')

    featpreproc.connect(mc, 'mc.out_file', smooth, 'inputnode.in_files')

    featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file')

    # -------------------------------------------------------
    # The below is from workflows/fmri/fsl/preprocess.py
    """
    Mask the smoothed data with the dilated mask
    """

    maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
                                                    op_string='-mas'),
                           iterfield=['in_file', 'in_file2'],
                           name='maskfunc3')
    featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3,
                        'in_file')

    featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')

    concatnode = pe.Node(interface=util.Merge(2), name='concat')

    tolist = lambda x: [x]

    def chooseindex(fwhm):
        if fwhm < 1:
            return [0]
        else:
            return [1]

    # maskfunc2 is the functional data before SUSAN
    featpreproc.connect(mc, ('mc.out_file', tolist), concatnode, 'in1')

    # maskfunc3 is the functional data after SUSAN
    featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2')
    """
    The following nodes select smooth or unsmoothed data depending on the
    fwhm. This is because SUSAN defaults to smoothing the data with about the
    voxel size of the input data if the fwhm parameter is less than 1/3 of the
    voxel size.
    """
    selectnode = pe.Node(interface=util.Select(), name='select')

    featpreproc.connect(concatnode, 'out', selectnode, 'inlist')

    featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
    featpreproc.connect(selectnode, 'out', outputfiles, 'smoothed_files')
    """
    Scale the median value of the run is set to 10000.
    """

    meanscale = pe.MapNode(interface=fsl.ImageMaths(suffix='_gms'),
                           iterfield=['in_file', 'op_string'],
                           name='meanscale')
    featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
    """
    Define a function to get the scaling factor for intensity normalization
    """

    featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale,
                        'op_string')

    # |_|. _ |_  _  _  _ _
    # | ||(_|| ||_)(_|_\_\
    #      _|   |
    # Temporal filtering
    # --------------------------------------------------------

    highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
                          iterfield=['in_file'],
                          name='highpass')
    highpass_operand = lambda x: '-bptf %.10f -1' % x
    featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass,
                        'op_string')
    featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')

    # FSL 5.0.7 and later remove the mean during -bptf highpass filtering,
    # so detect the version to decide whether the mean must be added back
    version = 0
    if fsl.Info.version() and \
            LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'):
        version = 507

    if version < 507:
        featpreproc.connect(highpass, 'out_file', outputnode,
                            'highpassed_files')
    else:
        """
        Add back the mean removed by the highpass filter operation as
            of FSL 5.0.7
        """
        meanfunc4 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
                                                        suffix='_mean'),
                               iterfield=['in_file'],
                               name='meanfunc4')

        featpreproc.connect(meanscale, 'out_file', meanfunc4, 'in_file')
        addmean = pe.MapNode(interface=fsl.BinaryMaths(operation='add'),
                             iterfield=['in_file', 'operand_file'],
                             name='addmean')
        featpreproc.connect(highpass, 'out_file', addmean, 'in_file')
        featpreproc.connect(meanfunc4, 'out_file', addmean, 'operand_file')
        featpreproc.connect(addmean, 'out_file', outputnode,
                            'highpassed_files')
    """
    Generate a mean functional image from the first run
    """
    meanfunc3 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
                                                    suffix='_mean'),
                           iterfield=['in_file'],
                           name='meanfunc3')

    featpreproc.connect(meanscale, 'out_file', meanfunc3, 'in_file')
    featpreproc.connect(meanfunc3, 'out_file', outputfiles, 'mean')

    featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean_highpassed')
    featpreproc.connect(outputnode, 'highpassed_files', outputfiles,
                        'highpassed_files')

    return featpreproc
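The Merge/Select pattern used above to choose between smoothed and unsmoothed data can be exercised on its own. Below is a minimal, self-contained sketch of that pattern; the node and field names here are illustrative, not taken from the workflow above.

# Minimal sketch of the Merge/Select pattern; names are illustrative only.
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util

demo = pe.Workflow(name='select_demo')
src = pe.Node(util.IdentityInterface(fields=['unsmoothed', 'smoothed', 'fwhm']),
              name='src')
concat = pe.Node(util.Merge(2), name='concat')
select = pe.Node(util.Select(), name='select')

tolist = lambda x: [x]
chooseindex = lambda fwhm: [0] if fwhm < 1 else [1]

# index 0 -> unsmoothed data, index 1 -> smoothed data
demo.connect(src, ('unsmoothed', tolist), concat, 'in1')
demo.connect(src, ('smoothed', tolist), concat, 'in2')
demo.connect(concat, 'out', select, 'inlist')
demo.connect(src, ('fwhm', chooseindex), select, 'index')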
コード例 #19
0
ファイル: outputs.py プロジェクト: wayne009007/sdcflows
def init_fmap_reports_wf(
    *,
    output_dir,
    fmap_type,
    bids_fmap_id=None,
    custom_entities=None,
    name="fmap_reports_wf",
):
    """
    Set up a battery of datasinks to store reports in the right location.

    Parameters
    ----------
    fmap_type : :obj:`str`
        The fieldmap estimator type.
    output_dir : :obj:`str`
        Directory in which to save derivatives
    bids_fmap_id : :obj:`str`
        Sets the ``B0FieldIdentifier`` metadata into the outputs.
    custom_entities : :obj:`dict`
        Define extra entities that will be written out in filenames.
    name : :obj:`str`
        Workflow name (default: ``"fmap_reports_wf"``)

    Inputs
    ------
    source_files
        One or more fieldmap file(s) of the BIDS dataset that will serve as the naming reference.
    fieldmap
        The preprocessed fieldmap, in its original space with Hz units.
    fmap_ref
        An anatomical reference (e.g., magnitude file)
    fmap_mask
        A brain mask in the fieldmap's space.

    """
    from ..interfaces.reportlets import FieldmapReportlet

    custom_entities = custom_entities or {}
    if bids_fmap_id:
        custom_entities["fmapid"] = bids_fmap_id.replace("_", "")

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=["source_files", "fieldmap", "fmap_ref", "fmap_mask"]),
        name="inputnode",
    )

    rep = pe.Node(FieldmapReportlet(), "simple_report")
    rep.interface._always_run = True

    ds_fmap_report = pe.Node(
        DerivativesDataSink(
            base_directory=str(output_dir),
            datatype="figures",
            suffix="fieldmap",
            desc=fmap_type,
            dismiss_entities=("fmap", ),
            allowed_entities=tuple(custom_entities.keys()),
        ),
        name="ds_fmap_report",
    )
    for k, v in custom_entities.items():
        setattr(ds_fmap_report.inputs, k, v)

    # fmt:off
    workflow.connect([
        (inputnode, rep, [("fieldmap", "fieldmap"), ("fmap_ref", "reference"),
                          ("fmap_mask", "mask")]),
        (rep, ds_fmap_report, [("out_report", "in_file")]),
        (inputnode, ds_fmap_report, [("source_files", "source_file")]),
    ])
    # fmt:on

    return workflow
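A hedged sketch of how this reporting workflow might be instantiated; every path, filename, and the fmap_type value below is a placeholder, not a value from the code above.

# Hypothetical usage; every literal below is a placeholder.
reports_wf = init_fmap_reports_wf(
    output_dir="/derivatives/sdcflows",
    fmap_type="phasediff",
    bids_fmap_id="auto_00000",
)
reports_wf.inputs.inputnode.source_files = ["sub-01_phasediff.nii.gz"]
reports_wf.inputs.inputnode.fieldmap = "sub-01_fieldmap.nii.gz"
reports_wf.inputs.inputnode.fmap_ref = "sub-01_magnitude1.nii.gz"
reports_wf.inputs.inputnode.fmap_mask = "sub-01_desc-brain_mask.nii.gz"
# reports_wf.run()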
コード例 #20
0
"""
Created on Mon Apr 28 16:36:07 2014

@author: Dalton
"""

import os  # system functions

import nipype.interfaces.io as nio  # Data i/o
import nipype.interfaces.fsl as fsl  # fsl
import nipype.interfaces.utility as util  # utility
import nipype.pipeline.engine as pe  # pypeline engine
import nipype.algorithms.modelgen as model  # model generation
import nipype.algorithms.rapidart as ra  # artifact detection

from nipype.workflows.fmri.fsl import (create_featreg_preproc,
                                       create_modelfit_workflow,
                                       create_fixed_effects_flow)

# The output file format for FSL routines is being set to compressed NIFTI
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

# Name the workflow for the level-1 analysis
level1_workflow = pe.Workflow(name='level1flow')

# FEAT-style preprocessing: motion correction, masking, smoothing
preproc = create_featreg_preproc(whichvol='first')
# First-level GLM model fitting (FSL FILM)
modelfit = create_modelfit_workflow()
# Fixed-effects combination of estimates across runs (FLAMEO)
fixed_fx = create_fixed_effects_flow()
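In the full nipype FSL examples these three sub-workflows are chained inside level1_workflow roughly as sketched below. This is a simplified sketch: the complete examples also route copes/varcopes through a sorting helper before the fixed-effects flow, and this truncated script does not show its actual connections.

# Simplified wiring sketch based on nipype's fmri_fsl examples.
level1_workflow.connect([
    (preproc, modelfit, [('outputspec.highpassed_files',
                          'inputspec.functional_data')]),
    (modelfit, fixed_fx, [('outputspec.dof_file', 'inputspec.dof_files')]),
])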
コード例 #21
0
def create_median_angle_correction(name='median_angle_correction'):
    """
    Median Angle Correction
    
    Parameters
    ----------
    name : string, optional
        Name of the workflow.
            
    Returns
    -------
    median_angle_correction : nipype.pipeline.engine.Workflow
        Median Angle Correction workflow.
    
    Notes
    -----
    
    Workflow Inputs::
    
        inputspec.subject : string (nifti file)
            Realigned nifti file of a subject
        inputspec.target_angle : integer
            Target angle in degrees to correct the median angle to
            
    Workflow Outputs::
    
        outputspec.subject : string (nifti file)
            Median angle corrected nifti file of the given subject
        outputspec.pc_angles : string (.npy file)
            Numpy file (.npy) containing the angles (in radians) of all voxels
            with respect to the 5 largest principal components.

    Median Angle Correction Procedure:
    
    1. Compute the median angle with respect to the first principal component of the subject
    2. Shift the angle of every voxel so that the new median angle equals the target angle

    Workflow Graph:
    
    .. image:: ../images/median_angle_correction.dot.png
        :width: 500
    
    Detailed Workflow Graph:
    
    .. image:: ../images/median_angle_correction_detailed.dot.png
        :width: 500    

    """
    median_angle_correction = pe.Workflow(name=name)
    
    inputspec = pe.Node(util.IdentityInterface(fields=['subject',
                                                       'target_angle']),
                        name='inputspec')
    outputspec = pe.Node(util.IdentityInterface(fields=['subject',
                                                        'pc_angles']),
                         name='outputspec')
    
    mac = pe.Node(util.Function(input_names=['target_angle_deg',
                                             'realigned_file'],
                                output_names=['corrected_file',
                                              'angles_file'],
                                function=median_angle_correct),
                  name='median_angle_correct')
    
    median_angle_correction.connect(inputspec, 'subject',
                                    mac, 'realigned_file')
    median_angle_correction.connect(inputspec, 'target_angle',
                                    mac, 'target_angle_deg')
    median_angle_correction.connect(mac, 'corrected_file',
                                    outputspec, 'subject')
    median_angle_correction.connect(mac, 'angles_file',
                                    outputspec, 'pc_angles')
    
    return median_angle_correction
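The two-step procedure described in the docstring can be illustrated with a small NumPy sketch. This demonstrates the angle computation only; it is not the `median_angle_correct` implementation referenced above.

import numpy as np

def median_angle_demo(data, target_angle_deg=90.0):
    # data: (timepoints, voxels) array, each voxel timeseries mean-centered
    U, S, Vt = np.linalg.svd(data, full_matrices=False)
    pc1 = U[:, 0]                          # unit-norm first principal component
    norms = np.linalg.norm(data, axis=0)
    cosines = pc1 @ data / norms           # cosine of angle to PC1, per voxel
    angles = np.arccos(np.clip(cosines, -1.0, 1.0))
    # step 2: every voxel would then be rotated by this shift so that the
    # median angle equals the target angle
    shift = np.deg2rad(target_angle_deg) - np.median(angles)
    return angles, shift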
コード例 #22
0
def analyze_openfmri_dataset(data_dir,
                             subject=None,
                             model_id=None,
                             task_id=None,
                             output_dir=None,
                             subj_prefix='*',
                             hpcutoff=120.,
                             use_derivatives=True,
                             fwhm=6.0,
                             subjects_dir=None,
                             target=None):
    """Analyzes an open fmri dataset

    Parameters
    ----------

    data_dir : str
        Path to the base data directory

    output_dir : str
        Directory in which the datasink stores results
    """
    """
    Load nipype workflows
    """

    preproc = create_featreg_preproc(whichvol='first')
    modelfit = create_modelfit_workflow()
    fixed_fx = create_fixed_effects_flow()
    if subjects_dir:
        registration = create_fs_reg_workflow()
    else:
        registration = create_reg_workflow()
    """
    Remove the plotting connection so that plot iterables don't propagate
    to the model stage
    """

    preproc.disconnect(preproc.get_node('plot_motion'), 'out_file',
                       preproc.get_node('outputspec'), 'motion_plots')
    """
    Set up openfmri data specific components
    """

    subjects = sorted([
        path.split(os.path.sep)[-1]
        for path in glob(os.path.join(data_dir, subj_prefix))
    ])

    infosource = pe.Node(
        niu.IdentityInterface(fields=['subject_id', 'model_id', 'task_id']),
        name='infosource')
    if not subject:  # covers both an empty list and the default of None
        infosource.iterables = [('subject_id', subjects),
                                ('model_id', [model_id]), ('task_id', task_id)]
    else:
        infosource.iterables = [
            ('subject_id',
             [subjects[subjects.index(subj)] for subj in subject]),
            ('model_id', [model_id]), ('task_id', task_id)
        ]

    subjinfo = pe.Node(niu.Function(
        input_names=['subject_id', 'base_dir', 'task_id', 'model_id'],
        output_names=['run_id', 'conds', 'TR'],
        function=get_subjectinfo),
                       name='subjectinfo')
    subjinfo.inputs.base_dir = data_dir
    """
    Return data components as anat, bold and behav
    """

    contrast_file = os.path.join(data_dir, 'models', 'model%03d' % model_id,
                                 'task_contrasts.txt')
    has_contrast = os.path.exists(contrast_file)
    if has_contrast:
        datasource = pe.Node(nio.DataGrabber(
            infields=['subject_id', 'run_id', 'task_id', 'model_id'],
            outfields=['anat', 'bold', 'behav', 'contrasts']),
                             name='datasource')
    else:
        datasource = pe.Node(nio.DataGrabber(
            infields=['subject_id', 'run_id', 'task_id', 'model_id'],
            outfields=['anat', 'bold', 'behav']),
                             name='datasource')
    datasource.inputs.base_directory = data_dir
    datasource.inputs.template = '*'

    if has_contrast:
        datasource.inputs.field_template = {
            'anat': '%s/anatomy/T1_001.nii.gz',
            'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
            'behav': ('%s/model/model%03d/onsets/task%03d_'
                      'run%03d/cond*.txt'),
            'contrasts': ('models/model%03d/'
                          'task_contrasts.txt')
        }
        datasource.inputs.template_args = {
            'anat': [['subject_id']],
            'bold': [['subject_id', 'task_id']],
            'behav': [['subject_id', 'model_id', 'task_id', 'run_id']],
            'contrasts': [['model_id']]
        }
    else:
        datasource.inputs.field_template = {
            'anat': '%s/anatomy/T1_001.nii.gz',
            'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
            'behav': ('%s/model/model%03d/onsets/task%03d_'
                      'run%03d/cond*.txt')
        }
        datasource.inputs.template_args = {
            'anat': [['subject_id']],
            'bold': [['subject_id', 'task_id']],
            'behav': [['subject_id', 'model_id', 'task_id', 'run_id']]
        }

    datasource.inputs.sort_filelist = True
    """
    Create meta workflow
    """

    wf = pe.Workflow(name='openfmri')
    wf.connect(infosource, 'subject_id', subjinfo, 'subject_id')
    wf.connect(infosource, 'model_id', subjinfo, 'model_id')
    wf.connect(infosource, 'task_id', subjinfo, 'task_id')
    wf.connect(infosource, 'subject_id', datasource, 'subject_id')
    wf.connect(infosource, 'model_id', datasource, 'model_id')
    wf.connect(infosource, 'task_id', datasource, 'task_id')
    wf.connect(subjinfo, 'run_id', datasource, 'run_id')
    wf.connect([
        (datasource, preproc, [('bold', 'inputspec.func')]),
    ])

    def get_highpass(TR, hpcutoff):
        # convert the high-pass cutoff from seconds to half the cutoff
        # period in volumes, as expected by fslmaths' -bptf option
        return hpcutoff / (2. * TR)

    gethighpass = pe.Node(niu.Function(input_names=['TR', 'hpcutoff'],
                                       output_names=['highpass'],
                                       function=get_highpass),
                          name='gethighpass')
    wf.connect(subjinfo, 'TR', gethighpass, 'TR')
    wf.connect(gethighpass, 'highpass', preproc, 'inputspec.highpass')
    """
    Setup a basic set of contrasts, a t-test per condition
    """

    def get_contrasts(contrast_file, task_id, conds):
        import numpy as np
        import os
        contrast_def = []
        if os.path.exists(contrast_file):
            with open(contrast_file, 'rt') as fp:
                contrast_def.extend([
                    np.array(row.split()) for row in fp.readlines()
                    if row.strip()
                ])
        contrasts = []
        for row in contrast_def:
            if row[0] != 'task%03d' % task_id:
                continue
            con = [
                row[1], 'T', ['cond%03d' % (i + 1) for i in range(len(conds))],
                row[2:].astype(float).tolist()
            ]
            contrasts.append(con)
        # add auto contrasts for each column
        for i, cond in enumerate(conds):
            con = [cond, 'T', ['cond%03d' % (i + 1)], [1]]
            contrasts.append(con)
        return contrasts

    contrastgen = pe.Node(niu.Function(
        input_names=['contrast_file', 'task_id', 'conds'],
        output_names=['contrasts'],
        function=get_contrasts),
                          name='contrastgen')

    art = pe.MapNode(
        interface=ra.ArtifactDetect(use_differences=[True, False],
                                    use_norm=True,
                                    norm_threshold=1,
                                    zintensity_threshold=3,
                                    parameter_source='FSL',
                                    mask_type='file'),
        iterfield=['realigned_files', 'realignment_parameters', 'mask_file'],
        name="art")

    modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
    modelspec.inputs.input_units = 'secs'

    def check_behav_list(behav, run_id, conds):
        import numpy as np
        num_conds = len(conds)
        # the function body must be self-contained inside a nipype Function
        # node, so test against the built-in str rather than an external
        # string_types alias
        if isinstance(behav, str):
            behav = [behav]
        behav_array = np.array(behav).flatten()
        num_elements = behav_array.shape[0]
        return behav_array.reshape(int(num_elements / num_conds),
                                   num_conds).tolist()

    reshape_behav = pe.Node(niu.Function(
        input_names=['behav', 'run_id', 'conds'],
        output_names=['behav'],
        function=check_behav_list),
                            name='reshape_behav')

    wf.connect(subjinfo, 'TR', modelspec, 'time_repetition')
    wf.connect(datasource, 'behav', reshape_behav, 'behav')
    wf.connect(subjinfo, 'run_id', reshape_behav, 'run_id')
    wf.connect(subjinfo, 'conds', reshape_behav, 'conds')
    wf.connect(reshape_behav, 'behav', modelspec, 'event_files')

    wf.connect(subjinfo, 'TR', modelfit, 'inputspec.interscan_interval')
    wf.connect(subjinfo, 'conds', contrastgen, 'conds')
    if has_contrast:
        wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
    else:
        contrastgen.inputs.contrast_file = ''
    wf.connect(infosource, 'task_id', contrastgen, 'task_id')
    wf.connect(contrastgen, 'contrasts', modelfit, 'inputspec.contrasts')

    wf.connect([(preproc, art,
                 [('outputspec.motion_parameters', 'realignment_parameters'),
                  ('outputspec.realigned_files', 'realigned_files'),
                  ('outputspec.mask', 'mask_file')]),
                (preproc, modelspec,
                 [('outputspec.highpassed_files', 'functional_runs'),
                  ('outputspec.motion_parameters', 'realignment_parameters')]),
                (art, modelspec, [('outlier_files', 'outlier_files')]),
                (modelspec, modelfit, [('session_info',
                                        'inputspec.session_info')]),
                (preproc, modelfit, [('outputspec.highpassed_files',
                                      'inputspec.functional_data')])])

    # Compute TSNR on realigned data, regressing out polynomials up to order 2
    # (MapNode, TSNR, Node, Function, `median` and `imports` come from the
    # enclosing module's top-level imports, not shown in this excerpt)
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(preproc, "outputspec.realigned_files", tsnr, "in_file")

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
    """
    Reorder the copes so that now it combines across runs
    """

    def sort_copes(copes, varcopes, contrasts):
        import numpy as np
        if not isinstance(copes, list):
            copes = [copes]
            varcopes = [varcopes]
        num_copes = len(contrasts)
        n_runs = len(copes)
        all_copes = np.array(copes).flatten()
        all_varcopes = np.array(varcopes).flatten()
        outcopes = all_copes.reshape(int(len(all_copes) / num_copes),
                                     num_copes).T.tolist()
        outvarcopes = all_varcopes.reshape(int(len(all_varcopes) / num_copes),
                                           num_copes).T.tolist()
        return outcopes, outvarcopes, n_runs

    cope_sorter = pe.Node(niu.Function(
        input_names=['copes', 'varcopes', 'contrasts'],
        output_names=['copes', 'varcopes', 'n_runs'],
        function=sort_copes),
                          name='cope_sorter')

    pickfirst = lambda x: x[0]

    wf.connect(contrastgen, 'contrasts', cope_sorter, 'contrasts')
    wf.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
                                      'flameo.mask_file')]),
                (modelfit, cope_sorter, [('outputspec.copes', 'copes')]),
                (modelfit, cope_sorter, [('outputspec.varcopes', 'varcopes')]),
                (cope_sorter, fixed_fx, [('copes', 'inputspec.copes'),
                                         ('varcopes', 'inputspec.varcopes'),
                                         ('n_runs', 'l2model.num_copes')]),
                (modelfit, fixed_fx, [
                    ('outputspec.dof_file', 'inputspec.dof_files'),
                ])])

    wf.connect(calc_median, 'median_file', registration,
               'inputspec.mean_image')
    if subjects_dir:
        wf.connect(infosource, 'subject_id', registration,
                   'inputspec.subject_id')
        registration.inputs.inputspec.subjects_dir = subjects_dir
        registration.inputs.inputspec.target_image = fsl.Info.standard_image(
            'MNI152_T1_2mm_brain.nii.gz')
        if target:
            registration.inputs.inputspec.target_image = target
    else:
        wf.connect(datasource, 'anat', registration,
                   'inputspec.anatomical_image')
        registration.inputs.inputspec.target_image = fsl.Info.standard_image(
            'MNI152_T1_2mm.nii.gz')
        registration.inputs.inputspec.target_image_brain = fsl.Info.standard_image(
            'MNI152_T1_2mm_brain.nii.gz')
        registration.inputs.inputspec.config_file = 'T1_2_MNI152_2mm'

    def merge_files(copes, varcopes, zstats):
        out_files = []
        splits = []
        out_files.extend(copes)
        splits.append(len(copes))
        out_files.extend(varcopes)
        splits.append(len(varcopes))
        out_files.extend(zstats)
        splits.append(len(zstats))
        return out_files, splits

    mergefunc = pe.Node(niu.Function(
        input_names=['copes', 'varcopes', 'zstats'],
        output_names=['out_files', 'splits'],
        function=merge_files),
                        name='merge_files')
    wf.connect([(fixed_fx.get_node('outputspec'), mergefunc, [
        ('copes', 'copes'),
        ('varcopes', 'varcopes'),
        ('zstats', 'zstats'),
    ])])
    wf.connect(mergefunc, 'out_files', registration, 'inputspec.source_files')

    def split_files(in_files, splits):
        copes = in_files[:splits[0]]
        varcopes = in_files[splits[0]:(splits[0] + splits[1])]
        zstats = in_files[(splits[0] + splits[1]):]
        return copes, varcopes, zstats

    splitfunc = pe.Node(niu.Function(
        input_names=['in_files', 'splits'],
        output_names=['copes', 'varcopes', 'zstats'],
        function=split_files),
                        name='split_files')
    wf.connect(mergefunc, 'splits', splitfunc, 'splits')
    wf.connect(registration, 'outputspec.transformed_files', splitfunc,
               'in_files')

    if subjects_dir:
        get_roi_mean = pe.MapNode(fs.SegStats(default_color_table=True),
                                  iterfield=['in_file'],
                                  name='get_aparc_means')
        get_roi_mean.inputs.avgwf_txt_file = True
        wf.connect(fixed_fx.get_node('outputspec'), 'copes', get_roi_mean,
                   'in_file')
        wf.connect(registration, 'outputspec.aparc', get_roi_mean,
                   'segmentation_file')

        get_roi_tsnr = pe.MapNode(fs.SegStats(default_color_table=True),
                                  iterfield=['in_file'],
                                  name='get_aparc_tsnr')
        get_roi_tsnr.inputs.avgwf_txt_file = True
        wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file')
        wf.connect(registration, 'outputspec.aparc', get_roi_tsnr,
                   'segmentation_file')
    """
    Connect to a datasink
    """

    def get_subs(subject_id, conds, run_id, model_id, task_id):
        subs = [('_subject_id_%s_' % subject_id, '')]
        subs.append(('_model_id_%d' % model_id, 'model%03d' % model_id))
        subs.append(('task_id_%d/' % task_id, '/task%03d_' % task_id))
        subs.append(
            ('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_warp', 'mean'))
        subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_flirt',
                     'affine'))

        for i in range(len(conds)):
            subs.append(('_flameo%d/cope1.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_flameo%d/varcope1.' % i, 'varcope%02d.' % (i + 1)))
            subs.append(('_flameo%d/zstat1.' % i, 'zstat%02d.' % (i + 1)))
            subs.append(('_flameo%d/tstat1.' % i, 'tstat%02d.' % (i + 1)))
            subs.append(('_flameo%d/res4d.' % i, 'res4d%02d.' % (i + 1)))
            subs.append(('_warpall%d/cope1_warp.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_warpall%d/varcope1_warp.' % (len(conds) + i),
                         'varcope%02d.' % (i + 1)))
            subs.append(('_warpall%d/zstat1_warp.' % (2 * len(conds) + i),
                         'zstat%02d.' % (i + 1)))
            subs.append(('_warpall%d/cope1_trans.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_warpall%d/varcope1_trans.' % (len(conds) + i),
                         'varcope%02d.' % (i + 1)))
            subs.append(('_warpall%d/zstat1_trans.' % (2 * len(conds) + i),
                         'zstat%02d.' % (i + 1)))
            subs.append(('__get_aparc_means%d/' % i, '/cope%02d_' % (i + 1)))

        for i, run_num in enumerate(run_id):
            subs.append(('__get_aparc_tsnr%d/' % i, '/run%02d_' % run_num))
            subs.append(('__art%d/' % i, '/run%02d_' % run_num))
            subs.append(('__dilatemask%d/' % i, '/run%02d_' % run_num))
            subs.append(('__realign%d/' % i, '/run%02d_' % run_num))
            subs.append(('__modelgen%d/' % i, '/run%02d_' % run_num))
        subs.append(('/model%03d/task%03d/' % (model_id, task_id), '/'))
        subs.append(('/model%03d/task%03d_' % (model_id, task_id), '/'))
        subs.append(('_bold_dtype_mcf_bet_thresh_dil', '_mask'))
        subs.append(('_output_warped_image', '_anat2target'))
        subs.append(('median_flirt_brain_mask', 'median_brain_mask'))
        subs.append(('median_bbreg_brain_mask', 'median_brain_mask'))
        return subs

    subsgen = pe.Node(niu.Function(
        input_names=['subject_id', 'conds', 'run_id', 'model_id', 'task_id'],
        output_names=['substitutions'],
        function=get_subs),
                      name='subsgen')
    wf.connect(subjinfo, 'run_id', subsgen, 'run_id')

    datasink = pe.Node(interface=nio.DataSink(), name="datasink")
    wf.connect(infosource, 'subject_id', datasink, 'container')
    wf.connect(infosource, 'subject_id', subsgen, 'subject_id')
    wf.connect(infosource, 'model_id', subsgen, 'model_id')
    wf.connect(infosource, 'task_id', subsgen, 'task_id')
    wf.connect(contrastgen, 'contrasts', subsgen, 'conds')
    wf.connect(subsgen, 'substitutions', datasink, 'substitutions')
    wf.connect([(fixed_fx.get_node('outputspec'), datasink,
                 [('res4d', 'res4d'), ('copes', 'copes'),
                  ('varcopes', 'varcopes'), ('zstats', 'zstats'),
                  ('tstats', 'tstats')])])
    wf.connect([(modelfit.get_node('modelgen'), datasink, [
        ('design_cov', 'qa.model'),
        ('design_image', 'qa.model.@matrix_image'),
        ('design_file', 'qa.model.@matrix'),
    ])])
    wf.connect([(preproc, datasink,
                 [('outputspec.motion_parameters', 'qa.motion'),
                  ('outputspec.motion_plots', 'qa.motion.plots'),
                  ('outputspec.mask', 'qa.mask')])])
    wf.connect(registration, 'outputspec.mean2anat_mask', datasink,
               'qa.mask.mean2anat')
    wf.connect(art, 'norm_files', datasink, 'qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.anat2target', datasink,
               'qa.anat2target')
    wf.connect(tsnr, 'tsnr_file', datasink, 'qa.tsnr.@map')
    if subjects_dir:
        wf.connect(registration, 'outputspec.min_cost_file', datasink,
                   'qa.mincost')
        wf.connect([(get_roi_tsnr, datasink, [('avgwf_txt_file', 'qa.tsnr'),
                                              ('summary_file',
                                               'qa.tsnr.@summary')])])
        wf.connect([(get_roi_mean, datasink, [('avgwf_txt_file', 'copes.roi'),
                                              ('summary_file',
                                               'copes.roi.@summary')])])
    wf.connect([(splitfunc, datasink, [
        ('copes', 'copes.mni'),
        ('varcopes', 'varcopes.mni'),
        ('zstats', 'zstats.mni'),
    ])])
    wf.connect(calc_median, 'median_file', datasink, 'mean')
    wf.connect(registration, 'outputspec.transformed_mean', datasink,
               'mean.mni')
    wf.connect(registration, 'outputspec.func2anat_transform', datasink,
               'xfm.mean2anat')
    wf.connect(registration, 'outputspec.anat2target_transform', datasink,
               'xfm.anat2target')
    """
    Set processing parameters
    """

    preproc.inputs.inputspec.fwhm = fwhm
    gethighpass.inputs.hpcutoff = hpcutoff
    modelspec.inputs.high_pass_filter_cutoff = hpcutoff
    modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': use_derivatives}}
    modelfit.inputs.inputspec.model_serial_correlations = True
    modelfit.inputs.inputspec.film_threshold = 1000

    datasink.inputs.base_directory = output_dir
    return wf
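A hedged invocation sketch for this driver; every path and ID below is a placeholder, not a value from the code above.

# Hypothetical invocation; adjust paths and IDs to the dataset at hand.
wf = analyze_openfmri_dataset(data_dir='/data/ds000114',
                              subject=['sub001'],
                              model_id=1,
                              task_id=[1],
                              output_dir='/output/level1')
wf.base_dir = '/scratch/openfmri'
# wf.run(plugin='MultiProc')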
コード例 #23
0
def pipeline(args):
    if args['debug']:
        config.enable_debug_mode()
    config.update_config(
        {'logging': {
            'log_directory': makeSupportDir(args['name'], "logs")
        }})
    logging.update_logging(config)

    # CONSTANTS
    sessionID = args['session']
    outputType = args['format'].upper()
    fOutputType = args['freesurfer']
    preprocessOn = args['preprocess']
    maskGM = args['maskgm']
    maskWholeBrain = args['maskwb']
    maskWhiteMatterFromSeeds = args['maskseeds']
    # print args['name']
    t1_experiment = "20141001_PREDICTHD_long_Results"  #"20130729_PREDICT_Results"
    atlasFile = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "ReferenceAtlas",
                     "template_t1.nii.gz"))
    wholeBrainFile = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "ReferenceAtlas",
                     "template_brain.nii.gz"))
    atlasLabel = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "ReferenceAtlas",
                     "template_nac_labels.nii.gz"))
    resampleResolution = (2.0, 2.0, 2.0)
    downsampledfilename = 'downsampled_atlas.nii.gz'

    master = pipe.Workflow(name=args['name'] + "_CACHE")
    master.base_dir = os.path.abspath("/Shared/sinapse/CACHE")

    sessions = pipe.Node(interface=IdentityInterface(fields=['session_id']),
                         name='sessionIDs')
    sessions.iterables = ('session_id', sessionID)
    downsampleAtlas = pipe.Node(interface=Function(
        function=resampleImage,
        input_names=['inputVolume', 'outputVolume', 'resolution'],
        output_names=['outputVolume']),
                                name="downsampleAtlas")
    downsampleAtlas.inputs.inputVolume = atlasFile
    downsampleAtlas.inputs.outputVolume = downsampledfilename
    downsampleAtlas.inputs.resolution = [int(x) for x in resampleResolution]

    # HACK: Remove node from pipeline until Nipype/AFNI file copy issue is resolved
    # fmri_DataSink = pipe.Node(interface=DataSink(), name="fmri_DataSink")
    # fmri_DataSink.overwrite = REWRITE_DATASINKS
    # Output to: /Shared/paulsen/Experiments/YYYYMMDD_<experiment>_Results/fmri
    # fmri_DataSink.inputs.base_directory = os.path.join(master.base_dir, RESULTS_DIR, 'fmri')
    # fmri_DataSink.inputs.substitutions = [('to_3D_out+orig', 'to3D')]
    # fmri_DataSink.inputs.parameterization = False
    #
    # master.connect([(sessions, fmri_DataSink, [('session_id', 'container')])])
    # END HACK

    registration = registrationWorkflow.workflow(t1_experiment,
                                                 outputType,
                                                 name="registration_wkfl")
    master.connect([(sessions, registration, [('session_id',
                                               "inputs.session_id")])])

    detrend = afninodes.detrendnode(outputType, 'afni3Ddetrend')
    # define grabber
    site = "*"
    subject = "*"
    if preprocessOn:
        grabber = dataio.iowaGrabber(t1_experiment, site, subject, maskGM,
                                     maskWholeBrain)
        master.connect([(sessions, grabber, [('session_id', 'session_id')]),
                        (grabber, registration, [('t1_File', 'inputs.t1')])])
        # Why isn't preprocessWorkflow.workflow() used instead? It would avoid most of the nuisance connections here...
        preprocessing = preprocessWorkflow.prepWorkflow(skipCount=6,
                                                        outputType=outputType)
        name = args.pop(
            'name')  # HACK: prevent name conflict with nuisance workflow
        nuisance = nuisanceWorkflow.workflow(outputType=outputType, **args)
        args['name'] = name  # END HACK
        master.connect([
            (grabber, preprocessing, [('fmri_dicom_dir', 'to_3D.infolder'),
                                      ('fmri_dicom_dir',
                                       'formatFMRINode.dicomDirectory')]),
            (grabber, nuisance, [('whmFile', 'wm.warpWMtoFMRI.input_image')]),
            (
                preprocessing,
                registration,
                [
                    ('merge.out_file', 'inputs.fmri'),  # 7
                    ('automask.out_file', 'tstat.mask_file')
                ]),  # *optional*
            (
                registration,
                nuisance,
                [
                    ('outputs.fmri_reference',
                     'csf.warpCSFtoFMRI.reference_image'),  # CSF
                    ('outputs.nac2fmri_list', 'csf.warpCSFtoFMRI.transforms'),
                    ('outputs.fmri_reference',
                     'wm.warpWMtoFMRI.reference_image'),  # WM
                    ('outputs.t12fmri_list', 'wm.warpWMtoFMRI.transforms')
                ]),
        ])
        warpCSFtoFMRInode = nuisance.get_node('csf').get_node('warpCSFtoFMRI')
        warpCSFtoFMRInode.inputs.input_image = atlasFile
        if maskGM:
            master.connect([
                (grabber, nuisance, [('gryFile', 'gm.warpGMtoFMRI.input_image')
                                     ]),
                (registration, nuisance, [('outputs.fmri_reference',
                                           'gm.warpGMtoFMRI.reference_image'),
                                          ('outputs.t12fmri_list',
                                           'gm.warpGMtoFMRI.transforms')]),
                (preprocessing, nuisance,
                 [('calc.out_file', 'gm.afni3DmaskAve_grm.in_file'),
                  ('volreg.oned_file', 'afni3Ddeconvolve.stim_file_4')])
            ])
        elif maskWholeBrain:
            master.connect([
                (registration, nuisance,
                 [('outputs.fmri_reference',
                   'wb.warpBraintoFMRI.reference_image'),
                  ('outputs.nac2fmri_list', 'wb.warpBraintoFMRI.transforms')]),
                (preprocessing, nuisance,
                 [('calc.out_file', 'wb.afni3DmaskAve_whole.in_file'),
                  ('volreg.oned_file', 'afni3Ddeconvolve.stim_file_4')])
            ])
            warpBraintoFMRInode = nuisance.get_node('wb').get_node(
                'warpBraintoFMRI')
            warpBraintoFMRInode.inputs.input_image = wholeBrainFile
        else:
            master.connect([(preprocessing, nuisance, [
                ('volreg.oned_file', 'afni3Ddeconvolve.stim_file_3')
            ])])

        master.connect([(preprocessing, nuisance,
                         [('calc.out_file', 'wm.afni3DmaskAve_wm.in_file'),
                          ('calc.out_file', 'csf.afni3DmaskAve_csf.in_file'),
                          ('calc.out_file', 'afni3Ddeconvolve.in_file')]),
                        (nuisance, detrend, [('afni3Ddeconvolve.out_errts',
                                              'in_file')])])  # 13
    else:
        cleveland_grabber = dataio.clevelandGrabber()
        grabber = dataio.autoworkupGrabber(t1_experiment, site, subject)
        converter = pipe.Node(interface=Copy(),
                              name='converter')  # Convert ANALYZE to AFNI

        master.connect([
            (sessions, grabber, [('session_id', 'session_id')]),
            (grabber, registration, [('t1_File', 'inputs.t1')]),
            (sessions, cleveland_grabber, [('session_id', 'session_id')]),
            (cleveland_grabber, converter, [('fmriHdr', 'in_file')]),
            (converter, registration, [('out_file', 'inputs.fmri')]),
            (converter, detrend, [('out_file', 'in_file')]),  # in fMRI_space
        ])

    t1_wf = registrationWorkflow.t1Workflow()
    babc_wf = registrationWorkflow.babcWorkflow()
    # HACK: No EPI
    # epi_wf = registrationWorkflow.epiWorkflow()
    lb_wf = registrationWorkflow.labelWorkflow()
    seed_wf = registrationWorkflow.seedWorkflow()
    bandpass = afninodes.fouriernode(
        outputType, 'fourier'
    )  # Fourier is the last NIFTI file format in the AFNI pipeline

    master.connect([
        (detrend, bandpass, [('out_file', 'in_file')
                             ]),  # Per Dawei, bandpass after running 3dDetrend
        (grabber, t1_wf, [('t1_File', 'warpT1toFMRI.input_image')]),
        (
            registration,
            t1_wf,
            [
                ('outputs.fmri_reference',
                 'warpT1toFMRI.reference_image'),  # T1
                ('outputs.t12fmri_list', 'warpT1toFMRI.transforms')
            ]),
        (grabber, babc_wf, [('csfFile', 'warpBABCtoFMRI.input_image')]),
        (
            registration,
            babc_wf,
            [
                ('outputs.fmri_reference',
                 'warpBABCtoFMRI.reference_image'),  # Labels
                ('outputs.t12fmri_list', 'warpBABCtoFMRI.transforms')
            ]),
        # HACK: No EPI
        # (downsampleAtlas, epi_wf, [('outputVolume', 'warpEPItoNAC.reference_image')]),
        # (registration, epi_wf,    [('outputs.fmri2nac_list', 'warpEPItoNAC.transforms')]),
        # (bandpass, epi_wf,         [('out_file', 'warpEPItoNAC.input_image')]),
        # END HACK
        (downsampleAtlas, lb_wf, [('outputVolume',
                                   'warpLabeltoNAC.reference_image')]),
        (registration, lb_wf, [('outputs.fmri2nac_list',
                                'warpLabeltoNAC.transforms')]),
        (t1_wf, seed_wf, [('warpT1toFMRI.output_image',
                           'warpSeedtoFMRI.reference_image')]),
        (registration, seed_wf, [('outputs.nac2fmri_list',
                                  'warpSeedtoFMRI.transforms')]),
    ])

    renameMasks = pipe.Node(interface=Rename(format_string='%(label)s_mask'),
                            name='renameMasksAtlas')
    renameMasks.inputs.keep_ext = True
    atlas_DataSink = dataio.atlasSink(base_directory=master.base_dir, **args)
    master.connect([
        (renameMasks, atlas_DataSink, [('out_file', 'Atlas')]),
        (downsampleAtlas, atlas_DataSink, [('outputVolume', 'Atlas.@resampled')
                                           ]),
    ])

    renameMasks2 = pipe.Node(
        interface=Rename(format_string='%(session)s_%(label)s_mask'),
        name='renameMasksFMRI')
    renameMasks2.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameMasks2, 'session')

    clipSeedWithVentriclesNode = pipe.Node(interface=Function(
        function=clipSeedWithVentricles,
        input_names=['seed', 'label', 'outfile'],
        output_names=['clipped_seed_fn']),
                                           name='clipSeedWithVentriclesNode')
    clipSeedWithVentriclesNode.inputs.outfile = "clipped_seed.nii.gz"

    master.connect(seed_wf, 'warpSeedtoFMRI.output_image',
                   clipSeedWithVentriclesNode, 'seed')
    master.connect(babc_wf, 'warpBABCtoFMRI.output_image',
                   clipSeedWithVentriclesNode, 'label')
    if not maskWhiteMatterFromSeeds:
        master.connect(clipSeedWithVentriclesNode, 'clipped_seed_fn',
                       renameMasks2, 'in_file')
    else:
        clipSeedWithWhiteMatterNode = pipe.Node(
            interface=Function(function=clipSeedWithWhiteMatter,
                               input_names=['seed', 'mask', 'outfile'],
                               output_names=['outfile']),
            name='clipSeedWithWhiteMatterNode')
        clipSeedWithWhiteMatterNode.inputs.outfile = 'clipped_wm_seed.nii.gz'
        master.connect(babc_wf, 'warpBABCtoFMRI.output_image',
                       clipSeedWithWhiteMatterNode, 'mask')
        master.connect(clipSeedWithVentriclesNode, 'clipped_seed_fn',
                       clipSeedWithWhiteMatterNode, 'seed')
        master.connect(clipSeedWithWhiteMatterNode, 'outfile', renameMasks2,
                       'in_file')
    # Labels are iterated over, so we need a separate datasink to avoid
    # overwriting any preprocessing results when the labels are iterated
    # (e.g., the To3d output)
    # Write out to: /Shared/sinapse/CACHE/YYYYMMDD_<experiment>_Results/<SESSION>
    fmri_label_DataSink = dataio.fmriSink(master.base_dir, **args)
    master.connect(sessions, 'session_id', fmri_label_DataSink, 'container')
    master.connect(renameMasks2, 'out_file', fmri_label_DataSink, 'masks')
    master.connect(bandpass, 'out_file', fmri_label_DataSink,
                   'masks.@bandpass')

    roiMedian = afninodes.maskavenode('AFNI_1D', 'afni_roiMedian',
                                      '-mrange 1 1')
    master.connect(renameMasks2, 'out_file', roiMedian, 'mask')
    master.connect(bandpass, 'out_file', roiMedian, 'in_file')

    correlate = afninodes.fimnode('Correlation', 'afni_correlate')
    master.connect(roiMedian, 'out_file', correlate, 'ideal_file')
    master.connect(bandpass, 'out_file', correlate, 'in_file')

    regionLogCalc = afninodes.logcalcnode(outputType, 'afni_regionLogCalc')
    master.connect(correlate, 'out_file', regionLogCalc, 'in_file_a')

    renameZscore = pipe.Node(
        interface=Rename(format_string="%(session)s_%(label)s_zscore"),
        name='renameZscore')
    renameZscore.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameZscore, 'session')
    master.connect(regionLogCalc, 'out_file', renameZscore, 'in_file')
    master.connect(renameZscore, 'out_file', fmri_label_DataSink, 'zscores')
    master.connect(t1_wf, 'warpT1toFMRI.output_image', fmri_label_DataSink,
                   'zscores.@t1Underlay')

    # Move z values back into NAC atlas space
    # master.connect(downsampleAtlas, 'outputVolume', lb_wf, 'warpLabeltoNAC.reference_image')
    master.connect(regionLogCalc, 'out_file', lb_wf,
                   'warpLabeltoNAC.input_image')

    renameZscore2 = pipe.Node(
        interface=Rename(format_string="%(session)s_%(label)s_result"),
        name='renameZscore2')
    renameZscore2.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameZscore2, 'session')
    master.connect(lb_wf, 'warpLabeltoNAC.output_image', renameZscore2,
                   'in_file')
    master.connect(renameZscore2, 'out_file', atlas_DataSink, 'Atlas.@zscore')
    # Connect seed subworkflow
    seedSubflow = seedWorkflow.workflow(args['seeds'],
                                        outputType='NIFTI_GZ',
                                        name='seed_wkfl')
    master.connect([
        (downsampleAtlas, seedSubflow, [('outputVolume',
                                         'afni3Dcalc_seeds.in_file_a')]),
        (seedSubflow, renameMasks, [('afni3Dcalc_seeds.out_file', 'in_file'),
                                    ('selectLabel.out', 'label')]),
        (seedSubflow, renameMasks2, [('selectLabel.out', 'label')]),
        (seedSubflow, renameZscore, [('selectLabel.out', 'label')]),
        (seedSubflow, renameZscore2, [('selectLabel.out', 'label')]),
        (seedSubflow, seed_wf, [('afni3Dcalc_seeds.out_file',
                                 'warpSeedtoFMRI.input_image')])
    ])
    imageDir = makeSupportDir(args['name'], "images")
    if args['plot']:
        registration.write_graph(dotfilename=os.path.join(
            imageDir, 'register.dot'),
                                 graph2use='orig',
                                 format='png',
                                 simple_form=False)
        if preprocessOn:
            preprocessing.write_graph(dotfilename=os.path.join(
                imageDir, 'preprocess.dot'),
                                      graph2use='orig',
                                      format='png',
                                      simple_form=False)
            nuisance.write_graph(dotfilename=os.path.join(
                imageDir, 'nuisance.dot'),
                                 graph2use='orig',
                                 format='png',
                                 simple_form=False)
        seedSubflow.write_graph(dotfilename=os.path.join(imageDir, 'seed.dot'),
                                graph2use='orig',
                                format='png',
                                simple_form=False)
        master.write_graph(dotfilename=os.path.join(imageDir, 'master.dot'),
                           graph2use="orig",
                           format='png',
                           simple_form=False)
    elif args['debug']:
        try:
            master.run(updatehash=True)
            # Run restingState on the all threads
            # Setup environment for CPU load balancing of ITK based programs.
            # --------
            # import multiprocessing
            # total_CPUS = 10  # multiprocessing.cpu_count()
            # master.run(plugin='MultiProc', plugin_args={'n_proc': total_CPUS})  #, updatehash=True)
            # --------
            # Run restingState on the local cluster
            # master.run(plugin='SGE', plugin_args={'template': os.path.join(os.getcwd(), 'ENV/bin/activate'),
            #                                        'qsub_args': '-S /bin/bash -cwd'})  #, updatehash=True)
        except Exception:
            pass  # tolerate run failures so the debug graphs below still get written
        master.name = "master"  # HACK: Bug in Graphviz for nodes beginning with numbers
        master.write_graph(dotfilename=os.path.join(imageDir,
                                                    'debug_hier.dot'),
                           graph2use="colored",
                           format='png')
        master.write_graph(dotfilename=os.path.join(imageDir,
                                                    'debug_orig.dot'),
                           graph2use="flat",
                           format='png')
    else:
        import multiprocessing
        total_CPUS = multiprocessing.cpu_count()
        master.run(plugin='MultiProc',
                   plugin_args={'n_proc': total_CPUS})  #, updatehash=True)
    return 0
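The args dictionary consumed by pipeline() can be reconstructed from the lookups above. A hedged sketch with placeholder values follows; note that the sub-workflows receive **args, so they may require additional keys not visible in this excerpt.

# Key names are taken from the lookups in pipeline(); values are placeholders.
args = {
    'name': 'restingState_v1',
    'session': ['0001', '0002'],
    'format': 'nifti_gz',
    'freesurfer': 'nifti_gz',
    'preprocess': True,
    'maskgm': False,
    'maskwb': False,
    'maskseeds': False,
    'seeds': ['seed_label_1'],
    'plot': False,
    'debug': False,
}
# pipeline(args)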
コード例 #24
0
def create_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------
        name : name of workflow (default: 'registration')

    Inputs:

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.anatomical_image : anatomical image to coregister to
        inputspec.target_image : registration target

    Outputs:

        outputspec.func2anat_transform : FLIRT transform
        outputspec.anat2target_transform : FLIRT+FNIRT transform
        outputspec.transformed_files : transformed files in target space
        outputspec.transformed_mean : mean image in target space
    """

    register = pe.Workflow(name=name)

    inputnode = pe.Node(interface=niu.IdentityInterface(fields=[
        'source_files', 'mean_image', 'anatomical_image', 'target_image',
        'target_image_brain', 'config_file'
    ]),
                        name='inputspec')
    outputnode = pe.Node(interface=niu.IdentityInterface(fields=[
        'func2anat_transform', 'anat2target_transform', 'transformed_files',
        'transformed_mean', 'anat2target', 'mean2anat_mask'
    ]),
                         name='outputspec')
    """
    Estimate the tissue classes from the anatomical image. But use spm's segment
    as FSL appears to be breaking.
    """

    stripper = pe.Node(fsl.BET(), name='stripper')
    register.connect(inputnode, 'anatomical_image', stripper, 'in_file')
    fast = pe.Node(fsl.FAST(), name='fast')
    register.connect(stripper, 'out_file', fast, 'in_files')
    """
    Binarize the segmentation
    """

    binarize = pe.Node(fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'),
                       name='binarize')
    pickindex = lambda x, i: x[i]
    register.connect(fast, ('partial_volume_files', pickindex, 2), binarize,
                     'in_file')
    """
    Calculate rigid transform from mean image to anatomical image
    """

    mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat')
    mean2anat.inputs.dof = 6
    register.connect(inputnode, 'mean_image', mean2anat, 'in_file')
    register.connect(stripper, 'out_file', mean2anat, 'reference')
    """
    Now use bbr cost function to improve the transform
    """

    mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr')
    mean2anatbbr.inputs.dof = 6
    mean2anatbbr.inputs.cost = 'bbr'
    mean2anatbbr.inputs.schedule = os.path.join(os.getenv('FSLDIR'),
                                                'etc/flirtsch/bbr.sch')
    register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file')
    register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg')
    register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference')
    register.connect(mean2anat, 'out_matrix_file', mean2anatbbr,
                     'in_matrix_file')
    """
    Create a mask of the median image coregistered to the anatomical image
    """

    mean2anat_mask = pe.Node(fsl.BET(mask=True), name='mean2anat_mask')
    register.connect(mean2anatbbr, 'out_file', mean2anat_mask, 'in_file')
    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = pe.Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(mean2anatbbr, 'out_matrix_file', convert2itk,
                     'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')
    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """

    reg = pe.Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[
        100, 30, 20
    ]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.args = '--float'
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    reg.plugin_args = {
        'qsub_args': '-pe orte 4',
        'sbatch_args': '--mem=6G -c 4'
    }
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image_brain', reg, 'fixed_image')
    """
    Concatenate the affine and ants transforms into a list
    """

    pickfirst = lambda x: x[0]

    merge = pe.Node(niu.Merge(2), name='mergexfm')  # iterfield applies to MapNode only
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, 'composite_transform', merge, 'in1')
    """
    Transform the mean image. First to anatomical and then to target
    """

    warpmean = pe.Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 0
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.inputs.terminal_output = 'file'

    register.connect(inputnode, 'target_image_brain', warpmean,
                     'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = pe.MapNode(ants.ApplyTransforms(),
                         iterfield=['input_image'],
                         name='warpall')
    warpall.inputs.input_image_type = 0
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'

    register.connect(inputnode, 'target_image_brain', warpall,
                     'reference_image')
    register.connect(inputnode, 'source_files', warpall, 'input_image')
    register.connect(merge, 'out', warpall, 'transforms')
    """
    Assign all the output files
    """

    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(warpall, 'output_image', outputnode, 'transformed_files')
    register.connect(mean2anatbbr, 'out_matrix_file', outputnode,
                     'func2anat_transform')
    register.connect(mean2anat_mask, 'mask_file', outputnode, 'mean2anat_mask')
    register.connect(reg, 'composite_transform', outputnode,
                     'anat2target_transform')

    return register
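A minimal usage sketch, assuming FSL is configured (FSLDIR set) so the standard images resolve; the input filenames are placeholders.

import nipype.interfaces.fsl as fsl

reg = create_reg_workflow()
reg.inputs.inputspec.mean_image = 'mean_func.nii.gz'        # placeholder
reg.inputs.inputspec.anatomical_image = 'T1w.nii.gz'        # placeholder
reg.inputs.inputspec.source_files = ['cope01.nii.gz']       # placeholder
reg.inputs.inputspec.target_image = fsl.Info.standard_image(
    'MNI152_T1_2mm.nii.gz')
reg.inputs.inputspec.target_image_brain = fsl.Info.standard_image(
    'MNI152_T1_2mm_brain.nii.gz')
reg.inputs.inputspec.config_file = 'T1_2_MNI152_2mm'
# reg.run()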
コード例 #25
0
def localizer(name='localizer'):
    import nipype.interfaces.freesurfer as fs
    import nipype.interfaces.fsl as fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu
    import nipype.interfaces.io as nio
    wf = pe.Workflow(name=name)
    inputspec = pe.Node(niu.IdentityInterface(fields=[
        "subject_id", "subjects_dir", "overlay", 'reg', 'mean', 'hemi',
        'thresh', 'roi', "mask_overlay", "use_mask_overlay", "uthresh"
    ]),
                        name='inputspec')
    surf_label = pe.MapNode(niu.Function(input_names=[
        'vertex', 'hemi', 'subject', 'overlay', 'reg', 'sd', 'thresh'
    ],
                                         output_names=['filename', 'labels'],
                                         function=get_surface_label),
                            name='get_surface_label',
                            iterfield=['hemi', 'vertex'])
    #surf_label.inputs.hemi=['lh','rh']
    wf.connect(inputspec, 'hemi', surf_label, 'hemi')
    #surf_label.inputs.vertex = [61091, 60437]
    #surf_label.inputs.thresh = 1.5

    masker = pe.Node(niu.Function(
        input_names=['mask', 'overlay', 'use_mask_overlay', 'thresh'],
        output_names=['outfile'],
        function=mask_overlay),
                     name='mask_overlay')

    bg = pe.Node(niu.Function(input_names=['overlay', 'uthresh'],
                              output_names=['outfile'],
                              function=background),
                 name='background')
    wf.connect(inputspec, 'overlay', bg, 'overlay')
    wf.connect(inputspec, 'uthresh', bg, 'uthresh')
    wf.connect(inputspec, 'overlay', masker, 'overlay')
    wf.connect(inputspec, 'mask_overlay', masker, 'mask')
    wf.connect(inputspec, 'use_mask_overlay', masker, 'use_mask_overlay')
    wf.connect(inputspec, 'thresh', masker, 'thresh')
    wf.connect(masker, 'outfile', surf_label, 'overlay')

    wf.connect(inputspec, "subject_id", surf_label, "subject")
    wf.connect(inputspec, "subjects_dir", surf_label, "sd")
    #wf.connect(inputspec,"overlay",surf_label,"overlay")
    wf.connect(inputspec, "reg", surf_label, "reg")

    label2vol = pe.Node(niu.Function(input_names=[
        'subjects_dir', 'template_file', 'reg_file', 'label_file'
    ],
                                     output_names=['vol_label_file'],
                                     function=labelvol),
                        name='labels2vol')
    wf.connect(inputspec, 'subjects_dir', label2vol, 'subjects_dir')
    wf.connect(inputspec, 'mean', label2vol, 'template_file')
    wf.connect(inputspec, 'reg', label2vol, 'reg_file')
    wf.connect(surf_label, 'filename', label2vol, 'label_file')

    verts = pe.MapNode(niu.Function(input_names=[
        'sub', 'sd', 'overlay', 'reg', 'mean', 'hemi', 'roi', 'thresh'
    ],
                                    output_names=['vertex'],
                                    function=get_vertices),
                       name='get_verts',
                       iterfield=['hemi'])
    #verts.inputs.hemi = ['lh','rh']
    wf.connect(inputspec, 'hemi', verts, 'hemi')
    wf.connect(inputspec, 'subject_id', verts, 'sub')
    wf.connect(inputspec, 'subjects_dir', verts, 'sd')
    #wf.connect(inputspec,'overlay',verts,'overlay')
    wf.connect(masker, 'outfile', verts, 'overlay')
    wf.connect(inputspec, 'reg', verts, 'reg')
    wf.connect(inputspec, 'mean', verts, 'mean')
    wf.connect(inputspec, 'thresh', verts, 'thresh')
    wf.connect(inputspec, 'roi', verts, 'roi')
    wf.connect(verts, 'vertex', surf_label, 'vertex')
    wf.connect(inputspec, 'thresh', surf_label, 'thresh')

    from ...smri.freesurfer_brain_masks import pickaparc

    fssource = pe.Node(nio.FreeSurferSource(), name='fssource')
    wf.connect(inputspec, "subjects_dir", fssource, "subjects_dir")
    wf.connect(inputspec, "subject_id", fssource, "subject_id")

    bg_mask = pe.Node(fs.Binarize(wm_ven_csf=True, erode=2), name="bg_mask")

    wf.connect(fssource, ("aparc_aseg", pickaparc), bg_mask, "in_file")

    warp_mask = pe.Node(fs.ApplyVolTransform(inverse=True, interp='nearest'),
                        name="warp_to_func")
    wf.connect(inputspec, "mean", warp_mask, "source_file")
    wf.connect(bg_mask, "binary_file", warp_mask, "target_file")
    wf.connect(inputspec, "reg", warp_mask, "reg_file")

    do_bg_mask = pe.Node(fs.ApplyMask(), name="do_bg_mask")
    wf.connect(warp_mask, "transformed_file", do_bg_mask, "mask_file")

    studyref = pe.Node(niu.Function(input_names=['mean'],
                                    output_names=['study_ref'],
                                    function=study_ref),
                       name='studyref')
    wf.connect(inputspec, 'mean', studyref, 'mean')

    outputspec = pe.Node(niu.IdentityInterface(
        fields=['rois', 'reference', 'study_ref', 'labels']),
                         name='outputspec')

    wf.connect(studyref, 'study_ref', outputspec, 'study_ref')
    bin_roi = pe.Node(fsl.ImageMaths(op_string='-bin'), name="binarize_roi")
    changetype = pe.Node(fsl.ChangeDataType(output_datatype='short',
                                            output_type='NIFTI'),
                         name='to_short')

    wf.connect(bg, 'outfile', do_bg_mask, "in_file")
    wf.connect(do_bg_mask, ("out_file", shorty), outputspec, 'reference')
    wf.connect(label2vol, 'vol_label_file', bin_roi, 'in_file')
    wf.connect(bin_roi, 'out_file', changetype, 'in_file')
    wf.connect(changetype, 'out_file', outputspec, 'rois')
    wf.connect(surf_label, 'labels', outputspec, 'labels')
    return wf
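
A hedged usage sketch for the workflow above; every value below (subject ID, paths, thresholds) is a placeholder for illustration, not something taken from the original source:

wf = localizer(name='localizer')
wf.inputs.inputspec.subject_id = 'sub01'             # placeholder FreeSurfer subject
wf.inputs.inputspec.subjects_dir = '/data/subjects'  # placeholder SUBJECTS_DIR
wf.inputs.inputspec.overlay = 'zstat1.nii.gz'        # placeholder statistical overlay
wf.inputs.inputspec.reg = 'register.dat'             # placeholder registration file
wf.inputs.inputspec.mean = 'mean_func.nii.gz'        # placeholder mean functional
wf.inputs.inputspec.hemi = ['lh', 'rh']              # one entry per MapNode iteration
wf.inputs.inputspec.thresh = 1.5
wf.inputs.inputspec.uthresh = 8.0
wf.inputs.inputspec.roi = 'roi'                      # whatever get_vertices expects here
wf.inputs.inputspec.use_mask_overlay = False
wf.inputs.inputspec.mask_overlay = 'mask.nii.gz'     # placeholder; ignored when False
wf.run()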
Code example #26
File: daic2hcp.py  Project: ericearl/daic2hcp
def generate_workflow(**inputs):
    """
    generates computation graph for daic2hcp
    :param t1w_file: t1w mgz file in some space
    :param t2w_file: t2w mgz file aligned to t1w
    :param mask_file: mask or brain mgz file aligned to t1w
    :param fmri_files: list of fmri mgz files
    :param rs_files: list of fc-preprocessed resting state fmri mgz files
    :param output_dir: desired output "HCP" directory
    :return: nipype workflow
    """
    # setup variables
    subject_id = inputs['subjectid']
    reference = os.path.join(os.environ['HCPPIPEDIR_Templates'],
                             'MNI152_T1_1mm.nii.gz')
    reference2mm = os.path.join(os.environ['HCPPIPEDIR_Templates'],
                                "MNI152_T1_2mm.nii.gz")

    # io spec
    input_spec = pe.Node(nipype.IdentityInterface(
        fields=['t1w_file', 't2w_file', 'mask_file']),
        name='input_spec'
    )
    input_func_spec = pe.Node(nipype.IdentityInterface(fields=['fmri_file']),
                              name='input_func_spec')
    hcp_spec = pe.Node(nipype.IdentityInterface(
        fields=['t1w', 't2w', 't1w_acpc_xfm', 't2w_acpc_xfm',
                't2w_to_t1w_xfm', 't1w_distortion', 't2w_distortion',
                'bias_field', 't1w_res', 't2w_res', 't2w_dc', 't1w_res_brain',
                't2w_res_brain', 'wmparc', 'wmparc_1mm', 'fs2standard',
                'standard2fs']),
        name='hcp_spec'
    )

    # connect input DAIC files
    input_spec.inputs.t1w_file = inputs['t1w_file']
    input_spec.inputs.t2w_file = inputs['t2w_file']
    input_spec.inputs.mask_file = inputs['mask_file']
    input_func_spec.iterables = ('fmri_file',
                                 inputs['fmri_files'] + inputs['rs_files'])

    # setup HCP directory specification
    output_dir = os.path.abspath(inputs['output_dir'])
    subjects_dir = os.path.join(output_dir, 'T1w')
    freesurfer_dir = os.path.join(subjects_dir, subject_id)
    native_xfms_dir = os.path.join(subjects_dir, 'xfms')
    t2w_dir = os.path.join(output_dir, 'T2w')
    t2w_xfms_dir = os.path.join(t2w_dir, 'xfms')
    nonlinear_dir = os.path.join(output_dir, 'MNINonLinear')
    results_dir = os.path.join(nonlinear_dir, 'Results')
    nonlin_xfms_dir = os.path.join(nonlinear_dir, 'xfms')
    fs_transforms = os.path.join(freesurfer_dir, 'mri', 'transforms')

    # create directory tree
    for directory in [output_dir, subjects_dir, native_xfms_dir, t2w_dir,
                      t2w_xfms_dir, results_dir, nonlin_xfms_dir]:
        os.makedirs(directory, exist_ok=True)
    if not os.path.isdir(freesurfer_dir):
        shutil.copytree(inputs['fs_source_dir'], freesurfer_dir)
    os.makedirs(fs_transforms, exist_ok=True)

    def get_name(x):
        boldid = re.compile(r'(BOLD[0-9]*).*')
        number = re.compile(r'rsBOLD_data_scan([0-9]*).*')
        basename = os.path.basename(x).split('.')[0]
        match = boldid.match(basename)
        if match is not None:
            name = match.groups()[0]
        else:
            name = 'fcproc%s' % number.match(basename).groups()[0]
        taskname = 'task-%s' % name
        return taskname
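    # For reference (illustrative filenames only, not from the original data):
    #   get_name('BOLD01_run.mgz')         -> 'task-BOLD01'
    #   get_name('rsBOLD_data_scan02.mgz') -> 'task-fcproc02'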

    fmrinames = map(get_name, inputs['fmri_files'] + inputs['rs_files'])
    for fmriname in fmrinames:
        directory = os.path.join(results_dir, fmriname)
        os.makedirs(directory, exist_ok=True)

    # HCP filename specification
    hcp_spec.inputs.t1w = os.path.join(subjects_dir, 'T1w.nii.gz')
    hcp_spec.inputs.t2w = os.path.join(t2w_dir, 'T2w.nii.gz')
    hcp_spec.inputs.t1w_acpc_xfm = os.path.join(native_xfms_dir, 'acpc.mat')
    hcp_spec.inputs.t2w_acpc_xfm = os.path.join(t2w_xfms_dir, 'acpc.mat')
    hcp_spec.inputs.t2w_to_t1w_xfm = os.path.join(fs_transforms, 'T2wtoT1w.mat')
    hcp_spec.inputs.t1w_distortion = os.path.join(
        native_xfms_dir, 'T1w_dc.nii.gz')
    hcp_spec.inputs.t2w_distortion = os.path.join(
        native_xfms_dir, 'T2w_reg_dc.nii.gz')
    hcp_spec.inputs.bias_field = os.path.join(
        subjects_dir, 'BiasField_acpc_dc.nii.gz')
    hcp_spec.inputs.t1w_res = os.path.join(
        subjects_dir, 'T1w_acpc_dc_restore.nii.gz')
    hcp_spec.inputs.t1w_res_brain = os.path.join(
        subjects_dir, 'T1w_acpc_dc_restore_brain.nii.gz')
    hcp_spec.inputs.t2w_res = os.path.join(
        subjects_dir, 'T2w_acpc_dc_restore.nii.gz')
    hcp_spec.inputs.t2w_dc = os.path.join(
        subjects_dir, 'T2w_acpc_dc.nii.gz')
    hcp_spec.inputs.t2w_res_brain = os.path.join(
        subjects_dir, 'T2w_acpc_dc_restore_brain.nii.gz')
    hcp_spec.inputs.wmparc = os.path.join(subjects_dir, 'wmparc.nii.gz')
    hcp_spec.inputs.wmparc_1mm = os.path.join(subjects_dir, 'wmparc_1mm.nii.gz')
    hcp_spec.inputs.fs2standard = os.path.join(nonlin_xfms_dir,
                                               'fs2standard.nii.gz')

    ## create workflow components

    # utility
    def rename_func(in_file, path):
        # conditional renaming for func; fc-preprocessed data handled separately
        import os
        import re
        import shutil
        func_pattern = re.compile(r'(?P<name>BOLD[0-9]*).*')
        fc_pattern = re.compile(r'rsBOLD_data_scan(?P<number>[0-9]*).*')
        basename = os.path.basename(in_file)
        name = func_pattern.match(basename)
        number = fc_pattern.match(basename)
        if name:
            taskname = 'task-%s' % name.groupdict()['name']
        elif number:
            taskname = 'task-fcproc%s' % number.groupdict()['number']
        else:
            raise ValueError('unrecognized functional filename: %s' % basename)
        out_file = os.path.join(path, 'MNINonLinear', 'Results',
                                taskname, taskname + '.nii.gz')
        shutil.copyfile(in_file, out_file)
        return out_file
    rename = pe.Node(utility.Function(input_names=['in_file', 'path'],
                                      output_names=['out_file'],
                                      function=rename_func),
                     name='rename')
    rename.inputs.path = os.path.abspath(output_dir)
    copy_str = 'def f(src, dest): shutil.copy(src, dest); return dest'
    copy = pe.Node(
        utility.Function(
            input_names=['src', 'dest'], output_names=['dest'],
            imports=['import shutil'], function_str=copy_str
        ),
        name='copy'
    )
    basename_str = 'def f(path): return osp.basename(path).split(".")[0]'
    basename = pe.Node(
        utility.Function(
            input_names=['path'], output_names=['out_name'],
            imports=['import os.path as osp'], function_str=basename_str
        ),
        name='basename'
    )

    # mri convert
    convert_t1 = pe.Node(freesurfer.MRIConvert(out_type='niigz'),
                         name='convert_t1')
    convert_t2 = convert_t1.clone(name='convert_t2')
    convert_mask = convert_t1.clone(name='convert_mask')
    convert_func = pe.Node(freesurfer.MRIConvert(out_type='niigz'),
                           name='convert_func')

    # acpc alignment
    calc_acpc = pe.Node(
        fsl.FLIRT(reference=reference, dof=6, interp='spline'),
        name='calc_acpc'
    )
    copy_xfm = copy.clone(name='copy_xfm')
    apply_acpc = pe.Node(
        fsl.FLIRT(reference=reference, apply_xfm=True, interp='spline'),
        name='apply_acpc'
    )
    copy_t2w_res = copy.clone(name='copy_t2w_res')
    apply_acpc_nn = pe.Node(
        fsl.FLIRT(reference=reference, apply_xfm=True,
                  interp='nearestneighbour'),
        name='apply_acpc_nn'
    )
    mask_t1w = pe.Node(fsl.ApplyMask(), name='mask_t1w')
    mask_t2w = pe.Node(fsl.ApplyMask(), name='mask_t2w')
    resample_mask = pe.Node(
        fsl.FLIRT(apply_isoxfm=1, interp='nearestneighbour'),
        name='resample_mask'
    )

    # functional transforms
    select_first = pe.JoinNode(
        utility.Select(index=[0]),
        joinsource='input_func_spec',
        joinfield='inlist',
        name='select_first'
    )
    fs_to_fmri = pe.Node(fsl.FLIRT(cost='mutualinfo', dof=6), name='fs_to_func')
    fmri_to_fs = pe.Node(fsl.ConvertXFM(invert_xfm=True), name='func_to_fs')
    concat_warps = pe.Node(
        fsl.ConvertWarp(relwarp=True, out_relwarp=True, reference=reference2mm),
        name='concat_warps'
    )
    warp_mask = pe.Node(
        fsl.ApplyWarp(ref_file=reference2mm, interp='nn', relwarp=True),
        name='warp_mask'
    )
    mask_func = pe.Node(fsl.ApplyMask(), name='apply_mask')
    apply_warpfield = pe.Node(
        fsl.ApplyWarp(ref_file=reference2mm, interp='spline', relwarp=True),
        name='apply_warpfield'
    )
    timeseries_mean = pe.Node(
        fsl.MeanImage(dimension='T'), name='timeseries_mean'
    )
    renamesb = pe.Node(
        utility.Rename(parse_string=r'task-(?P<name>.*)_.*',
                       format_string='%(path)s/MNINonLinear/Results/'
                                     'task-%(name)s/task-%(name)s_SBRef.nii.gz',
                       path=os.path.abspath(output_dir)),
        name='renamesb'
    )

    # identity transforms
    identity_matrix = pe.Node(fsl.preprocess.FLIRT(),
                              name='identity_matrix')  # t1 -> t1 matrix
    zeroes = pe.Node(fsl.BinaryMaths(operation='mul', operand_value=0),
                     name='zeroes')
    repeat_zeroes = pe.Node(utility.Merge(3), name='repeat_zeroes')
    identity_warpfield = pe.Node(fsl.Merge(dimension='t'),
                                 name='identity_warpfield')  # 3D warp in t-dim
    identity_biasfield = pe.Node(fsl.BinaryMaths(operation='add',
                                                 operand_value=1),
                                 name='identity_biasfield')  # bias 1 everywhere
    copy_warpfield = copy.clone(name='copy_warpfield')
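    # Sketch of what the identity transforms above amount to (explanatory
    # comments, not from the original source): 'zeroes' is the T1 multiplied
    # by 0, so merging three copies along t yields a zero displacement field
    # -- a warp that moves nothing -- and adding 1 to it yields a bias field
    # of 1.0 everywhere, i.e. no intensity correction. Both exist only so the
    # downstream HCP stages receive the transform files they expect.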

    # hcp nodes
    postfreesurfer, fmrisurface = create_hcp_nodes(output_dir, subject_id)
    executivesummary = pe.JoinNode(
        ExecutiveSummary(in_unprocessed=os.path.abspath(output_dir),
                         in_processed=os.path.abspath(output_dir),
                         in_subjectid=subject_id,
                         in_executivesummary='executivesummary'),
        joinfield='in_files',
        joinsource='input_func_spec',
        name='executivesummary'
    )

    ## workflow DAG
    wf = pe.Workflow(name=inputs['workflow_name'], base_dir=inputs['base_dir'])

    # convert to nii.gz
    wf.connect(
        [(input_spec, convert_t1, [('t1w_file', 'in_file')]),
         (input_spec, convert_t2, [('t2w_file', 'in_file')]),
         (input_spec, convert_mask, [('mask_file', 'in_file')])]
    )
    # rigid align to acpc/MNI, apply mask
    wf.connect(
        [(convert_t1, calc_acpc, [('out_file', 'in_file')]),
         (calc_acpc, copy_xfm, [('out_matrix_file', 'src')]),
         (copy_xfm, apply_acpc, [('dest', 'in_matrix_file')]),
         (calc_acpc, apply_acpc_nn, [('out_matrix_file', 'in_matrix_file')]),
         (convert_t2, apply_acpc, [('out_file', 'in_file')]),
         (apply_acpc, copy_t2w_res, [('out_file', 'src')]),
         (convert_mask, apply_acpc_nn, [('out_file', 'in_file')]),
         (calc_acpc, mask_t1w, [('out_file', 'in_file')]),
         (apply_acpc_nn, mask_t1w, [('out_file', 'mask_file')]),
         (apply_acpc, mask_t2w, [('out_file', 'in_file')]),
         (apply_acpc_nn, mask_t2w, [('out_file', 'mask_file')]),
         (apply_acpc_nn, resample_mask, [('out_file', 'in_file'),
                                         ('out_file', 'reference')])]
    )
    # create identity transformations for data flow
    wf.connect(
        [(calc_acpc, identity_matrix, [('out_file', 'in_file'),
                                       ('out_file', 'reference')]),
         (calc_acpc, zeroes, [('out_file', 'in_file')]),
         (zeroes, repeat_zeroes, [('out_file', 'in1'), ('out_file', 'in2'),
                                  ('out_file', 'in3')]),
         (repeat_zeroes, identity_warpfield, [('out', 'in_files')]),
         (zeroes, identity_biasfield, [('out_file', 'in_file')]),
         (identity_warpfield, copy_warpfield, [('merged_file', 'src')])]
    )
    # connect postfreesurfer
    # there are more implicit connections, but these suffice for the dependency graph
    wf.connect(
        [(calc_acpc, postfreesurfer, [('out_file', 'in_t1')]),
         (copy_t2w_res, postfreesurfer, [('dest', 'in_t1_dc')]),
         (identity_warpfield, postfreesurfer, [('merged_file', 'in_warpfield')]),
         (identity_biasfield, postfreesurfer, [('out_file', 'in_biasfield')]),
         (copy_warpfield, postfreesurfer, [('dest', 'in_t2warpfield')]),
         (resample_mask, postfreesurfer, [('out_file', 'in_wmparc')]),
         (mask_t1w, postfreesurfer, [('out_file', 'in_t1brain')]),
         (mask_t2w, postfreesurfer, [('out_file', 'in_t2brain')]),
         (identity_matrix, postfreesurfer, [('out_file', 'in_t2_to_t1')])]
    )
    # transform functionals to final space
    # @TODO leverage SELECT and RENAME utilities with Don's information. In
    #  the interim, functional data is simply named task-BOLD##
    wf.connect(
        [(input_func_spec, convert_func, [('fmri_file', 'in_file')]),
         (convert_func, select_first, [('out_file', 'inlist')]),
         (convert_t1, fs_to_fmri, [('out_file', 'in_file')]),
         (select_first, fs_to_fmri, [('out', 'reference')]),
         (fs_to_fmri, fmri_to_fs, [('out_matrix_file', 'in_file')]),
         (postfreesurfer, concat_warps, [('out_warp', 'warp1')]),
         (fmri_to_fs, concat_warps, [('out_file', 'premat')]),
         (concat_warps, apply_warpfield, [('out_file', 'field_file')]),
         (convert_func, apply_warpfield, [('out_file', 'in_file')]),
         (convert_mask, warp_mask, [('out_file', 'in_file')]),
         (postfreesurfer, warp_mask, [('out_warp', 'field_file')]),
         (warp_mask, mask_func, [('out_file', 'mask_file')]),
         (apply_warpfield, mask_func, [('out_file', 'in_file')])]
    )
    # connect fmrisurface
    # there are more implicit connections, but these suffice for the dependency graph
    wf.connect(
        [(mask_func, rename, [('out_file', 'in_file')]),
         (rename, timeseries_mean, [('out_file', 'in_file')]),
         (rename, fmrisurface, [('out_file', 'in_fmri')]),
         (rename, basename, [('out_file', 'path')]),
         (basename, fmrisurface, [('out_name', 'fmriname')]),
         (timeseries_mean, renamesb, [('out_file', 'in_file')]),
         (renamesb, fmrisurface, [('out_file', 'in_sbref')])]
    )
    # connect executivesummary
    wf.connect(
        [(fmrisurface, executivesummary, [('out_file', 'in_files')])]
    )

    # draw workflow: output/daic2hcp/graph.png
    wf.write_graph(graph2use='orig', dotfilename='graph.dot')

    # connect intermediates to hcp filename specifications (not shown in graph)
    wf.connect(
        [(hcp_spec, convert_t1, [('t1w', 'out_file')]),
         (hcp_spec, convert_t2, [('t2w', 'out_file')]),
         (hcp_spec, convert_mask, [('wmparc', 'out_file')]),
         (hcp_spec, calc_acpc, [('t1w_acpc_xfm', 'out_matrix_file')]),
         (hcp_spec, identity_matrix, [('t2w_to_t1w_xfm', 'out_matrix_file')]),
         (hcp_spec, identity_warpfield, [('t1w_distortion', 'merged_file')]),
         (hcp_spec, identity_biasfield, [('bias_field', 'out_file')]),
         (hcp_spec, copy_warpfield, [('t2w_distortion', 'dest')]),
         (hcp_spec, calc_acpc, [('t1w_res', 'out_file')]),
         (hcp_spec, apply_acpc, [('t2w_res', 'out_file')]),
         (hcp_spec, copy_xfm, [('t2w_acpc_xfm', 'dest')]),
         (hcp_spec, copy_t2w_res, [('t2w_dc', 'dest')]),
         (hcp_spec, mask_t1w, [('t1w_res_brain', 'out_file')]),
         (hcp_spec, mask_t2w, [('t2w_res_brain', 'out_file')]),
         (hcp_spec, resample_mask, [('wmparc_1mm', 'out_file')]),
         (hcp_spec, concat_warps, [('fs2standard', 'out_file')])]
    )

    return wf
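
A hedged invocation sketch: every path and ID below is a placeholder, but the keyword names match the inputs[...] lookups inside the function, and HCPPIPEDIR_Templates must point at an HCP templates directory before the call:

wf = generate_workflow(
    subjectid='SUBJ01',                   # placeholder subject ID
    t1w_file='/daic/T1.mgz',              # placeholder paths below
    t2w_file='/daic/T2.mgz',
    mask_file='/daic/brainmask.mgz',
    fmri_files=['/daic/BOLD01.mgz'],
    rs_files=['/daic/rsBOLD_data_scan01.mgz'],
    fs_source_dir='/daic/fs/SUBJ01',      # copied into <output_dir>/T1w/<subject>
    output_dir='/out/SUBJ01',
    workflow_name='daic2hcp',
    base_dir='/scratch')
wf.run()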
Code example #27
datasink.inputs.base_directory = os.path.abspath('./fslresting/compcorred')
"""
Set up complete workflow
------------------------
"""


def get_substitutions(subject_id):
    '''Replace output names of files with more meaningful ones
    '''
    return [('vol0000_warp_merged_detrended_regfilt_filt',
             '%s_filtered' % subject_id),
            ('vol0000_warp_merged_tsnr_stddev_thresh',
             '%s_noisyvoxels' % subject_id)]
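
# For example, get_substitutions('s1') returns the (old, new) pairs that make
# the DataSink rewrite 'vol0000_warp_merged_detrended_regfilt_filt' to
# 's1_filtered' and 'vol0000_warp_merged_tsnr_stddev_thresh' to
# 's1_noisyvoxels' wherever those substrings appear in an output path.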


l1pipeline = pe.Workflow(name="resting")
l1pipeline.base_dir = os.path.abspath('./fslresting/workingdir')
l1pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
                    (datasource, restingflow, [('func', 'inputspec.func')]),
                    (infosource, datasink, [('subject_id', 'container'),
                                            (('subject_id', get_substitutions),
                                             'substitutions')]),
                    (restingflow, datasink,
                     [('outputspec.noise_mask_file', '@noisefile'),
                      ('outputspec.filtered_file', '@filteredfile')])])

if __name__ == '__main__':
    l1pipeline.run()
    l1pipeline.write_graph()
Code example #28
def ANTSTemplateBuildSingleIterationWF(iterationPhasePrefix=''):
    """

    Inputs::

           inputspec.images :
           inputspec.fixed_image :
           inputspec.ListOfPassiveImagesDictionaries :

    Outputs::

           outputspec.template :
           outputspec.transforms_list :
           outputspec.passive_deformed_templates :
    """

    TemplateBuildSingleIterationWF = pe.Workflow(
        name='ANTSTemplateBuildSingleIterationWF_' + str(iterationPhasePrefix))

    inputSpec = pe.Node(interface=util.IdentityInterface(
        fields=['images', 'fixed_image', 'ListOfPassiveImagesDictionaries']),
                        run_without_submitting=True,
                        name='inputspec')
    ## HACK: TODO: Need to move all local functions to a common utility file, or to
    ##             the top of this file, so that they do not change due to
    ##             re-indenting; otherwise re-indenting for flow control will
    ##             change their hash.
    ## HACK: TODO: REMOVE 'transforms_list'; it is not used. That will change all the hashes.
    ## HACK: TODO: Run all python files through a code beautifier; it has gotten pretty ugly.
    outputSpec = pe.Node(interface=util.IdentityInterface(
        fields=['template', 'transforms_list', 'passive_deformed_templates']),
                         run_without_submitting=True,
                         name='outputspec')

    ### NOTE: MapNode! Warp each of the original images to the provided fixed_image, the current template
    BeginANTS = pe.MapNode(interface=ANTS(),
                           name='BeginANTS',
                           iterfield=['moving_image'])
    BeginANTS.inputs.dimension = 3
    BeginANTS.inputs.output_transform_prefix = str(
        iterationPhasePrefix) + '_tfm'
    BeginANTS.inputs.metric = ['CC']
    BeginANTS.inputs.metric_weight = [1.0]
    BeginANTS.inputs.radius = [5]
    BeginANTS.inputs.transformation_model = 'SyN'
    BeginANTS.inputs.gradient_step_length = 0.25
    BeginANTS.inputs.number_of_iterations = [50, 35, 15]
    BeginANTS.inputs.number_of_affine_iterations = [
        10000, 10000, 10000, 10000, 10000
    ]
    BeginANTS.inputs.use_histogram_matching = True
    BeginANTS.inputs.mi_option = [32, 16000]
    BeginANTS.inputs.regularization = 'Gauss'
    BeginANTS.inputs.regularization_gradient_field_sigma = 3
    BeginANTS.inputs.regularization_deformation_field_sigma = 0
    TemplateBuildSingleIterationWF.connect(inputSpec, 'images', BeginANTS,
                                           'moving_image')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS,
                                           'fixed_image')

    MakeTransformsLists = pe.Node(interface=util.Function(
        function=MakeListsOfTransformLists,
        input_names=['warpTransformList', 'AffineTransformList'],
        output_names=['out']),
                                  run_without_submitting=True,
                                  name='MakeTransformsLists')
    MakeTransformsLists.inputs.ignore_exception = True
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'warp_transform',
                                           MakeTransformsLists,
                                           'warpTransformList')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'affine_transform',
                                           MakeTransformsLists,
                                           'AffineTransformList')

    ## Now warp all of the input images
    wimtdeformed = pe.MapNode(
        interface=WarpImageMultiTransform(),
        iterfield=['transformation_series', 'input_image'],
        name='wimtdeformed')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'images', wimtdeformed,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(MakeTransformsLists, 'out',
                                           wimtdeformed,
                                           'transformation_series')

    ##  Shape Update Next =====
    ## Now average all deformed input images together to create an updated template average
    AvgDeformedImages = pe.Node(interface=AverageImages(),
                                name='AvgDeformedImages')
    AvgDeformedImages.inputs.dimension = 3
    AvgDeformedImages.inputs.output_average_image = str(
        iterationPhasePrefix) + '.nii.gz'
    AvgDeformedImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image",
                                           AvgDeformedImages, 'images')

    ## Now average all affine transforms together
    AvgAffineTransform = pe.Node(interface=AverageAffineTransform(),
                                 name='AvgAffineTransform')
    AvgAffineTransform.inputs.dimension = 3
    AvgAffineTransform.inputs.output_affine_transform = 'Average_' + str(
        iterationPhasePrefix) + '_Affine.mat'
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'affine_transform',
                                           AvgAffineTransform, 'transforms')

    ## Now average the warp fields together
    AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages')
    AvgWarpImages.inputs.dimension = 3
    AvgWarpImages.inputs.output_average_image = str(
        iterationPhasePrefix) + 'warp.nii.gz'
    AvgWarpImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'warp_transform',
                                           AvgWarpImages, 'images')

    ## Scale the average warp field by the negative gradient step
    ## TODO: For now GradientStep is set to 0.25 as a hard-coded default value.
    GradientStep = 0.25
    GradientStepWarpImage = pe.Node(interface=MultiplyImages(),
                                    name='GradientStepWarpImage')
    GradientStepWarpImage.inputs.dimension = 3
    GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep
    GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str(
        iterationPhasePrefix) + '_warp.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgWarpImages,
                                           'output_average_image',
                                           GradientStepWarpImage,
                                           'first_input')

    ## Now create the new template shape based on the average of all deformed images
    UpdateTemplateShape = pe.Node(interface=WarpImageMultiTransform(),
                                  name='UpdateTemplateShape')
    UpdateTemplateShape.inputs.invert_affine = [1]
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           UpdateTemplateShape,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(AvgAffineTransform,
                                           'affine_transform',
                                           UpdateTemplateShape,
                                           'transformation_series')
    TemplateBuildSingleIterationWF.connect(GradientStepWarpImage,
                                           'output_product_image',
                                           UpdateTemplateShape, 'input_image')

    ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node(
        interface=util.Function(
            function=MakeTransformListWithGradientWarps,
            input_names=['averageAffineTranform', 'gradientStepWarp'],
            output_names=['TransformListWithGradientWarps']),
        run_without_submitting=True,
        name='MakeTransformListWithGradientWarps')
    ApplyInvAverageAndFourTimesGradientStepWarpImage.inputs.ignore_exception = True

    TemplateBuildSingleIterationWF.connect(
        AvgAffineTransform, 'affine_transform',
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'averageAffineTranform')
    TemplateBuildSingleIterationWF.connect(
        UpdateTemplateShape, 'output_image',
        ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp')

    ReshapeAverageImageWithShapeUpdate = pe.Node(
        interface=WarpImageMultiTransform(),
        name='ReshapeAverageImageWithShapeUpdate')
    ReshapeAverageImageWithShapeUpdate.inputs.invert_affine = [1]
    ReshapeAverageImageWithShapeUpdate.inputs.out_postfix = '_Reshaped'
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate,
        'transformation_series')
    TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate,
                                           'output_image', outputSpec,
                                           'template')

    ######
    ######
    ######  Process all the passive deformed images in a way similar to the main image used for registration
    ######
    ######
    ######
    ##############################################
    ## Now warp all the ListOfPassiveImagesDictionaries images
    FlattenTransformAndImagesListNode = pe.Node(
        Function(function=FlattenTransformAndImagesList,
                 input_names=[
                     'ListOfPassiveImagesDictionaries', 'transformation_series'
                 ],
                 output_names=[
                     'flattened_images', 'flattened_transforms',
                     'flattened_image_nametypes'
                 ]),
        run_without_submitting=True,
        name="99_FlattenTransformAndImagesList")
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfPassiveImagesDictionaries',
                                           FlattenTransformAndImagesListNode,
                                           'ListOfPassiveImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(MakeTransformsLists, 'out',
                                           FlattenTransformAndImagesListNode,
                                           'transformation_series')
    wimtPassivedeformed = pe.MapNode(
        interface=WarpImageMultiTransform(),
        iterfield=['transformation_series', 'input_image'],
        name='wimtPassivedeformed')
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           wimtPassivedeformed,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_images',
                                           wimtPassivedeformed, 'input_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_transforms',
                                           wimtPassivedeformed,
                                           'transformation_series')

    RenestDeformedPassiveImagesNode = pe.Node(
        Function(
            function=RenestDeformedPassiveImages,
            input_names=['deformedPassiveImages', 'flattened_image_nametypes'],
            output_names=[
                'nested_imagetype_list', 'outputAverageImageName_list',
                'image_type_list'
            ]),
        run_without_submitting=True,
        name="99_RenestDeformedPassiveImages")
    TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image',
                                           RenestDeformedPassiveImagesNode,
                                           'deformedPassiveImages')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_image_nametypes',
                                           RenestDeformedPassiveImagesNode,
                                           'flattened_image_nametypes')
    ## Now average all passive deformed input images together to create an updated template average
    AvgDeformedPassiveImages = pe.MapNode(
        interface=AverageImages(),
        iterfield=['images', 'output_average_image'],
        name='AvgDeformedPassiveImages')
    AvgDeformedPassiveImages.inputs.dimension = 3
    AvgDeformedPassiveImages.inputs.normalize = False
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "nested_imagetype_list",
                                           AvgDeformedPassiveImages, 'images')
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "outputAverageImageName_list",
                                           AvgDeformedPassiveImages,
                                           'output_average_image')

    ## TODO: Now need to reshape all the passive images as well
    ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode(
        interface=WarpImageMultiTransform(),
        iterfield=['input_image', 'reference_image', 'out_postfix'],
        name='ReshapeAveragePassiveImageWithShapeUpdate')
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_affine = [1]
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, "image_type_list",
        ReshapeAveragePassiveImageWithShapeUpdate, 'out_postfix')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'input_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps',
        ReshapeAveragePassiveImageWithShapeUpdate, 'transformation_series')
    TemplateBuildSingleIterationWF.connect(
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec,
        'passive_deformed_templates')

    return TemplateBuildSingleIterationWF
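
A hedged usage sketch: the filenames are placeholders, and the exact layout of the passive-image dictionaries is whatever FlattenTransformAndImagesList (not shown here) expects, so the {nametype: filename} form below is an assumption:

onewf = ANTSTemplateBuildSingleIterationWF(iterationPhasePrefix='iter1')
onewf.inputs.inputspec.images = ['s01_t1.nii.gz', 's02_t1.nii.gz']  # placeholders
onewf.inputs.inputspec.fixed_image = 'initial_template.nii.gz'      # placeholder
onewf.inputs.inputspec.ListOfPassiveImagesDictionaries = [
    {'T2': 's01_t2.nii.gz'},  # assumed {nametype: filename} layout
    {'T2': 's02_t2.nii.gz'},
]
onewf.run()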
Code example #29
def run_mean_functional(func_reorient, out_dir=None, run=True):
    """Run the 'mean_functional_workflow' function to execute the modular
    workflow with the provided inputs.

    - This workflow will NOT remove background noise.

    :type func_reorient: str
    :param func_reorient: Filepath to the deobliqued, reoriented functional
                          timeseries.
    :type out_dir: str
    :param out_dir: (default: None) The output directory to write the results
                    to; if left as None, will write to the current directory.
    :type run: bool
    :param run: (default: True) Will run the workflow; if set to False, will
                connect the Nipype workflow and return the workflow object
                instead.
    :rtype: str
    :return: (if run=True) The filepath of the generated mean_functional
             file.
    :rtype: Nipype workflow object
    :return: (if run=False) The connected Nipype workflow object.
    :rtype: str
    :return: (if run=False) The base directory of the workflow if it were to
             be run.
    """

    import os
    import glob

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    output = "mean_functional"
    workflow = pe.Workflow(name='%s_workflow' % output)

    if not out_dir:
        out_dir = os.getcwd()

    workflow_dir = os.path.join(out_dir, "workflow_output", output)
    workflow.base_dir = workflow_dir

    resource_pool = {}
    config = {}
    num_cores_per_subject = 1

    resource_pool["func_reorient"] = func_reorient

    workflow, resource_pool = \
        mean_functional_workflow(workflow, resource_pool, config)

    ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
    ds.inputs.base_directory = workflow_dir
    
    node, out_file = resource_pool[output]

    workflow.connect(node, out_file, ds, output)

    if run:
        workflow.run(plugin='MultiProc',
                     plugin_args={'n_procs': num_cores_per_subject})
        outpath = glob.glob(os.path.join(workflow_dir, "mean_functional",
                                         "*"))[0]
        return outpath
    else:
        return workflow, workflow.base_dir
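
Following the docstring, a minimal call sketch (the input path and output directory are placeholders):

# Build and run immediately, returning the mean functional's filepath:
mean_path = run_mean_functional('/data/func_reorient.nii.gz',
                                out_dir='/tmp/qap', run=True)

# Or only connect the workflow and inspect it before running:
wf, wf_base_dir = run_mean_functional('/data/func_reorient.nii.gz',
                                      out_dir='/tmp/qap', run=False)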
Code example #30
def CreateJointFusionWorkflow(WFname,
                              onlyT1,
                              master_config,
                              runFixFusionLabelMap=True):
    from nipype.interfaces import ants

    if onlyT1:
        n_modality = 1
    else:
        n_modality = 2
    CLUSTER_QUEUE = master_config['queue']
    CLUSTER_QUEUE_LONG = master_config['long_q']

    JointFusionWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(
        interface=IdentityInterface(fields=[
            'subj_t1_image',  # Desired image to create label map for
            'subj_t2_image',  # Desired image to create label map for
            'subj_lmks',  # The landmarks corresponding to t1_image
            'subj_fixed_head_labels',
            # The fixed head labels from BABC
            'subj_posteriors',  # The BABC posteriors
            'subj_left_hemisphere',  # The warped left hemisphere mask
            'atlasWeightFilename',  # The static weights file name
            'labelBaseFilename'
            # Atlas label base name ex) neuro_lbls.nii.gz
        ]),
        run_without_submitting=True,
        name='inputspec')
    outputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'JointFusion_HDAtlas20_2015_label',
        'JointFusion_HDAtlas20_2015_CSFVBInjected_label',
        'JointFusion_HDAtlas20_2015_fs_standard_label',
        'JointFusion_HDAtlas20_2015_lobe_label',
        'JointFusion_extended_snapshot',
        'JointFusion_HDAtlas20_2015_dustCleaned_label',
        'JointFusion_volumes_csv', 'JointFusion_volumes_json',
        'JointFusion_lobe_volumes_csv', 'JointFusion_lobe_volumes_json'
    ]),
                          run_without_submitting=True,
                          name='outputspec')

    BLICreator = dict()
    A2SantsRegistrationPreJointFusion_SyN = dict()
    movingROIAuto = dict()
    labelMapResample = dict()
    NewlabelMapResample = dict()

    jointFusion_atlas_mergeindex = 0
    merge_input_offset = 1  # Merge nodes are indexed from 1, not zero!
    """
    multimodal ants registration if t2 exists
    """
    sessionMakeMultimodalInput = pe.Node(Function(
        function=MakeVector,
        input_names=['inFN1', 'inFN2', 'jointFusion'],
        output_names=['outFNs']),
                                         run_without_submitting=True,
                                         name="sessionMakeMultimodalInput")
    sessionMakeMultimodalInput.inputs.jointFusion = False
    JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                          sessionMakeMultimodalInput, 'inFN1')
    """
    T2 resample to T1 average image
    :: BRAINSABC changed its behavior to retain image's original spacing & origin
    :: Since antsJointFusion only works for the identical origin images for targets,
    :: Resampling is placed at this stage
    """
    subjectT2Resample = pe.Node(interface=BRAINSResample(),
                                name="BRAINSResample_T2_forAntsJointFusion")
    if not onlyT1:
        subjectT2Resample.plugin_args = {
            'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1),
            'overwrite': True
        }
        subjectT2Resample.inputs.pixelType = 'short'
        subjectT2Resample.inputs.interpolationMode = 'Linear'
        subjectT2Resample.inputs.outputVolume = "t2_resampled_in_t1.nii.gz"
        # subjectT2Resample.inputs.warpTransform= "Identity" # Default is "Identity"

        JointFusionWF.connect(inputsSpec, 'subj_t1_image', subjectT2Resample,
                              'referenceVolume')
        JointFusionWF.connect(inputsSpec, 'subj_t2_image', subjectT2Resample,
                              'inputVolume')

        JointFusionWF.connect(subjectT2Resample, 'outputVolume',
                              sessionMakeMultimodalInput, 'inFN2')

    # print('jointFusion_atlas_db_base')
    print("master_config")
    print(master_config)
    print("master_config['jointfusion_atlas_db_base']")
    print(master_config['jointfusion_atlas_db_base'])
    jointFusionAtlasDict = readMalfAtlasDbBase(
        master_config['jointfusion_atlas_db_base'])
    number_of_atlas_sources = len(jointFusionAtlasDict)
    jointFusionAtlases = dict()
    atlasMakeMultimodalInput = dict()
    t2Resample = dict()
    warpedAtlasLblMergeNode = pe.Node(interface=Merge(number_of_atlas_sources),
                                      name="LblMergeAtlas")
    NewwarpedAtlasLblMergeNode = pe.Node(
        interface=Merge(number_of_atlas_sources), name="fswmLblMergeAtlas")
    # "HACK NOT to use T2 for JointFusion only"
    # warpedAtlasesMergeNode = pe.Node(interface=Merge(number_of_atlas_sources*n_modality),name="MergeAtlases")
    warpedAtlasesMergeNode = pe.Node(interface=Merge(number_of_atlas_sources *
                                                     1),
                                     name="MergeAtlases")

    ## if using registration masking, then run ROIAuto on the fixed and moving images and connect it to the registrations
    UseRegistrationMasking = True
    if UseRegistrationMasking:
        from nipype.interfaces.semtools.segmentation.specialized import BRAINSROIAuto

        fixedROIAuto = pe.Node(interface=BRAINSROIAuto(),
                               name="fixedROIAUTOMask")
        fixedROIAuto.inputs.ROIAutoDilateSize = 10
        fixedROIAuto.inputs.outputROIMaskVolume = "fixedImageROIAutoMask.nii.gz"
        JointFusionWF.connect(inputsSpec, 'subj_t1_image', fixedROIAuto,
                              'inputVolume')

    for jointFusion_atlas_subject in list(jointFusionAtlasDict.keys()):
        ## Need DataGrabber Here For the Atlas
        jointFusionAtlases[jointFusion_atlas_subject] = pe.Node(
            interface=IdentityInterface(
                fields=['t1', 't2', 'label', 'lmks', 'registration_mask']),
            name='jointFusionAtlasInput' + jointFusion_atlas_subject)
        jointFusionAtlases[
            jointFusion_atlas_subject].inputs.t1 = jointFusionAtlasDict[
                jointFusion_atlas_subject]['t1']
        jointFusionAtlases[
            jointFusion_atlas_subject].inputs.t2 = jointFusionAtlasDict[
                jointFusion_atlas_subject]['t2']
        jointFusionAtlases[
            jointFusion_atlas_subject].inputs.label = jointFusionAtlasDict[
                jointFusion_atlas_subject]['label']
        jointFusionAtlases[
            jointFusion_atlas_subject].inputs.lmks = jointFusionAtlasDict[
                jointFusion_atlas_subject]['lmks']
        jointFusionAtlases[jointFusion_atlas_subject].inputs.registration_mask = \
        jointFusionAtlasDict[jointFusion_atlas_subject]['registration_mask']
        ## Create BLI first
        ########################################################
        # Run BLI atlas_to_subject
        ########################################################
        BLICreator[jointFusion_atlas_subject] = pe.Node(
            interface=BRAINSLandmarkInitializer(),
            name="BLI_" + jointFusion_atlas_subject)
        BLICreator[
            jointFusion_atlas_subject].inputs.outputTransformFilename = "landmarkInitializer_{0}_to_subject_transform.h5".format(
                jointFusion_atlas_subject)

        JointFusionWF.connect(inputsSpec, 'atlasWeightFilename',
                              BLICreator[jointFusion_atlas_subject],
                              'inputWeightFilename')
        JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject],
                              'lmks', BLICreator[jointFusion_atlas_subject],
                              'inputMovingLandmarkFilename')
        JointFusionWF.connect(inputsSpec, 'subj_lmks',
                              BLICreator[jointFusion_atlas_subject],
                              'inputFixedLandmarkFilename')

        ##### Initialize with ANTS Transform For SyN
        currentAtlasToSubjectantsRegistration = 'SyN_AtlasToSubjectANTsPreJointFusion_' + jointFusion_atlas_subject
        A2SantsRegistrationPreJointFusion_SyN[
            jointFusion_atlas_subject] = pe.Node(
                interface=ants.Registration(),
                name=currentAtlasToSubjectantsRegistration)
        many_cpu_ANTsSyN_options_dictionary = {
            'qsub_args': modify_qsub_args(CLUSTER_QUEUE_LONG, 4, 2, 16),
            'overwrite': True
        }
        A2SantsRegistrationPreJointFusion_SyN[
            jointFusion_atlas_subject].plugin_args = many_cpu_ANTsSyN_options_dictionary
        if onlyT1:
            JFregistrationTypeDescription = "FiveStageAntsRegistrationT1Only"
        else:
            JFregistrationTypeDescription = "FiveStageAntsRegistrationMultiModal"
        CommonANTsRegistrationSettings(
            antsRegistrationNode=A2SantsRegistrationPreJointFusion_SyN[
                jointFusion_atlas_subject],
            registrationTypeDescription=JFregistrationTypeDescription,
            output_transform_prefix=jointFusion_atlas_subject +
            '_ToSubjectPreJointFusion_SyN',
            output_warped_image=jointFusion_atlas_subject + '_2subject.nii.gz',
            output_inverse_warped_image=None,  # NO NEED FOR THIS
            save_state=None,  # NO NEED FOR THIS
            invert_initial_moving_transform=False,
            initial_moving_transform=None)

        ## if using registration masking, then run ROIAuto on the fixed and moving images and connect it to the registrations
        if UseRegistrationMasking:
            from nipype.interfaces.semtools.segmentation.specialized import BRAINSROIAuto
            JointFusionWF.connect(
                fixedROIAuto, 'outputROIMaskVolume',
                A2SantsRegistrationPreJointFusion_SyN[
                    jointFusion_atlas_subject], 'fixed_image_mask')
            # JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels',
            #                       A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'fixed_image_mask')

            # NOTE: Moving image mask can be taken from Atlas directly so that it does not need to be read in
            # movingROIAuto[jointFusion_atlas_subject] = pe.Node(interface=BRAINSROIAuto(), name="movingROIAUTOMask_"+jointFusion_atlas_subject)
            # movingROIAuto.inputs.ROIAutoDilateSize=10
            # movingROIAuto[jointFusion_atlas_subject].inputs.outputROIMaskVolume = "movingImageROIAutoMask.nii.gz"
            # JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject], 't1', movingROIAuto[jointFusion_atlas_subject],'inputVolume')
            # JointFusionWF.connect(movingROIAuto[jointFusion_atlas_subject], 'outputROIMaskVolume',A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'moving_image_mask')
            JointFusionWF.connect(
                jointFusionAtlases[jointFusion_atlas_subject],
                'registration_mask', A2SantsRegistrationPreJointFusion_SyN[
                    jointFusion_atlas_subject], 'moving_image_mask')

        JointFusionWF.connect(
            BLICreator[jointFusion_atlas_subject], 'outputTransformFilename',
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'initial_moving_transform')
        """
        make multimodal input for atlases
        """
        atlasMakeMultimodalInput[jointFusion_atlas_subject] = pe.Node(
            Function(function=MakeVector,
                     input_names=['inFN1', 'inFN2', 'jointFusion'],
                     output_names=['outFNs']),
            run_without_submitting=True,
            name="atlasMakeMultimodalInput" + jointFusion_atlas_subject)
        atlasMakeMultimodalInput[
            jointFusion_atlas_subject].inputs.jointFusion = False
        JointFusionWF.connect(
            jointFusionAtlases[jointFusion_atlas_subject], 't1',
            atlasMakeMultimodalInput[jointFusion_atlas_subject], 'inFN1')

        if not onlyT1:
            JointFusionWF.connect(
                jointFusionAtlases[jointFusion_atlas_subject], 't2',
                atlasMakeMultimodalInput[jointFusion_atlas_subject], 'inFN2')

        JointFusionWF.connect(
            sessionMakeMultimodalInput, 'outFNs',
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'fixed_image')
        JointFusionWF.connect(
            atlasMakeMultimodalInput[jointFusion_atlas_subject], 'outFNs',
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'moving_image')
        "HACK NOT to use T2 for JointFusion"
        # JointFusionWF.connect(A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'warped_image',
        #               warpedAtlasesMergeNode,'in'+str(merge_input_offset + jointFusion_atlas_mergeindex*n_modality) )
        JointFusionWF.connect(
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'warped_image', warpedAtlasesMergeNode,
            'in' + str(merge_input_offset + jointFusion_atlas_mergeindex * 1))
        """
        Original t2 resampling
        """
        for modality_index in range(1, n_modality):
            t2Resample[jointFusion_atlas_subject] = pe.Node(
                interface=ants.ApplyTransforms(),
                name="resampledT2" + jointFusion_atlas_subject)
            many_cpu_t2Resample_options_dictionary = {
                'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1),
                'overwrite': True
            }
            t2Resample[
                jointFusion_atlas_subject].plugin_args = many_cpu_t2Resample_options_dictionary
            t2Resample[jointFusion_atlas_subject].inputs.num_threads = -1
            t2Resample[jointFusion_atlas_subject].inputs.dimension = 3
            t2Resample[
                jointFusion_atlas_subject].inputs.output_image = jointFusion_atlas_subject + '_t2.nii.gz'
            t2Resample[
                jointFusion_atlas_subject].inputs.interpolation = 'BSpline'
            t2Resample[jointFusion_atlas_subject].inputs.default_value = 0
            t2Resample[
                jointFusion_atlas_subject].inputs.invert_transform_flags = [
                    False
                ]

            JointFusionWF.connect(
                A2SantsRegistrationPreJointFusion_SyN[
                    jointFusion_atlas_subject], 'composite_transform',
                t2Resample[jointFusion_atlas_subject], 'transforms')
            JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                                  t2Resample[jointFusion_atlas_subject],
                                  'reference_image')
            JointFusionWF.connect(
                jointFusionAtlases[jointFusion_atlas_subject], 't2',
                t2Resample[jointFusion_atlas_subject], 'input_image')
            "HACK NOT to use T2 for JointFusion only"
            # JointFusionWF.connect(t2Resample[jointFusion_atlas_subject],'output_image',
            #               warpedAtlasesMergeNode,'in'+str(merge_input_offset + jointFusion_atlas_mergeindex*n_modality+modality_index) )
        """
        Original labelmap resampling
        """
        labelMapResample[jointFusion_atlas_subject] = pe.Node(
            interface=ants.ApplyTransforms(),
            name="resampledLabel" + jointFusion_atlas_subject)
        many_cpu_labelMapResample_options_dictionary = {
            'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1),
            'overwrite': True
        }
        labelMapResample[
            jointFusion_atlas_subject].plugin_args = many_cpu_labelMapResample_options_dictionary
        labelMapResample[jointFusion_atlas_subject].inputs.num_threads = -1
        labelMapResample[jointFusion_atlas_subject].inputs.dimension = 3
        labelMapResample[
            jointFusion_atlas_subject].inputs.output_image = jointFusion_atlas_subject + '_2_subj_lbl.nii.gz'
        labelMapResample[
            jointFusion_atlas_subject].inputs.interpolation = 'MultiLabel'
        labelMapResample[jointFusion_atlas_subject].inputs.default_value = 0
        labelMapResample[
            jointFusion_atlas_subject].inputs.invert_transform_flags = [False]

        JointFusionWF.connect(
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'composite_transform', labelMapResample[jointFusion_atlas_subject],
            'transforms')
        JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                              labelMapResample[jointFusion_atlas_subject],
                              'reference_image')
        JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject],
                              'label',
                              labelMapResample[jointFusion_atlas_subject],
                              'input_image')

        JointFusionWF.connect(
            labelMapResample[jointFusion_atlas_subject], 'output_image',
            warpedAtlasLblMergeNode,
            'in' + str(merge_input_offset + jointFusion_atlas_mergeindex))

        ### New labelmap resampling
        NewlabelMapResample[jointFusion_atlas_subject] = pe.Node(
            interface=ants.ApplyTransforms(),
            name="FSWM_WLABEL_" + jointFusion_atlas_subject)
        many_cpu_NewlabelMapResample_options_dictionary = {
            'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1),
            'overwrite': True
        }
        NewlabelMapResample[
            jointFusion_atlas_subject].plugin_args = many_cpu_NewlabelMapResample_options_dictionary
        NewlabelMapResample[jointFusion_atlas_subject].inputs.num_threads = -1
        NewlabelMapResample[jointFusion_atlas_subject].inputs.dimension = 3
        NewlabelMapResample[
            jointFusion_atlas_subject].inputs.output_image = jointFusion_atlas_subject + 'fswm_2_subj_lbl.nii.gz'
        NewlabelMapResample[
            jointFusion_atlas_subject].inputs.interpolation = 'MultiLabel'
        NewlabelMapResample[jointFusion_atlas_subject].inputs.default_value = 0
        NewlabelMapResample[
            jointFusion_atlas_subject].inputs.invert_transform_flags = [False]

        JointFusionWF.connect(
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'composite_transform',
            NewlabelMapResample[jointFusion_atlas_subject], 'transforms')
        JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                              NewlabelMapResample[jointFusion_atlas_subject],
                              'reference_image')
        JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject],
                              'label',
                              NewlabelMapResample[jointFusion_atlas_subject],
                              'input_image')

        JointFusionWF.connect(
            NewlabelMapResample[jointFusion_atlas_subject], 'output_image',
            NewwarpedAtlasLblMergeNode,
            'in' + str(merge_input_offset + jointFusion_atlas_mergeindex))

        jointFusion_atlas_mergeindex += 1

    ## Now work on cleaning up the label maps
    from .FixLabelMapsTools import FixLabelMapFromNeuromorphemetrics2012
    from .FixLabelMapsTools import RecodeLabelMap

    ### Original NeuroMorphometrica merged fusion
    jointFusion = pe.Node(interface=ants.AntsJointFusion(),
                          name="AntsJointFusion")
    many_cpu_JointFusion_options_dictionary = {
        'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 10, 8, 16),
        'overwrite': True
    }
    jointFusion.plugin_args = many_cpu_JointFusion_options_dictionary
    jointFusion.inputs.num_threads = -1
    jointFusion.inputs.dimension = 3
    jointFusion.inputs.search_radius = [3]
    # jointFusion.inputs.method='Joint[0.1,2]'
    jointFusion.inputs.out_label_fusion = 'JointFusion_HDAtlas20_2015_label.nii.gz'
    # JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels', jointFusion, 'mask_image')
    JointFusionWF.connect(fixedROIAuto, 'outputROIMaskVolume', jointFusion,
                          'mask_image')

    JointFusionWF.connect(warpedAtlasLblMergeNode, 'out', jointFusion,
                          'atlas_segmentation_image')

    AdjustMergeListNode = pe.Node(Function(
        function=adjustMergeList,
        input_names=['allList', 'n_modality'],
        output_names=['out']),
                                  name="AdjustMergeListNode")
    "*** HACK JointFusion only uses T1"
    # AdjustMergeListNode.inputs.n_modality = n_modality
    AdjustMergeListNode.inputs.n_modality = 1

    JointFusionWF.connect(warpedAtlasesMergeNode, 'out', AdjustMergeListNode,
                          'allList')
    JointFusionWF.connect(AdjustMergeListNode, 'out', jointFusion,
                          'atlas_image')
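
    # adjustMergeList is defined elsewhere in this package; a plausible
    # reading, given how it is used here, is that it regroups the flat merged
    # list into one sublist of length n_modality per atlas, e.g.:
    #
    #   def adjustMergeList(allList, n_modality):
    #       return [allList[i:i + n_modality]
    #               for i in range(0, len(allList), n_modality)]
    #
    # With n_modality forced to 1 (the T1-only HACK above), every atlas then
    # contributes a single-image list to 'atlas_image'.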

    AdjustTargetImageListNode = pe.Node(Function(
        function=adjustMergeList,
        input_names=['allList', 'n_modality'],
        output_names=['out']),
                                        name="AdjustTargetImageListNode")
    AdjustTargetImageListNode.inputs.n_modality = n_modality

    "*** HACK JointFusion only uses T1"
    """ Once JointFusion works with T2 properly,
        delete sessionMakeListSingleModalInput and use sessionMakeMultimodalInput instead
    """
    sessionMakeListSingleModalInput = pe.Node(
        Function(function=MakeVector,
                 input_names=['inFN1', 'inFN2', 'jointFusion'],
                 output_names=['outFNs']),
        run_without_submitting=True,
        name="sessionMakeListSingleModalInput")
    sessionMakeListSingleModalInput.inputs.jointFusion = False
    JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                          sessionMakeListSingleModalInput, 'inFN1')
    JointFusionWF.connect(sessionMakeListSingleModalInput, 'outFNs',
                          jointFusion, 'target_image')
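
    # MakeVector is likewise defined elsewhere; judging from its input names
    # ('inFN1', 'inFN2', 'jointFusion') and the jointFusion=False setting used
    # here, it presumably wraps the filename(s) into the list expected by
    # 'target_image', along the lines of:
    #
    #   def MakeVector(inFN1, inFN2=None, jointFusion=False):
    #       return [inFN1] if inFN2 is None else [inFN1, inFN2]
    #
    # Only the T1 path is wired in, consistent with the T1-only HACK above.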

    JointFusionWF.connect(jointFusion, 'out_label_fusion', outputsSpec,
                          'JointFusion_HDAtlas20_2015_label')

    ## We need to recode values to ensure that the labels match FreeSurfer as
    ## closely as possible by merging some labels together to follow standard
    ## FreeSurfer conventions (i.e. for WMQL)
    RECODE_LABELS_2_Standard_FSWM = [
        (15071, 47), (15072, 47), (15073, 47), (15145, 1011), (15157, 1011),
        (15161, 1011), (15179, 1012), (15141, 1014), (15151, 1017),
        (15163, 1018), (15165, 1019), (15143, 1027), (15191, 1028),
        (15193, 1028), (15185, 1030), (15201, 1030), (15175, 1031),
        (15195, 1031), (15173, 1035), (15144, 2011), (15156, 2011),
        (15160, 2011), (15178, 2012), (15140, 2014), (15150, 2017),
        (15162, 2018), (15164, 2019), (15142, 2027), (15190, 2028),
        (15192, 2028), (15184, 2030), (15174, 2031), (15194, 2031),
        (15172, 2035), (15200, 2030)
    ]
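    # In this table the left-hand values are Neuromorphometrics-style labels
    # (the 15xxx range produced by the fused atlas) and the right-hand values
    # follow the FreeSurfer color LUT: 1xxx codes are left-hemisphere cortical
    # parcels, 2xxx codes their right-hemisphere counterparts, and 47 is right
    # cerebellar cortex.  Several source labels share one target, which is how
    # the label merging described above is expressed.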
    ## def RecodeLabelMap(InputFileName,OutputFileName,RECODE_TABLE):
    RecodeToStandardFSWM = pe.Node(Function(
        function=RecodeLabelMap,
        input_names=['InputFileName', 'OutputFileName', 'RECODE_TABLE'],
        output_names=['OutputFileName']),
                                   name="RecodeToStandardFSWM")
    RecodeToStandardFSWM.inputs.RECODE_TABLE = RECODE_LABELS_2_Standard_FSWM
    RecodeToStandardFSWM.inputs.OutputFileName = 'JointFusion_HDAtlas20_2015_fs_standard_label.nii.gz'
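
    # Per the signature noted above, RecodeLabelMap reads InputFileName,
    # substitutes labels according to RECODE_TABLE and writes OutputFileName.
    # A minimal sketch of that behavior (assuming a SimpleITK-style
    # implementation; the real function lives in FixLabelMapsTools):
    #
    #   import SimpleITK as sitk
    #   def RecodeLabelMap(InputFileName, OutputFileName, RECODE_TABLE):
    #       img = sitk.ReadImage(InputFileName)
    #       img = sitk.ChangeLabel(img, changeMap=dict(RECODE_TABLE))
    #       sitk.WriteImage(img, OutputFileName)
    #       return OutputFileName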

    JointFusionWF.connect(RecodeToStandardFSWM, 'OutputFileName', outputsSpec,
                          'JointFusion_HDAtlas20_2015_fs_standard_label')

    ## JointFusion_SNAPSHOT_WRITER for Segmented result checking:
    #    JointFusion_SNAPSHOT_WRITERNodeName = "JointFusion_ExtendedJointFusion_SNAPSHOT_WRITER"
    #    JointFusion_SNAPSHOT_WRITER = pe.Node(interface=BRAINSSnapShotWriter(), name=JointFusion_SNAPSHOT_WRITERNodeName)

    #    JointFusion_SNAPSHOT_WRITER.inputs.outputFilename = 'JointFusion_HDAtlas20_2015_CSFVBInjected_label.png'  # output specification
    #    JointFusion_SNAPSHOT_WRITER.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0]
    #    JointFusion_SNAPSHOT_WRITER.inputs.inputSliceToExtractInPhysicalPoint = [-3, -7, -3, 5, 7, 22, -22]

    #    JointFusionWF.connect(JointFusion_SNAPSHOT_WRITER,'outputFilename',outputsSpec,'JointFusion_extended_snapshot')

    myLocalDustCleanup = CreateDustCleanupWorkflow("DUST_CLEANUP", onlyT1,
                                                   master_config)
    JointFusionWF.connect(inputsSpec, 'subj_t1_image', myLocalDustCleanup,
                          'inputspec.subj_t1_image')
    if not onlyT1:
        JointFusionWF.connect(subjectT2Resample, 'outputVolume',
                              myLocalDustCleanup, 'inputspec.subj_t2_image')
    if runFixFusionLabelMap:
        ## post processing of jointfusion
        injectSurfaceCSFandVBIntoLabelMap = pe.Node(
            Function(function=FixLabelMapFromNeuromorphemetrics2012,
                     input_names=[
                         'fusionFN', 'FixedHeadFN', 'posterior_dict',
                         'LeftHemisphereFN', 'outFN', 'OUT_DICT'
                     ],
                     output_names=['fixedFusionLabelFN']),
            name="injectSurfaceCSFandVBIntoLabelMap")
        injectSurfaceCSFandVBIntoLabelMap.inputs.outFN = 'JointFusion_HDAtlas20_2015_CSFVBInjected_label.nii.gz'
        FREESURFER_DICT = {
            'BRAINSTEM': 16,
            'RH_CSF': 24,
            'LH_CSF': 24,
            'BLOOD': 15000,
            'UNKNOWN': 999,
            'CONNECTED': [11, 12, 13, 9, 17, 26, 50, 51, 52, 48, 53, 58]
        }
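        # For orientation: in the FreeSurfer color LUT, 16 is Brain-Stem and
        # 24 is CSF (used here for both hemispheres), while CONNECTED lists
        # the standard subcortical codes (thalamus, caudate, putamen,
        # pallidum, hippocampus and accumbens area for each hemisphere).
        # BLOOD (15000) and UNKNOWN (999) appear to be pipeline-specific
        # codes outside the FreeSurfer range.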
        injectSurfaceCSFandVBIntoLabelMap.inputs.OUT_DICT = FREESURFER_DICT
        JointFusionWF.connect(jointFusion, 'out_label_fusion',
                              injectSurfaceCSFandVBIntoLabelMap, 'fusionFN')
        JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels',
                              injectSurfaceCSFandVBIntoLabelMap, 'FixedHeadFN')
        JointFusionWF.connect(inputsSpec, 'subj_posteriors',
                              injectSurfaceCSFandVBIntoLabelMap,
                              'posterior_dict')
        JointFusionWF.connect(inputsSpec, 'subj_left_hemisphere',
                              injectSurfaceCSFandVBIntoLabelMap,
                              'LeftHemisphereFN')

        JointFusionWF.connect(injectSurfaceCSFandVBIntoLabelMap,
                              'fixedFusionLabelFN', myLocalDustCleanup,
                              'inputspec.subj_label_atlas')

        JointFusionWF.connect(
            injectSurfaceCSFandVBIntoLabelMap, 'fixedFusionLabelFN',
            outputsSpec, 'JointFusion_HDAtlas20_2015_CSFVBInjected_label')

        JointFusionWF.connect(
            myLocalDustCleanup,
            'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
            RecodeToStandardFSWM, 'InputFileName')

        JointFusionWF.connect(
            myLocalDustCleanup,
            'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
            outputsSpec, 'JointFusion_HDAtlas20_2015_dustCleaned_label')

    #        JointFusionWF.connect([(inputsSpec, JointFusion_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]),
    #                    (injectSurfaceCSFandVBIntoLabelMap, JointFusion_SNAPSHOT_WRITER,
    #                      [('fixedFusionLabelFN', 'inputBinaryVolumes')])
    #                   ])
    else:
        JointFusionWF.connect(jointFusion, 'out_label_fusion',
                              myLocalDustCleanup, 'inputspec.subj_label_atlas')

        JointFusionWF.connect(
            jointFusion, 'out_label_fusion', outputsSpec,
            'JointFusion_HDAtlas20_2015_CSFVBInjected_label')

        JointFusionWF.connect(
            myLocalDustCleanup,
            'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
            RecodeToStandardFSWM, 'InputFileName')

        JointFusionWF.connect(
            myLocalDustCleanup,
            'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
            outputsSpec, 'JointFusion_HDAtlas20_2015_dustCleaned_label')

    #        JointFusionWF.connect([(inputsSpec, JointFusion_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]),
    #                    (jointFusion, JointFusion_SNAPSHOT_WRITER,
    #                      [('output_label_image', 'inputBinaryVolumes')])
    #                   ])
    """
    Compute label volumes
    """
    computeLabelVolumes = CreateVolumeMeasureWorkflow("LabelVolume",
                                                      master_config)
    JointFusionWF.connect(inputsSpec, 'subj_t1_image', computeLabelVolumes,
                          'inputspec.subj_t1_image')
    JointFusionWF.connect(
        myLocalDustCleanup,
        'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
        computeLabelVolumes, 'inputspec.subj_label_image')
    JointFusionWF.connect(computeLabelVolumes, 'outputspec.csvFilename',
                          outputsSpec, 'JointFusion_volumes_csv')
    JointFusionWF.connect(computeLabelVolumes, 'outputspec.jsonFilename',
                          outputsSpec, 'JointFusion_volumes_json')

    ## Lobe parcellation by recoding
    if master_config['relabel2lobes_filename'] is not None:
        # print("Generate relabeled version based on {0}".format(master_config['relabel2lobes_filename']))

        RECODE_LABELS_2_LobePacellation = readRecodingList(
            master_config['relabel2lobes_filename'])
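        # readRecodingList presumably parses relabel2lobes_filename into the
        # same (old_label, new_label) pair list that
        # RECODE_LABELS_2_Standard_FSWM uses above, since its result feeds the
        # identical RecodeLabelMap interface.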
        RecordToFSLobes = pe.Node(Function(
            function=RecodeLabelMap,
            input_names=['InputFileName', 'OutputFileName', 'RECODE_TABLE'],
            output_names=['OutputFileName']),
                                  name="RecordToFSLobes")
        RecordToFSLobes.inputs.RECODE_TABLE = RECODE_LABELS_2_LobePacellation
        RecordToFSLobes.inputs.OutputFileName = 'JointFusion_HDAtlas20_2015_lobe_label.nii.gz'
        JointFusionWF.connect(RecodeToStandardFSWM, 'OutputFileName',
                              RecordToFSLobes, 'InputFileName')
        JointFusionWF.connect(RecordToFSLobes, 'OutputFileName', outputsSpec,
                              'JointFusion_HDAtlas20_2015_lobe_label')
        """
        Compute lobe volumes
        """
        computeLobeVolumes = CreateVolumeMeasureWorkflow(
            "LobeVolume", master_config)
        JointFusionWF.connect(inputsSpec, 'subj_t1_image', computeLobeVolumes,
                              'inputspec.subj_t1_image')
        JointFusionWF.connect(RecordToFSLobes, 'OutputFileName',
                              computeLobeVolumes, 'inputspec.subj_label_image')
        JointFusionWF.connect(computeLobeVolumes, 'outputspec.csvFilename',
                              outputsSpec, 'JointFusion_lobe_volumes_csv')
        JointFusionWF.connect(computeLobeVolumes, 'outputspec.jsonFilename',
                              outputsSpec, 'JointFusion_lobe_volumes_json')

    return JointFusionWF