def airmsk_wf(name='AirMaskWorkflow', save_memory=False, ants_settings=None):
    """Implements the Step 1 of [Mortamet2009]_."""
    import pkg_resources as pkgr

    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=['in_file', 'in_noinu', 'in_mask', 'head_mask']),
        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['out_file', 'artifact_msk']),
        name='outputnode')

    # Registration settings come from a JSON file: either the caller-supplied
    # one or the packaged default.
    antsparms = pe.Node(nio.JSONFileGrabber(), name='ants_settings')
    if ants_settings is None:
        ants_settings = pkgr.resource_filename(
            'structural_dhcp_mriqc', 'data/ants_settings.json')
    antsparms.inputs.in_file = ants_settings

    def _invt_flags(transforms):
        # Invert every forward transform when mapping the template mask back.
        return [True] * len(transforms)

    # Spatial normalization, using ANTs
    norm = pe.Node(ants.Registration(dimension=3), name='normalize')

    mni_dir = get_mni_template()
    if save_memory:
        # Lower-resolution template keeps the memory footprint down.
        norm.inputs.fixed_image = op.join(mni_dir, 'MNI152_T1_2mm.nii.gz')
        norm.inputs.fixed_image_mask = op.join(
            mni_dir, 'MNI152_T1_2mm_brain_mask.nii.gz')
    else:
        norm.inputs.fixed_image = op.join(mni_dir, 'MNI152_T1_1mm.nii.gz')
        norm.inputs.fixed_image_mask = op.join(
            mni_dir, 'MNI152_T1_1mm_brain_mask.nii.gz')

    # Warp the template's "bottom" mask into subject space with the inverted
    # normalization transforms.
    invt = pe.Node(
        ants.ApplyTransforms(
            dimension=3, default_value=1, interpolation='NearestNeighbor'),
        name='invert_xfm')
    invt.inputs.input_image = op.join(mni_dir,
                                      'MNI152_T1_1mm_brain_bottom.nii.gz')

    # Combine and invert mask
    combine = pe.Node(
        niu.Function(input_names=['head_mask', 'artifact_msk'],
                     output_names=['out_file'],
                     function=combine_masks),
        name='combine_masks')

    qi1 = pe.Node(ArtifactMask(), name='ArtifactMask')

    # Every registration parameter in the JSON file is forwarded verbatim to
    # the ANTs node (identical source/destination field names).
    settings_fields = [
        'initial_moving_transform_com', 'winsorize_lower_quantile',
        'winsorize_upper_quantile', 'float', 'transforms',
        'transform_parameters', 'number_of_iterations',
        'convergence_window_size', 'metric', 'metric_weight',
        'radius_or_number_of_bins', 'sampling_strategy',
        'sampling_percentage', 'smoothing_sigmas', 'shrink_factors',
        'convergence_threshold', 'sigma_units',
        'use_estimate_learning_rate_once', 'use_histogram_matching',
    ]

    workflow.connect([
        (antsparms, norm, [(field, field) for field in settings_fields]),
        (inputnode, qi1, [('in_file', 'in_file')]),
        (inputnode, norm, [('in_noinu', 'moving_image'),
                           ('in_mask', 'moving_image_mask')]),
        (norm, invt, [('forward_transforms', 'transforms'),
                      (('forward_transforms', _invt_flags),
                       'invert_transform_flags')]),
        (inputnode, invt, [('in_mask', 'reference_image')]),
        (inputnode, combine, [('head_mask', 'head_mask')]),
        (invt, combine, [('output_image', 'artifact_msk')]),
        (combine, qi1, [('out_file', 'air_msk')]),
        (qi1, outputnode, [('out_air_msk', 'out_file'),
                           ('out_art_msk', 'artifact_msk')]),
    ])
    return workflow
# ---- Example 2 ----
def create_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------
        name : name of workflow (default: 'registration')

    Inputs:

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.anatomical_image : anatomical image to coregister to
        inputspec.target_image : registration target

    Outputs:

        outputspec.func2anat_transform : FLIRT transform
        outputspec.anat2target_transform : FLIRT+FNIRT transform
        outputspec.transformed_files : transformed files in target space
        outputspec.transformed_mean : mean image in target space
    """

    register = pe.Workflow(name=name)

    inputnode = pe.Node(interface=niu.IdentityInterface(fields=[
        'source_files', 'mean_image', 'anatomical_image', 'target_image',
        'target_image_brain', 'config_file'
    ]),
                        name='inputspec')
    outputnode = pe.Node(interface=niu.IdentityInterface(fields=[
        'func2anat_transform', 'anat2target_transform', 'transformed_files',
        'transformed_mean', 'anat2target', 'mean2anat_mask'
    ]),
                         name='outputspec')
    """
    Estimate the tissue classes from the anatomical image. But use spm's segment
    as FSL appears to be breaking.
    """

    stripper = pe.Node(fsl.BET(), name='stripper')
    register.connect(inputnode, 'anatomical_image', stripper, 'in_file')
    fast = pe.Node(fsl.FAST(), name='fast')
    register.connect(stripper, 'out_file', fast, 'in_files')
    """
    Binarize the segmentation
    """

    binarize = pe.Node(fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'),
                       name='binarize')

    def pickindex(x, i):
        # Select one entry from a list output; index 2 below picks the
        # white-matter partial-volume map from FAST's output list.
        return x[i]

    register.connect(fast, ('partial_volume_files', pickindex, 2), binarize,
                     'in_file')
    """
    Calculate rigid transform from mean image to anatomical image
    """

    mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat')
    mean2anat.inputs.dof = 6
    register.connect(inputnode, 'mean_image', mean2anat, 'in_file')
    register.connect(stripper, 'out_file', mean2anat, 'reference')
    """
    Now use bbr cost function to improve the transform
    """

    mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr')
    mean2anatbbr.inputs.dof = 6
    mean2anatbbr.inputs.cost = 'bbr'
    # NOTE(review): os.getenv('FSLDIR') returns None when FSL is not
    # configured, which makes os.path.join raise TypeError — FSL must be
    # sourced before running this workflow.
    mean2anatbbr.inputs.schedule = os.path.join(os.getenv('FSLDIR'),
                                                'etc/flirtsch/bbr.sch')
    register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file')
    register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg')
    register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference')
    register.connect(mean2anat, 'out_matrix_file', mean2anatbbr,
                     'in_matrix_file')
    """
    Create a mask of the median image coregistered to the anatomical image
    """

    # Use pe.Node for consistency with every other node in this workflow
    # (the original used a bare `Node`).
    mean2anat_mask = pe.Node(fsl.BET(mask=True), name='mean2anat_mask')
    register.connect(mean2anatbbr, 'out_file', mean2anat_mask, 'in_file')
    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = pe.Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(mean2anatbbr, 'out_matrix_file', convert2itk,
                     'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')
    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """

    reg = pe.Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[
        100, 30, 20
    ]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    # The SyN stage uses a dual metric (Mattes + CC); the linear stages use
    # Mattes only — hence the nested lists for the last stage.
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.args = '--float'
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    # Scheduler resources for cluster plugins (SGE/SLURM).
    reg.plugin_args = {
        'qsub_args': '-pe orte 4',
        'sbatch_args': '--mem=6G -c 4'
    }
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image_brain', reg, 'fixed_image')
    """
    Concatenate the affine and ants transforms into a list
    """

    # Plain Node: `iterfield` in the original was a MapNode-only argument and
    # was silently ignored here.
    merge = pe.Node(niu.Merge(2), name='mergexfm')
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, 'composite_transform', merge, 'in1')
    """
    Transform the mean image. First to anatomical and then to target
    """

    warpmean = pe.Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 0
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.terminal_output = 'file'

    register.connect(inputnode, 'target_image_brain', warpmean,
                     'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = pe.MapNode(ants.ApplyTransforms(),
                         iterfield=['input_image'],
                         name='warpall')
    warpall.inputs.input_image_type = 0
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.terminal_output = 'file'

    register.connect(inputnode, 'target_image_brain', warpall,
                     'reference_image')
    register.connect(inputnode, 'source_files', warpall, 'input_image')
    register.connect(merge, 'out', warpall, 'transforms')
    """
    Assign all the output files
    """

    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(warpall, 'output_image', outputnode, 'transformed_files')
    register.connect(mean2anatbbr, 'out_matrix_file', outputnode,
                     'func2anat_transform')
    register.connect(mean2anat_mask, 'mask_file', outputnode, 'mean2anat_mask')
    register.connect(reg, 'composite_transform', outputnode,
                     'anat2target_transform')

    return register
#-----------------------------------------------------------------------------------------------------
# In[7]:
# Average node: divides its input image by 2 (presumably to average two
# previously summed eigenvalue maps into radial diffusivity — TODO confirm
# against the upstream connections, which are not visible in this chunk).

RD = Node(fsl.BinaryMaths(), name='RD')
RD.inputs.operand_value = 2
RD.inputs.operation = 'div'

#-----------------------------------------------------------------------------------------------------
# Register to Study Waxholm template first, just to have both for purposes of comparison.
# The two transformations are not combined; both are kept only to compare them.

#>>>>>>>>>>>>>>>>>>>>>>>>>>>FA

# ANTs registration of the subject FA map to the Waxholm FA template
# (Wax_FA_Template is defined elsewhere in this file).
FA_to_WAX_Temp = Node(ants.Registration(), name='FA_To_WAX_Template')
FA_to_WAX_Temp.inputs.args = '--float'
FA_to_WAX_Temp.inputs.collapse_output_transforms = True
FA_to_WAX_Temp.inputs.initial_moving_transform_com = True
FA_to_WAX_Temp.inputs.fixed_image = Wax_FA_Template
FA_to_WAX_Temp.inputs.num_threads = 4
FA_to_WAX_Temp.inputs.output_inverse_warped_image = True
FA_to_WAX_Temp.inputs.output_warped_image = True
FA_to_WAX_Temp.inputs.sigma_units = ['vox'] * 3
FA_to_WAX_Temp.inputs.transforms = ['Rigid', 'Affine', 'SyN']
# Setting terminal_output on this interface raised an error, so it is left unset:
# FA_to_WAX_Temp.inputs.terminal_output='file' #returns an error
FA_to_WAX_Temp.inputs.winsorize_lower_quantile = 0.005
FA_to_WAX_Temp.inputs.winsorize_upper_quantile = 0.995
FA_to_WAX_Temp.inputs.convergence_threshold = [1e-6]
FA_to_WAX_Temp.inputs.convergence_window_size = [10]
FA_to_WAX_Temp.inputs.metric = ['MI', 'MI', 'CC']
# ---- Example 4 ----
def canonical(
    subjects_participants,
    regdir,
    f2s,
    template="~/GitHub/mriPipeline/templates/waxholm/WHS_SD_rat_T2star_v1.01_downsample3.nii.gz",
    f_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_SE_EPI/f_bru2nii/",
    s_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_T2_TurboRARE/s_bru2nii/",
):
    """Warp a functional image based on the functional-to-structural and the
    structural-to-template registrations.

    Currently this approach is failing because the functional-to-structural
    registration pushes the brain stem too far down.

    Parameters
    ----------
    subjects_participants : iterable of dict
        Substitution dicts; each must provide at least the ``subject`` and
        ``session`` keys used by the file-format templates below.
    regdir : str
        Directory into which intermediate and final images are written.
    f2s : str
        ANTs transform type for the functional-to-structural registration
        (e.g. ``"Rigid"``).
    template, f_file_format, s_file_format : str
        Template image path and directory patterns in which the functional
        and structural NIfTI files are searched for.
    """
    template = os.path.expanduser(template)
    for subject_participant in subjects_participants:
        # The output filenames below need an identifier and a session tag.
        # Derive them from the substitution dict (the same keys the file
        # format templates use); previously `participant` and `i` were
        # undefined names, so every call raised NameError.
        participant = subject_participant['subject']
        i = subject_participant['session']
        func_image_dir = os.path.expanduser(
            f_file_format.format(**subject_participant))
        struct_image_dir = os.path.expanduser(
            s_file_format.format(**subject_participant))
        func_image = None
        struct_image = None
        try:
            for myfile in os.listdir(func_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    func_image = os.path.join(func_image_dir, myfile)
            for myfile in os.listdir(struct_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    struct_image = os.path.join(struct_image_dir, myfile)
        except FileNotFoundError:
            # Missing session directory: skip this participant.
            continue
        # Skip if either image was not found (previously an unbound
        # `func_image`/`struct_image` would have raised NameError later).
        if func_image is None or struct_image is None:
            continue

        # ---- structural pipeline ----
        n4 = ants.N4BiasFieldCorrection()
        n4.inputs.dimension = 3
        n4.inputs.input_image = struct_image
        # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
        n4.inputs.bspline_fitting_distance = 100
        n4.inputs.shrink_factor = 2
        n4.inputs.n_iterations = [200, 200, 200, 200]
        n4.inputs.convergence_threshold = 1e-11
        n4.inputs.output_image = '{}/ss_n4_{}_ofM{}.nii.gz'.format(
            regdir, participant, i)
        n4_res = n4.run()

        # A second, more aggressive bias correction used only to derive the
        # brain mask (see comment below).
        _n4 = ants.N4BiasFieldCorrection()
        _n4.inputs.dimension = 3
        _n4.inputs.input_image = struct_image
        # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
        _n4.inputs.bspline_fitting_distance = 95
        _n4.inputs.shrink_factor = 2
        _n4.inputs.n_iterations = [500, 500, 500, 500]
        _n4.inputs.convergence_threshold = 1e-14
        _n4.inputs.output_image = '{}/ss__n4_{}_ofM{}.nii.gz'.format(
            regdir, participant, i)
        _n4_res = _n4.run()

        # we do this on a separate bias-corrected image to remove hyperintensities which we have to create in order to prevent brain regions being caught by the negative threshold
        struct_cutoff = ImageMaths()
        struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
        struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
        struct_cutoff_res = struct_cutoff.run()

        struct_BET = BET()
        struct_BET.inputs.mask = True
        struct_BET.inputs.frac = 0.3
        struct_BET.inputs.robust = True
        struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
        struct_BET_res = struct_BET.run()

        # Apply the BET mask to the gently-corrected image.
        struct_mask = ApplyMask()
        struct_mask.inputs.in_file = n4_res.outputs.output_image
        struct_mask.inputs.mask_file = struct_BET_res.outputs.mask_file
        struct_mask_res = struct_mask.run()

        struct_registration = ants.Registration()
        struct_registration.inputs.fixed_image = template
        struct_registration.inputs.output_transform_prefix = "output_"
        struct_registration.inputs.transforms = ['Affine', 'SyN']  ##
        struct_registration.inputs.transform_parameters = [(1.0, ),
                                                           (1.0, 3.0, 5.0)
                                                           ]  ##
        struct_registration.inputs.number_of_iterations = [[
            2000, 1000, 500
        ], [100, 100, 100]]  #
        struct_registration.inputs.dimension = 3
        struct_registration.inputs.write_composite_transform = True
        struct_registration.inputs.collapse_output_transforms = True
        struct_registration.inputs.initial_moving_transform_com = True
        # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
        struct_registration.inputs.metric = ['MeanSquares', 'Mattes']
        struct_registration.inputs.metric_weight = [1, 1]
        struct_registration.inputs.radius_or_number_of_bins = [16, 32]  #
        struct_registration.inputs.sampling_strategy = ['Random', None]
        struct_registration.inputs.sampling_percentage = [0.3, 0.3]
        struct_registration.inputs.convergence_threshold = [1.e-11,
                                                            1.e-8]  #
        struct_registration.inputs.convergence_window_size = [20, 20]
        struct_registration.inputs.smoothing_sigmas = [[4, 2, 1],
                                                       [4, 2, 1]]
        struct_registration.inputs.sigma_units = ['vox', 'vox']
        struct_registration.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]]
        struct_registration.inputs.use_estimate_learning_rate_once = [
            True, True
        ]
        # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
        struct_registration.inputs.use_histogram_matching = [False, False]
        struct_registration.inputs.winsorize_lower_quantile = 0.005
        struct_registration.inputs.winsorize_upper_quantile = 0.98
        struct_registration.inputs.args = '--float'
        struct_registration.inputs.num_threads = 6

        struct_registration.inputs.moving_image = struct_mask_res.outputs.out_file
        struct_registration.inputs.output_warped_image = '{}/s_{}_ofM{}.nii.gz'.format(
            regdir, participant, i)
        struct_registration_res = struct_registration.run()

        # ---- functional pipeline ----
        func_n4 = ants.N4BiasFieldCorrection()
        func_n4.inputs.dimension = 3
        func_n4.inputs.input_image = func_image
        func_n4.inputs.bspline_fitting_distance = 100
        func_n4.inputs.shrink_factor = 2
        func_n4.inputs.n_iterations = [200, 200, 200, 200]
        func_n4.inputs.convergence_threshold = 1e-11
        func_n4.inputs.output_image = '{}/f_n4_{}_ofM{}.nii.gz'.format(
            regdir, participant, i)
        func_n4_res = func_n4.run()

        func_registration = ants.Registration()
        func_registration.inputs.fixed_image = n4_res.outputs.output_image
        func_registration.inputs.output_transform_prefix = "func_"
        func_registration.inputs.transforms = [f2s]
        func_registration.inputs.transform_parameters = [(0.1, )]
        func_registration.inputs.number_of_iterations = [[40, 20, 10]]
        func_registration.inputs.dimension = 3
        func_registration.inputs.write_composite_transform = True
        func_registration.inputs.collapse_output_transforms = True
        func_registration.inputs.initial_moving_transform_com = True
        func_registration.inputs.metric = ['MeanSquares']
        func_registration.inputs.metric_weight = [1]
        func_registration.inputs.radius_or_number_of_bins = [16]
        func_registration.inputs.sampling_strategy = ["Regular"]
        func_registration.inputs.sampling_percentage = [0.3]
        func_registration.inputs.convergence_threshold = [1.e-2]
        func_registration.inputs.convergence_window_size = [8]
        func_registration.inputs.smoothing_sigmas = [[4, 2,
                                                      1]]  # [1,0.5,0]
        func_registration.inputs.sigma_units = ['vox']
        func_registration.inputs.shrink_factors = [[3, 2, 1]]
        func_registration.inputs.use_estimate_learning_rate_once = [True]
        func_registration.inputs.use_histogram_matching = [False]
        func_registration.inputs.winsorize_lower_quantile = 0.005
        func_registration.inputs.winsorize_upper_quantile = 0.995
        func_registration.inputs.args = '--float'
        func_registration.inputs.num_threads = 6

        func_registration.inputs.moving_image = func_n4_res.outputs.output_image
        func_registration.inputs.output_warped_image = '{}/f_{}_ofM{}.nii.gz'.format(
            regdir, participant, i)
        func_registration_res = func_registration.run()

        # Chain func->struct and struct->template transforms and apply them
        # to the raw functional image in one resampling step.
        warp = ants.ApplyTransforms()
        warp.inputs.reference_image = template
        warp.inputs.input_image_type = 3
        warp.inputs.interpolation = 'Linear'
        warp.inputs.invert_transform_flags = [False, False]
        warp.inputs.terminal_output = 'file'
        warp.inputs.output_image = '{}/{}_ofM{}.nii.gz'.format(
            regdir, participant, i)
        warp.num_threads = 6

        warp.inputs.input_image = func_image
        warp.inputs.transforms = [
            func_registration_res.outputs.composite_transform,
            struct_registration_res.outputs.composite_transform
        ]
        warp.run()
# ---- Example 5 — file: newreg.py, project: setina42/SAMRI ----
def structural(
    substitutions,
    parameters,
    reference="/usr/share/mouse-brain-atlases/dsurqec_200micron.nii",
    structural_file_template="~/ni_data/ofM.dr/preprocessing/{preprocessing_workdir}/_subject_session_{subject}.{session}/_scan_type_{scan}/s_bru2nii/",
    workdir="~/samri_optimize/structural",
    threads=6,
    prefix="_",
):
    """Bias-correct and register structural images to a reference template.

    Parameters
    ----------
    substitutions : iterable of dict
        Substitution dicts for ``structural_file_template``; each must
        provide at least the ``subject`` and ``session`` keys (used for the
        output filenames).
    parameters : list of dict
        One dict per registration phase; each supplies the ANTs Registration
        per-stage values (``transforms``, ``metric``, ...).
    reference : str
        Fixed image for the registration.
    structural_file_template : str
        Directory pattern in which the structural NIfTI file is searched.
    workdir : str
        Output directory (created if missing).
    threads : int
        Number of threads for N4 and the registration.
    prefix : str
        Required filename prefix when picking a ``.nii`` file from the
        image directory; falsy to accept any.
    """

    reference = os.path.abspath(os.path.expanduser(reference))
    # Honour the caller-supplied workdir (it was previously ignored in
    # favour of a hard-coded "~/samri_optimize/structural").
    workdir = os.path.abspath(os.path.expanduser(workdir))
    if not os.path.exists(workdir):
        os.makedirs(workdir)

    for substitution in substitutions:
        image_path = structural_file_template.format(**substitution)
        image_path = os.path.abspath(os.path.expanduser(image_path))
        if os.path.isdir(image_path):
            try:
                for myfile in os.listdir(image_path):
                    if myfile.endswith(".nii") and (not prefix or
                                                    myfile.startswith(prefix)):
                        image_path = os.path.join(image_path, myfile)
            except FileNotFoundError:
                pass
        if not os.path.isfile(image_path):
            print("{} not found!".format(image_path))
            continue

        n4_out = os.path.join(
            workdir,
            'n4_{subject}_{session}.nii.gz'.format(**substitution))
        n4 = ants.N4BiasFieldCorrection()
        n4.inputs.dimension = 3
        n4.inputs.input_image = image_path
        # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
        n4.inputs.bspline_fitting_distance = 10
        n4.inputs.bspline_order = 4
        # n4.inputs.bspline_fitting_distance = 95
        n4.inputs.shrink_factor = 2
        n4.inputs.n_iterations = [150, 100, 50, 30]
        n4.inputs.convergence_threshold = 1e-16
        n4.inputs.num_threads = threads
        n4.inputs.output_image = n4_out
        print("Running bias field correction:\n{}".format(n4.cmdline))
        n4_run = n4.run()

        struct_registration = ants.Registration()
        struct_registration.inputs.fixed_image = reference
        struct_registration.inputs.output_transform_prefix = "output_"
        struct_registration.inputs.transforms = [
            i["transforms"] for i in parameters
        ]  ##
        # for stability: high second SyN parameter, low first and third (https://www.neuro.polymtl.ca/tips_and_tricks/how_to_use_ants)
        struct_registration.inputs.transform_parameters = [
            i["transform_parameters"] for i in parameters
        ]  ##
        struct_registration.inputs.number_of_iterations = [
            i["number_of_iterations"] for i in parameters
        ]  #
        struct_registration.inputs.dimension = 3
        struct_registration.inputs.write_composite_transform = True
        struct_registration.inputs.collapse_output_transforms = True
        struct_registration.inputs.initial_moving_transform_com = True
        # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally;
        # MeanSquares is ok
        # GC tilts too much if sampling_percentage is set too high, but GC with sampling_percentage <= 20 is the only metric that can prevent bits skin on the skull from being mapped onto the brain
        struct_registration.inputs.metric = [
            i["metric"] for i in parameters
        ]
        struct_registration.inputs.metric_weight = [
            i["metric_weight"] for i in parameters
        ]
        # the following relates to the similarity metric (e.g. size of the bins for the histogram):
        struct_registration.inputs.radius_or_number_of_bins = [
            i["radius_or_number_of_bins"] for i in parameters
        ]
        # Regular and Random sampling for SyN over-stretch the brain rostrocaudally
        struct_registration.inputs.sampling_strategy = [
            i["sampling_strategy"] for i in parameters
        ]
        # The Rigid sampling_percentage needs to be kept low to ensure that the image does not start to rotate
        # very weird things happen at sampling_percentage==0.15 but not at sampling_percentage==0.2 or sampling_percentage==0.1
        struct_registration.inputs.sampling_percentage = [
            i["sampling_percentage"] for i in parameters
        ]
        struct_registration.inputs.convergence_threshold = [
            i["convergence_threshold"] for i in parameters
        ]
        # the above threshold pertains to similarity improvement over the last <convergenceWindowSize> iterations
        struct_registration.inputs.convergence_window_size = [
            i["convergence_window_size"] for i in parameters
        ]
        struct_registration.inputs.smoothing_sigmas = [
            i["smoothing_sigmas"] for i in parameters
        ]
        struct_registration.inputs.sigma_units = [
            i["sigma_units"] for i in parameters
        ]
        struct_registration.inputs.shrink_factors = [
            i["shrink_factors"] for i in parameters
        ]
        struct_registration.inputs.use_estimate_learning_rate_once = [
            i["use_estimate_learning_rate_once"] for i in parameters
        ]
        # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
        struct_registration.inputs.use_histogram_matching = [
            i["use_histogram_matching"] for i in parameters
        ]
        struct_registration.inputs.winsorize_lower_quantile = 0.05
        struct_registration.inputs.winsorize_upper_quantile = 0.95
        struct_registration.inputs.args = '--float'
        struct_registration.inputs.fixed_image_mask = "/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii"
        struct_registration.inputs.num_threads = threads
        struct_registration.inputs.output_warped_image = os.path.join(
            workdir, '{subject}_{session}.nii.gz'.format(**substitution))
        struct_registration.inputs.moving_image = n4_out
        print("Running registration:\n{}".format(
            struct_registration.cmdline))
        struct_registration_run = struct_registration.run()
# ---- Example 6 — file: nodes.py, project: dancebean/SAMRI ----
def generic_registration(
    template,
    structural_mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
    functional_mask='',
    num_threads=4,
    phase_dictionary=GENERIC_PHASES,
    s_phases=['s_translation', 'similarity', 'affine', 'syn'],
    f_phases=[
        'f_translation',
    ],
):
    """Create ANTs registration and transform-application nipype nodes.

    Parameters
    ----------
    template : str
        Path of the reference template image; ``~`` is expanded.
    structural_mask : str, optional
        Fixed-image mask for the structural registration; pass a falsy
        value to disable masking.
    functional_mask : str, optional
        Fixed-image mask for the functional registration (disabled by
        default).
    num_threads : int, optional
        Number of threads assigned to each ANTs node.
    phase_dictionary : dict, optional
        Maps phase names to per-phase ANTs parameter dictionaries.
    s_phases, f_phases : list of str, optional
        Phase names (keys of ``phase_dictionary``) selecting the stages of
        the structural and functional registrations. Unknown names are
        silently dropped. The mutable defaults are safe: they are only
        read, never mutated.

    Returns
    -------
    tuple
        ``(s_registration, s_warp, f_registration, f_warp)`` nipype nodes.
    """

    # Per-phase settings copied verbatim from the phase dictionary into the
    # ants.Registration input spec (one list entry per selected phase).
    per_phase_keys = (
        'transforms',
        'transform_parameters',
        'number_of_iterations',
        'metric',
        'metric_weight',
        'radius_or_number_of_bins',
        'sampling_strategy',
        'sampling_percentage',
        'convergence_threshold',
        'convergence_window_size',
        'smoothing_sigmas',
        'sigma_units',
        'shrink_factors',
        'use_estimate_learning_rate_once',
        'use_histogram_matching',
    )

    def _registration_node(name, phases, fixed_image, mask,
                           winsorize_lower, winsorize_upper):
        # Build one ants.Registration node configured from the selected
        # phases; falsy `fixed_image`/`mask` leave those inputs unset.
        parameters = [phase_dictionary[selection] for selection in phases]
        node = pe.Node(ants.Registration(), name=name)
        if fixed_image:
            node.inputs.fixed_image = path.abspath(
                path.expanduser(fixed_image))
        node.inputs.output_transform_prefix = "output_"
        for key in per_phase_keys:
            setattr(node.inputs, key, [i[key] for i in parameters])
        node.inputs.dimension = 3
        node.inputs.write_composite_transform = True
        node.inputs.collapse_output_transforms = True
        node.inputs.initial_moving_transform_com = 0
        node.inputs.winsorize_lower_quantile = winsorize_lower
        node.inputs.winsorize_upper_quantile = winsorize_upper
        node.inputs.args = '--float'
        if mask:
            node.inputs.fixed_image_masks = [
                path.abspath(path.expanduser(mask))
            ]
        node.inputs.num_threads = num_threads
        return node

    # Keep only phases that are actually defined in the dictionary.
    s_phases = [phase for phase in s_phases if phase in phase_dictionary]
    f_phases = [phase for phase in f_phases if phase in phase_dictionary]

    # Structural registration: template as fixed image, tight winsorizing.
    s_registration = _registration_node("s_register", s_phases, template,
                                        structural_mask, 0.005, 0.995)
    # Functional registration: no fixed image is set here (it is wired in
    # by the caller/workflow); looser winsorizing.
    f_registration = _registration_node("f_register", f_phases, None,
                                        functional_mask, 0.05, 0.95)

    # Apply the computed transforms to 4D functional time series.
    f_warp = pe.Node(ants.ApplyTransforms(), name="f_warp")
    f_warp.inputs.reference_image = path.abspath(path.expanduser(template))
    f_warp.inputs.input_image_type = 3  # time-series input
    f_warp.inputs.interpolation = 'BSpline'
    f_warp.inputs.interpolation_parameters = (5, )
    f_warp.inputs.invert_transform_flags = [False, False]
    f_warp.num_threads = num_threads
    f_warp.interface.mem_gb = 16  # large 4D volumes need generous memory

    # Apply the structural transform to scalar 3D images.
    s_warp = pe.Node(ants.ApplyTransforms(), name="s_warp")
    s_warp.inputs.reference_image = path.abspath(path.expanduser(template))
    s_warp.inputs.input_image_type = 0  # scalar 3D input
    s_warp.inputs.interpolation = 'BSpline'
    s_warp.inputs.interpolation_parameters = (5, )
    s_warp.inputs.invert_transform_flags = [False]
    s_warp.num_threads = num_threads

    return s_registration, s_warp, f_registration, f_warp
Example #7
0
def functional_per_participant_test():
    """Bias-correct, brain-extract and register temporal-mean functional
    images of participant 4008, one per session suffix.

    Sessions whose preprocessing directory does not exist, or which
    contain no ``.nii.gz`` file, are skipped. Output images are written
    to the current working directory.
    """
    for i in ["", "_aF", "_cF1", "_cF2", "_pF"]:
        template = "~/ni_data/templates/ds_QBI_chr.nii.gz"
        participant = "4008"
        # expanduser is required: os.listdir does not expand "~", so the
        # original literal path always raised FileNotFoundError and every
        # session was silently skipped.
        image_dir = os.path.expanduser(
            "~/ni_data/ofM.dr/preprocessing/generic_work/_subject_session_{}.ofM{}/_scan_type_7_EPI_CBV/temporal_mean/".format(
                participant, i))
        # Pick the (last) NIfTI file found in the session directory.
        mimage = None
        try:
            for myfile in os.listdir(image_dir):
                if myfile.endswith(".nii.gz"):
                    mimage = os.path.join(image_dir, myfile)
        except FileNotFoundError:
            # Session not acquired/preprocessed; skip it.
            continue
        if mimage is None:
            # Directory exists but holds no image; previously this path
            # raised an opaque NameError on the unbound `mimage`.
            continue

        # N4 bias-field correction of the mean functional image.
        n4 = ants.N4BiasFieldCorrection()
        n4.inputs.dimension = 3
        n4.inputs.input_image = mimage
        n4.inputs.bspline_fitting_distance = 100
        n4.inputs.shrink_factor = 2
        n4.inputs.n_iterations = [200, 200, 200, 200]
        n4.inputs.convergence_threshold = 1e-11
        n4.inputs.output_image = 'n4_{}_ofM{}.nii.gz'.format(
            participant, i)
        n4_res = n4.run()

        # Threshold away the dimmest 30% of voxels before extraction.
        functional_cutoff = ImageMaths()
        functional_cutoff.inputs.op_string = "-thrP 30"
        functional_cutoff.inputs.in_file = n4_res.outputs.output_image
        functional_cutoff_res = functional_cutoff.run()

        # FSL brain extraction.
        functional_BET = BET()
        functional_BET.inputs.mask = True
        functional_BET.inputs.frac = 0.5
        functional_BET.inputs.in_file = functional_cutoff_res.outputs.out_file
        functional_BET_res = functional_BET.run()

        # Affine + SyN registration to the template.
        registration = ants.Registration()
        registration.inputs.fixed_image = template
        registration.inputs.output_transform_prefix = "output_"
        registration.inputs.transforms = ['Affine', 'SyN']
        registration.inputs.transform_parameters = [(0.1, ),
                                                    (3.0, 3.0, 5.0)]
        registration.inputs.number_of_iterations = [[10000, 10000, 10000],
                                                    [100, 100, 100]]
        registration.inputs.dimension = 3
        registration.inputs.write_composite_transform = True
        registration.inputs.collapse_output_transforms = True
        registration.inputs.initial_moving_transform_com = True
        # NOTE(review): the per-stage lists below have three entries while
        # only two transforms are declared -- values preserved as-is, but
        # confirm against a working run (nipype may reject the mismatch).
        registration.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
        registration.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
        registration.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
        registration.inputs.sampling_strategy = ['Regular'] * 2 + [[
            None, None
        ]]
        registration.inputs.sampling_percentage = [0.3] * 2 + [[
            None, None
        ]]
        registration.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
        registration.inputs.convergence_window_size = [20] * 2 + [5]
        registration.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[
            1, 0.5, 0
        ]]
        registration.inputs.sigma_units = ['vox'] * 3
        registration.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
        registration.inputs.use_estimate_learning_rate_once = [True] * 3
        registration.inputs.use_histogram_matching = [False] * 2 + [True]
        registration.inputs.winsorize_lower_quantile = 0.005
        registration.inputs.winsorize_upper_quantile = 0.995
        registration.inputs.args = '--float'
        registration.inputs.num_threads = 4
        registration.plugin_args = {
            'qsub_args': '-pe orte 4',
            'sbatch_args': '--mem=6G -c 4'
        }

        registration.inputs.moving_image = functional_BET_res.outputs.out_file
        registration.inputs.output_warped_image = '{}_ofM{}.nii.gz'.format(
            participant, i)
        res = registration.run()
Example #8
0
# Output file names for the TSNR node (written in the working directory).
# NOTE(review): `tsnr`, `MapNode`, `Node` etc. are defined earlier in the
# full script; this excerpt begins mid-configuration.
tsnr.inputs.mean_file = 'mean.nii'
tsnr.inputs.stddev_file = 'stddev.nii'
tsnr.inputs.tsnr_file = 'tsnr.nii'

# Despike each functional run with AFNI, writing uncompressed NIfTI.
despike = MapNode(afni.Despike(), iterfield='in_file', name='despike')
despike.inputs.outputtype = 'NIFTI'

# SPM tissue segmentation; only the native-space maps are requested
# (output_type triple = [modulated normalized, unmodulated normalized, native]).
seg = Node(spm.Segment(), name='seg')
seg.inputs.csf_output_type = [False, False, True]  # Output native CSF seg
seg.inputs.gm_output_type = [False, False, True]  # Output native gm seg
seg.inputs.wm_output_type = [False, False, True]  # Output native wm seg

# SPM coregistration, mapped over each EPI target image.
coreg2epi = MapNode(spm.Coregister(), iterfield='target', name='coreg2epi')

#Warps to MNI space using a 3mm template image
antsnorm = MapNode(ants.Registration(),
                   iterfield='moving_image',
                   name='antsnorm')
antsnorm.inputs.collapse_output_transforms = True
antsnorm.inputs.initial_moving_transform_com = True
antsnorm.inputs.num_threads = 1
antsnorm.inputs.output_inverse_warped_image = True
antsnorm.inputs.output_warped_image = True
# Three-stage Rigid -> Affine -> SyN normalization.
antsnorm.inputs.sigma_units = ['vox'] * 3
antsnorm.inputs.transforms = ['Rigid', 'Affine', 'SyN']
antsnorm.inputs.terminal_output = 'file'  # NOTE(review): deprecated in newer nipype
antsnorm.inputs.winsorize_lower_quantile = 0.005
antsnorm.inputs.winsorize_upper_quantile = 0.995
# NOTE(review): single-element convergence lists with three transforms --
# the full script likely sets more per-stage parameters beyond this
# excerpt; confirm before reuse.
antsnorm.inputs.convergence_threshold = [1e-06]
antsnorm.inputs.convergence_window_size = [10]
antsnorm.inputs.metric = ['MI', 'MI', 'CC']
Example #9
0
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline.

        Node roles (see the ``connect`` calls below):
          1. Rigid PET-to-T1w registration (``RegistrationSynQuick``,
             ``transform_type="r"``).
          2. Concatenation of the PET->T1w and T1w->MNI transforms, then
             resampling of PET into the template space
             (``ApplyTransforms``).
          3. Non-linear (SyN) T1w-to-template registration, used to build
             the normalization image fed to the SUVR intensity
             normalization.
          4. Optional cropping of the SUVR image, and an optional
             PET-in-T1w output.
        """

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from nipype.interfaces import ants

        import clinica.pipelines.pet_linear.pet_linear_utils as utils

        # Utility nodes
        # Seeds the pipeline with the input PET image.
        init_node = npe.Node(
            interface=nutil.Function(
                input_names=["pet"],
                output_names=["pet"],
                function=utils.init_input_node,
            ),
            name="initPipeline",
        )
        # Collects the two transforms into a single list for
        # ApplyTransforms. NOTE(review): the "tranform" spelling is part
        # of the established port names; do not "fix" it here.
        concatenate_node = npe.Node(
            interface=nutil.Function(
                input_names=["pet_to_t1w_tranform", "t1w_to_mni_tranform"],
                output_names=["transforms_list"],
                function=utils.concatenate_transforms,
            ),
            name="concatenateTransforms",
        )

        # The core (processing) nodes

        # 1. `RegistrationSynQuick` by *ANTS*. It uses nipype interface.
        #    transform_type "r" restricts the PET->T1w step to a rigid
        #    transform.
        ants_registration_node = npe.Node(
            name="antsRegistration", interface=ants.RegistrationSynQuick())
        ants_registration_node.inputs.dimension = 3
        ants_registration_node.inputs.transform_type = "r"

        # 2. `ApplyTransforms` by *ANTS*. It uses nipype interface. PET to MRI
        ants_applytransform_node = npe.Node(name="antsApplyTransformPET2MNI",
                                            interface=ants.ApplyTransforms())
        ants_applytransform_node.inputs.dimension = 3
        ants_applytransform_node.inputs.reference_image = self.ref_template

        # 3. Normalize the image (using nifti). It uses custom interface, from utils file
        #    Single-stage SyN registration of the T1w onto the template.
        ants_registration_nonlinear_node = npe.Node(
            name="antsRegistrationT1W2MNI", interface=ants.Registration())
        ants_registration_nonlinear_node.inputs.fixed_image = self.ref_template
        ants_registration_nonlinear_node.inputs.metric = ["MI"]
        ants_registration_nonlinear_node.inputs.metric_weight = [1.0]
        ants_registration_nonlinear_node.inputs.transforms = ["SyN"]
        ants_registration_nonlinear_node.inputs.transform_parameters = [(0.1,
                                                                         3, 0)]
        ants_registration_nonlinear_node.inputs.dimension = 3
        ants_registration_nonlinear_node.inputs.shrink_factors = [[8, 4, 2]]
        ants_registration_nonlinear_node.inputs.smoothing_sigmas = [[3, 2, 1]]
        ants_registration_nonlinear_node.inputs.sigma_units = ["vox"]
        ants_registration_nonlinear_node.inputs.number_of_iterations = [[
            200, 50, 10
        ]]
        ants_registration_nonlinear_node.inputs.convergence_threshold = [1e-05]
        ants_registration_nonlinear_node.inputs.convergence_window_size = [10]
        ants_registration_nonlinear_node.inputs.radius_or_number_of_bins = [32]
        ants_registration_nonlinear_node.inputs.winsorize_lower_quantile = 0.005
        ants_registration_nonlinear_node.inputs.winsorize_upper_quantile = 0.995
        ants_registration_nonlinear_node.inputs.collapse_output_transforms = True
        ants_registration_nonlinear_node.inputs.use_histogram_matching = False
        ants_registration_nonlinear_node.inputs.verbose = True

        ants_applytransform_nonlinear_node = npe.Node(
            name="antsApplyTransformNonLinear",
            interface=ants.ApplyTransforms())
        ants_applytransform_nonlinear_node.inputs.dimension = 3
        ants_applytransform_nonlinear_node.inputs.reference_image = self.ref_template

        # SUVR normalization: divide by the mean uptake within ref_mask.
        normalize_intensity_node = npe.Node(
            name="intensityNormalization",
            interface=nutil.Function(
                function=utils.suvr_normalization,
                input_names=["input_img", "norm_img", "ref_mask"],
                output_names=["output_img"],
            ),
        )
        normalize_intensity_node.inputs.ref_mask = self.ref_mask

        # 4. Crop image (using nifti). It uses custom interface, from utils file
        crop_nifti_node = npe.Node(
            name="cropNifti",
            interface=nutil.Function(
                function=utils.crop_nifti,
                input_names=["input_img", "ref_crop"],
                output_names=["output_img"],
            ),
        )
        crop_nifti_node.inputs.ref_crop = self.ref_crop

        # 5. Print end message
        print_end_message = npe.Node(
            interface=nutil.Function(input_names=["pet", "final_file"],
                                     function=utils.print_end_pipeline),
            name="WriteEndMessage",
        )

        # 6. Optional node: compute PET image in T1w
        ants_applytransform_optional_node = npe.Node(
            name="antsApplyTransformPET2T1w", interface=ants.ApplyTransforms())
        ants_applytransform_optional_node.inputs.dimension = 3

        # Connection
        # ==========
        # fmt: off
        self.connect([
            (self.input_node, init_node, [("pet", "pet")]),
            # STEP 1
            (self.input_node, ants_registration_node, [("t1w", "fixed_image")]
             ),
            (init_node, ants_registration_node, [("pet", "moving_image")]),
            # STEP 2
            (ants_registration_node, concatenate_node,
             [("out_matrix", "pet_to_t1w_tranform")]),
            (self.input_node, concatenate_node, [("t1w_to_mni",
                                                  "t1w_to_mni_tranform")]),
            (self.input_node, ants_applytransform_node, [("pet", "input_image")
                                                         ]),
            (concatenate_node, ants_applytransform_node, [("transforms_list",
                                                           "transforms")]),
            # STEP 3
            (self.input_node, ants_registration_nonlinear_node,
             [("t1w", "moving_image")]),
            # NOTE(review): "reverse_forward_transforms" is the forward
            # transform list in reversed order -- confirm this is the
            # intended direction for building the normalization image.
            (ants_registration_nonlinear_node,
             ants_applytransform_nonlinear_node, [
                 ("reverse_forward_transforms", "transforms")
             ]),
            (ants_applytransform_node, ants_applytransform_nonlinear_node,
             [("output_image", "input_image")]),
            (ants_applytransform_node, normalize_intensity_node,
             [("output_image", "input_img")]),
            (ants_applytransform_nonlinear_node, normalize_intensity_node,
             [("output_image", "norm_img")]),
            # Connect to DataSink
            (ants_registration_node, self.output_node, [("out_matrix",
                                                         "affine_mat")]),
            (normalize_intensity_node, self.output_node, [("output_img",
                                                           "suvr_pet")]),
            (self.input_node, print_end_message, [("pet", "pet")]),
        ])
        # STEP 4
        if not (self.parameters.get("uncropped_image")):
            self.connect([
                (normalize_intensity_node, crop_nifti_node, [("output_img",
                                                              "input_img")]),
                (crop_nifti_node, self.output_node, [("output_img",
                                                      "outfile_crop")]),
                (crop_nifti_node, print_end_message, [("output_img",
                                                       "final_file")]),
            ])
        else:
            self.connect([
                (normalize_intensity_node, print_end_message,
                 [("output_img", "final_file")]),
            ])
        # STEP 6: Optional argument
        if self.parameters.get("save_PETinT1w"):
            self.connect([
                (self.input_node, ants_applytransform_optional_node,
                 [("pet", "input_image"), ("t1w", "reference_image")]),
                (ants_registration_node, ants_applytransform_optional_node,
                 [("out_matrix", "transforms")]),
                (ants_applytransform_optional_node, self.output_node,
                 [("output_image", "PETinT1w")]),
            ])
def registration(moving_img, fixed, reg_type):
    # pylint: disable= too-many-statements, too-many-branches
    """Image2Image registration.

    Registers ``fixed`` onto ``moving_img``'s pre-processed image. The
    naming is deliberately swapped: the patient image is used as the ANTs
    fixed image (with its inverted-label mask) and ``fixed`` as the moving
    image; the resulting *InverseComposite* transform is then applied to
    the patient image to produce the output in the target space.

    Parameters
    ----------
    moving_img : image object
        Provides ``temp_data_path``, ``pre_processed_filepath``,
        ``fixed_image``, ``init_transform`` and ``label_inv_filepath``;
        its ``processed_filepath`` and ``transform`` attributes are set
        here.
    fixed : str
        Path of the target image.
    reg_type : str
        One of RIGID, AFFINE, COMPOSITEAFFINE, SIMILARITY or SYN.

    Returns
    -------
    The updated ``moving_img``.

    Raises
    ------
    ValueError
        If ``reg_type`` is not a known registration type.
    """
    reg = ants.Registration()

    # Renamed from `path` to avoid shadowing the common os.path import name.
    out_dir = moving_img.temp_data_path
    name = util.get_basename(
        moving_img.pre_processed_filepath) + '_' + reg_type
    moving_img.processed_filepath = out_dir + name + '_RegTo' + str(
        moving_img.fixed_image) + '.nii.gz'
    moving_img.transform = out_dir + name + '_RegTo' + str(
        moving_img.fixed_image) + '.h5'

    # With a pre-existing initial transform, skip center-of-mass
    # initialization and bring the mask into the initialized space.
    init_moving_transform = moving_img.init_transform
    if init_moving_transform is not None and os.path.exists(
            init_moving_transform):
        util.LOGGER.info("Found initial transform")
        # reg.inputs.initial_moving_transform = init_moving_transform
        reg.inputs.initial_moving_transform_com = False
        mask = util.transform_volume(moving_img.label_inv_filepath,
                                     moving_img.init_transform,
                                     label_img=True)
    else:
        reg.inputs.initial_moving_transform_com = True
        mask = moving_img.label_inv_filepath
    reg.inputs.collapse_output_transforms = True
    # Intentional swap of fixed/moving -- see docstring.
    reg.inputs.fixed_image = moving_img.pre_processed_filepath
    reg.inputs.fixed_image_mask = mask
    reg.inputs.moving_image = fixed
    reg.inputs.num_threads = NUM_THREADS_ANTS

    if reg_type == RIGID:
        reg.inputs.transforms = ['Rigid', 'Rigid', 'Rigid']
        reg.inputs.metric = ['MI', 'MI', 'MI']
        reg.inputs.metric_weight = [1, 1, 1]
        reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [None]
        reg.inputs.sampling_percentage = [0.5] * 2 + [None]
        if reg.inputs.initial_moving_transform_com:
            # COM initialization: extra coarse levels for the first stage.
            reg.inputs.number_of_iterations = [[
                10000, 10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [75, 50, 50]]
            reg.inputs.shrink_factors = [[12, 9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[9, 8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = [[5000, 5000, 1000, 500],
                                               [5000, 5000, 1000, 500],
                                               [75, 50]]
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.25, )]
    elif reg_type in (AFFINE, COMPOSITEAFFINE, SIMILARITY):
        if reg_type == AFFINE:
            reg.inputs.transforms = ['Rigid', 'Affine', 'Affine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity', 'Similarity']
        else:
            reg.inputs.transforms = [
                'Rigid', 'CompositeAffine', 'CompositeAffine'
            ]
        reg.inputs.metric = ['MI', 'MI', 'MI']
        reg.inputs.metric_weight = [1, 1, 1]
        reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [None]
        reg.inputs.sampling_percentage = [0.5] * 2 + [None]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = [[
                10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [75, 50, 50]]
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = [[5000, 5000, 1000, 500],
                                               [5000, 5000, 1000, 500],
                                               [75, 50]]
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.25, )]
    elif reg_type == SYN:
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.metric = ['MI', 'MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] * 2 + [[None, None]]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = [[
                10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [100, 75, 75, 75]]
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [5, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [4, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = [[5000, 5000, 1000, 500],
                                               [5000, 5000, 1000, 500],
                                               [100, 90, 75]]
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [4, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [1, 0.5, 0]]
        # Negative threshold on the last (SyN) stage: run all iterations.
        reg.inputs.convergence_threshold = [1.e-6] * 2 + [-0.01]
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.2, 3.0, 0.0)]
    else:
        # ValueError (still an Exception subclass, so existing broad
        # handlers keep working) instead of a bare Exception.
        raise ValueError("Wrong registration format " + reg_type)

    # Settings identical across all registration types.
    reg.inputs.convergence_window_size = [5, 5, 5]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False, False, True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.write_composite_transform = True
    reg.inputs.output_transform_prefix = out_dir + name
    transform = out_dir + name + 'InverseComposite.h5'

    # NOTE: the existence check deliberately happens after configuration,
    # so the mask transform side effect above still runs (preserved).
    if os.path.exists(moving_img.processed_filepath) and\
       os.path.exists(moving_img.transform):
        # generate_image(reg.inputs.output_warped_image, fixed)
        return moving_img
    util.LOGGER.info("starting registration")
    start_time = datetime.datetime.now()
    util.LOGGER.info(reg.cmdline)
    reg.run()
    util.LOGGER.info("Finished registration: ")
    util.LOGGER.info(datetime.datetime.now() - start_time)

    # Apply the inverse composite transform to obtain the registered
    # patient image, then persist the transform next to it.
    util.transform_volume(moving_img.pre_processed_filepath,
                          transform,
                          outputpath=moving_img.processed_filepath)
    shutil.copy(transform, moving_img.transform)
    util.generate_image(moving_img.processed_filepath, fixed)

    return moving_img
def pre_process(img, do_bet=True, slice_size=1, reg_type=None, be_method=None):
    # pylint: disable= too-many-statements, too-many-locals, too-many-branches
    """ Pre process the data.

    Pipeline: N4 bias-field correction -> intensity normalization to
    [0, 100] (the template's intensity range) -> resampling to
    ``slice_size`` mm isotropic voxels -> optional brain extraction (BE).

    Parameters:
        img: patient image object, updated in place and returned.  Reads
            ``img_filepath``, ``temp_data_path``, ``label_inv_filepath``
            and ``fixed_image``; writes ``pre_processed_filepath`` and
            (for ANTs-based BE) ``init_transform``.
        do_bet: when False, skip brain extraction and return the
            resampled volume.
        slice_size: target voxel size (mm) of the resampled volume.
        reg_type: RIGID / COMPOSITEAFFINE / SIMILARITY / AFFINE; selects
            the second registration stage (defaults to Affine).
        be_method: brain-extraction strategy:
            0 -- ANTs template registration, then multiply by the
                 template brain mask;
            1 -- FSL BET directly on the resampled volume;
            2 -- optional BET (when BET_FRAC > 0) followed by ANTs
                 template registration and mask multiplication.
    """
    path = img.temp_data_path

    input_file = img.img_filepath
    # One intermediate file per processing stage.
    n4_file = path + util.get_basename(input_file) + '_n4.nii.gz'
    norm_file = path + util.get_basename(n4_file) + '_norm.nii.gz'
    resampled_file = path + util.get_basename(norm_file) + '_resample.nii.gz'
    name = util.get_basename(resampled_file) + "_be"
    img.pre_processed_filepath = path + name + '.nii.gz'

    # N4 bias-field (intensity inhomogeneity) correction.
    n4bias = ants.N4BiasFieldCorrection()
    n4bias.inputs.dimension = 3
    n4bias.inputs.num_threads = NUM_THREADS_ANTS
    n4bias.inputs.input_image = input_file
    n4bias.inputs.output_image = n4_file
    n4bias.run()

    # normalization [0,100], same as template
    normalize_img = nib.load(n4_file)
    temp_data = normalize_img.get_data()
    temp_img = nib.Nifti1Image(temp_data / np.amax(temp_data) * 100,
                               normalize_img.affine, normalize_img.header)
    temp_img.to_filename(norm_file)
    del temp_img

    # resample volume to `slice_size` mm isotropic voxels
    target_affine_3x3 = np.eye(3) * slice_size
    img_3d_affine = resample_img(norm_file, target_affine=target_affine_3x3)
    nib.save(img_3d_affine, resampled_file)

    if not do_bet:
        # Brain extraction disabled: the resampled volume is the result.
        img.pre_processed_filepath = resampled_file
        return img

    if be_method == 0:
        img.init_transform = path + name + '_InitRegTo' + str(
            img.fixed_image) + '.h5'

        # Register the template onto the subject; the fixed-image mask
        # (inverted label) keeps the lesion from driving the metric.
        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = resampled_file
        reg.inputs.moving_image = util.TEMPLATE_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = NUM_THREADS_ANTS
        reg.inputs.initial_moving_transform_com = True

        # Two-stage transform: rigid first, then the requested model
        # (Affine when no known reg_type is given).
        if reg_type == RIGID:
            reg.inputs.transforms = ['Rigid', 'Rigid']
        elif reg_type == COMPOSITEAFFINE:
            reg.inputs.transforms = ['Rigid', 'CompositeAffine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity']
        else:
            reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.number_of_iterations = ([[
            15000, 12000, 10000, 10000, 10000, 5000, 5000
        ], [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[19, 16, 12, 9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[10, 10, 10, 8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2
        reg.inputs.transform_parameters = [(0.25, ), (0.25, )]
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_beReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        util.LOGGER.info("starting be registration")
        reg.run()
        util.LOGGER.info("Finished be registration")

        # Warp the subject with the inverse transform and keep a copy of it.
        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        # Skull-strip by multiplying with the template brain mask.
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    elif be_method == 1:
        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command=BET_COMMAND)
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.25
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        # bet.inputs.reduce_bias = True
        bet.inputs.mask = True

        bet.inputs.out_file = img.pre_processed_filepath

        bet.run()
        util.generate_image(img.pre_processed_filepath, resampled_file)
    elif be_method == 2:
        if BET_FRAC > 0:
            # Rough BET pass first; the ANTs registration below then runs
            # against the skull-stripped subject volume.
            name = util.get_basename(resampled_file) + "_bet"
            # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
            bet = fsl.BET(command=BET_COMMAND)
            bet.inputs.in_file = resampled_file
            # pylint: disable= pointless-string-statement
            """ fractional intensity threshold (0->1); default=0.5;
            smaller values give larger brain outline estimates"""
            bet.inputs.frac = BET_FRAC
            """ vertical gradient in fractional intensity threshold (-1->1);
            default=0; positive values give larger brain outline at bottom,
            smaller at top """
            bet.inputs.vertical_gradient = 0
            """  This attempts to reduce image bias, and residual neck voxels.
            This can be useful when running SIENA or SIENAX, for example.
            Various stages involving FAST segmentation-based bias field removal
            and standard-space masking are combined to produce a result which
            can often give better results than just running bet2."""
            bet.inputs.reduce_bias = True
            bet.inputs.mask = True
            bet.inputs.out_file = path + name + '.nii.gz'
            util.LOGGER.info("starting bet registration")
            start_time = datetime.datetime.now()
            util.LOGGER.info(bet.cmdline)
            if not os.path.exists(bet.inputs.out_file):
                bet.run()
            util.LOGGER.info("Finished bet registration 0: ")
            util.LOGGER.info(datetime.datetime.now() - start_time)
            name += "_be"
            moving_image = util.TEMPLATE_MASKED_VOLUME
            fixed_image = bet.inputs.out_file
        else:
            name = util.get_basename(resampled_file) + "_be"
            moving_image = util.TEMPLATE_VOLUME
            fixed_image = resampled_file

        img.init_transform = path + name + '_InitRegTo' + str(
            img.fixed_image) + '.h5'
        img.pre_processed_filepath = path + name + '.nii.gz'
        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = fixed_image
        reg.inputs.moving_image = moving_image
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = NUM_THREADS_ANTS
        reg.inputs.initial_moving_transform_com = True

        if reg_type == RIGID:
            reg.inputs.transforms = ['Rigid', 'Rigid']
        elif reg_type == COMPOSITEAFFINE:
            reg.inputs.transforms = ['Rigid', 'CompositeAffine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity']
        else:
            # Affine is the default; this also guarantees that
            # reg.inputs.transforms is always set for unknown reg_type
            # values, consistent with the be_method == 0 branch above.
            reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2
        reg.inputs.sampling_percentage = [0.5] * 2
        reg.inputs.number_of_iterations = ([[10000, 10000, 5000, 5000],
                                            [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25, ), (0.25, )]
        reg.inputs.convergence_threshold = [1.e-6] * 2
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_TemplateReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        util.LOGGER.info("starting be registration")
        util.LOGGER.info(reg.cmdline)
        start_time = datetime.datetime.now()
        # Skip the (expensive) registration when a previous run already
        # produced the warped output.
        if not os.path.exists(reg.inputs.output_warped_image):
            reg.run()
        util.LOGGER.info("Finished be registration: ")
        util.LOGGER.info(datetime.datetime.now() - start_time)

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        brain_mask = util.TEMPLATE_MASK
        #brain_mask = img.reg_brainmask_filepath
        # Fallback guard: only meaningful if the commented-out per-image
        # mask above is re-enabled and happens to be empty/None.
        if not brain_mask:
            brain_mask = util.TEMPLATE_MASK
        # Use the module logger instead of print() for consistency with the
        # rest of this function's logging.
        util.LOGGER.info("Using brain mask " + brain_mask)

        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = brain_mask
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    else:
        # Unknown be_method: log loudly.  Note that in this case
        # img.pre_processed_filepath points at a file that was never
        # produced.
        util.LOGGER.error(" INVALID BE METHOD!!!!")

    util.LOGGER.info("---BET " + img.pre_processed_filepath)
    return img
示例#12
0
    0.001
]
# Remaining options for the SPM Coregister node created earlier in the
# script (`coreg2epi`): functional-to-EPI coregistration settings.
coreg2epi.inputs.fwhm = [7, 7]  # joint-histogram smoothing kernel (mm)
coreg2epi.inputs.write_interp = 1  # trilinear interpolation on resampling
coreg2epi.inputs.write_wrap = [0, 0, 0]  # no wrap-around in any direction
coreg2epi.inputs.write_mask = False  # do not mask the resliced images
#Output: coregistered_files

#Segment anatomical
seg = Node(spm.NewSegment(), name='segment')
#Outputs:

#Warps to MNI space using a 3mm template image
#Note - The template is warped to subj space then the inverse transform (subj space > MNI) is used
#to warp the data.
antsnorm = Node(ants.Registration(), name='antsnorm')
antsnorm.inputs.output_transform_prefix = 'new'
antsnorm.inputs.collapse_output_transforms = True
antsnorm.inputs.initial_moving_transform_com = True  # center-of-mass init
antsnorm.inputs.num_threads = 1
antsnorm.inputs.output_inverse_warped_image = True
antsnorm.inputs.output_warped_image = True
antsnorm.inputs.sigma_units = ['vox'] * 3  # one entry per stage below
# Three-stage registration: rigid -> affine -> deformable SyN.
antsnorm.inputs.transforms = ['Rigid', 'Affine', 'SyN']
antsnorm.inputs.terminal_output = 'file'
# Clip intensity outliers before metric computation.
antsnorm.inputs.winsorize_lower_quantile = 0.005
antsnorm.inputs.winsorize_upper_quantile = 0.995
antsnorm.inputs.convergence_threshold = [1e-06]
antsnorm.inputs.convergence_window_size = [10]
# Mutual information for the linear stages, cross-correlation for SyN.
antsnorm.inputs.metric = ['MI', 'MI', 'CC']
antsnorm.inputs.metric_weight = [1.0] * 3
示例#13
0
def sdc_fmb(name='fmb_correction',
            interp='Linear',
            fugue_params=None):
    """
    SDC stands for susceptibility distortion correction. FMB stands for
    fieldmap-based.

    The fieldmap based (FMB) method implements SDC by using a mapping of the
    B0 field as proposed by [Jezzard95]_. This workflow uses the implementation
    of FSL (`FUGUE <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FUGUE>`_). Phase
    unwrapping is performed using `PRELUDE
    <http://fsl.fmrib.ox.ac.uk/fsl/fsl-4.1.9/fugue/prelude.html>`_
    [Jenkinson03]_. Preparation of the fieldmap is performed reproducing the
    script in FSL `fsl_prepare_fieldmap
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FUGUE/Guide#SIEMENS_data>`_.



    Example
    -------

    >>> from nipype.workflows.dmri.fsl.artifacts import sdc_fmb
    >>> fmb = sdc_fmb()
    >>> fmb.inputs.inputnode.in_file = 'diffusion.nii'
    >>> fmb.inputs.inputnode.in_ref = list(range(0, 30, 6))
    >>> fmb.inputs.inputnode.in_mask = 'mask.nii'
    >>> fmb.inputs.inputnode.bmap_mag = 'magnitude.nii'
    >>> fmb.inputs.inputnode.bmap_pha = 'phase.nii'
    >>> fmb.inputs.inputnode.settings = 'epi_param.txt'
    >>> fmb.run() # doctest: +SKIP

    .. warning:: Only SIEMENS format fieldmaps are supported.

    .. admonition:: References

      .. [Jezzard95] Jezzard P, and Balaban RS, `Correction for geometric
        distortion in echo planar images from B0 field variations
        <http://dx.doi.org/10.1002/mrm.1910340111>`_,
        MRM 34(1):65-73. (1995). doi: 10.1002/mrm.1910340111.

      .. [Jenkinson03] Jenkinson M., `Fast, automated, N-dimensional
        phase-unwrapping algorithm <http://dx.doi.org/10.1002/mrm.10354>`_,
        MRM 49(1):193-197, 2003, doi: 10.1002/mrm.10354.

    """
    # `fugue_params` defaults to None rather than a mutable dict so the
    # default is created fresh per call (avoids the shared-mutable-default
    # pitfall).  The effective default is unchanged: {'smooth3d': 2.0}.
    if fugue_params is None:
        fugue_params = dict(smooth3d=2.0)

    # Acquisition parameters used when the settings file omits them.
    epi_defaults = {
        'delta_te': 2.46e-3,
        'echospacing': 0.77e-3,
        'acc_factor': 2,
        'enc_dir': u'AP'
    }

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'in_file', 'in_ref', 'in_mask', 'bmap_pha', 'bmap_mag', 'settings'
    ]),
                        name='inputnode')

    outputnode = pe.Node(
        niu.IdentityInterface(fields=['out_file', 'out_vsm', 'out_warp']),
        name='outputnode')

    # Read acquisition settings, falling back to epi_defaults.
    r_params = pe.Node(JSONFileGrabber(defaults=epi_defaults),
                       name='SettingsGrabber')
    eff_echo = pe.Node(niu.Function(function=_eff_t_echo,
                                    input_names=['echospacing', 'acc_factor'],
                                    output_names=['eff_echo']),
                       name='EffEcho')

    # Fieldmap preparation: first magnitude volume, bias correction,
    # brain extraction and mask dilation.
    firstmag = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name='GetFirst')
    n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias')
    bet = pe.Node(fsl.BET(frac=0.4, mask=True), name='BrainExtraction')
    dilate = pe.Node(fsl.maths.MathsCommand(nan2zeros=True,
                                            args='-kernel sphere 5 -dilM'),
                     name='MskDilate')
    # Phase image: SIEMENS units -> radians -> unwrap -> rad/s.
    pha2rads = pe.Node(niu.Function(input_names=['in_file'],
                                    output_names=['out_file'],
                                    function=siemens2rads),
                       name='PreparePhase')
    prelude = pe.Node(fsl.PRELUDE(process3d=True), name='PhaseUnwrap')
    rad2rsec = pe.Node(niu.Function(input_names=['in_file', 'delta_te'],
                                    output_names=['out_file'],
                                    function=rads2radsec),
                       name='ToRadSec')

    # Average the reference b0 volumes of the DWI series.
    baseline = pe.Node(niu.Function(input_names=['in_file', 'index'],
                                    output_names=['out_file'],
                                    function=time_avg),
                       name='Baseline')

    # Rigidly align the fieldmap magnitude to the b0 baseline (two stages).
    fmm2b0 = pe.Node(ants.Registration(output_warped_image=True),
                     name="FMm_to_B0")
    fmm2b0.inputs.transforms = ['Rigid'] * 2
    fmm2b0.inputs.transform_parameters = [(1.0, )] * 2
    fmm2b0.inputs.number_of_iterations = [[50], [20]]
    fmm2b0.inputs.dimension = 3
    fmm2b0.inputs.metric = ['Mattes', 'Mattes']
    fmm2b0.inputs.metric_weight = [1.0] * 2
    fmm2b0.inputs.radius_or_number_of_bins = [64, 64]
    fmm2b0.inputs.sampling_strategy = ['Regular', 'Random']
    fmm2b0.inputs.sampling_percentage = [None, 0.2]
    fmm2b0.inputs.convergence_threshold = [1.e-5, 1.e-8]
    fmm2b0.inputs.convergence_window_size = [20, 10]
    fmm2b0.inputs.smoothing_sigmas = [[6.0], [2.0]]
    fmm2b0.inputs.sigma_units = ['vox'] * 2
    fmm2b0.inputs.shrink_factors = [[6], [1]]  # ,[1] ]
    fmm2b0.inputs.use_estimate_learning_rate_once = [True] * 2
    fmm2b0.inputs.use_histogram_matching = [True] * 2
    fmm2b0.inputs.initial_moving_transform_com = 0
    fmm2b0.inputs.collapse_output_transforms = True
    fmm2b0.inputs.winsorize_upper_quantile = 0.995

    # Bring the fieldmap (phase) into b0 space with the transform above.
    applyxfm = pe.Node(ants.ApplyTransforms(dimension=3, interpolation=interp),
                       name='FMp_to_B0')

    # Fieldmap conditioning: preliminary FUGUE pass, demeaning, edge
    # cleanup and an empty-volume pad.
    pre_fugue = pe.Node(fsl.FUGUE(save_fmap=True), name='PreliminaryFugue')
    demean = pe.Node(niu.Function(input_names=['in_file', 'in_mask'],
                                  output_names=['out_file'],
                                  function=demean_image),
                     name='DemeanFmap')

    cleanup = cleanup_edge_pipeline()

    addvol = pe.Node(niu.Function(input_names=['in_file'],
                                  output_names=['out_file'],
                                  function=add_empty_vol),
                     name='AddEmptyVol')

    # Compute the voxel shift map (VSM) from the prepared fieldmap.
    vsm = pe.Node(fsl.FUGUE(save_shift=True, **fugue_params),
                  name="ComputeVSM")

    # Unwarp each DWI volume with the VSM, clamp negatives, re-merge.
    split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
    merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs')
    unwarp = pe.MapNode(fsl.FUGUE(icorr=True, forward_warping=False),
                        iterfield=['in_file'],
                        name='UnwarpDWIs')
    thres = pe.MapNode(fsl.Threshold(thresh=0.0),
                       iterfield=['in_file'],
                       name='RemoveNegative')
    vsm2dfm = vsm2warp()
    vsm2dfm.inputs.inputnode.scaling = 1.0

    wf = pe.Workflow(name=name)
    wf.connect([(inputnode, r_params, [('settings', 'in_file')]),
                (r_params, eff_echo, [('echospacing', 'echospacing'),
                                      ('acc_factor', 'acc_factor')]),
                (inputnode, pha2rads, [('bmap_pha', 'in_file')]),
                (inputnode, firstmag, [('bmap_mag', 'in_file')]),
                (inputnode, baseline, [('in_file', 'in_file'),
                                       ('in_ref', 'index')]),
                (firstmag, n4, [('roi_file', 'input_image')]),
                (n4, bet, [('output_image', 'in_file')]),
                (bet, dilate, [('mask_file', 'in_file')]),
                (pha2rads, prelude, [('out_file', 'phase_file')]),
                (n4, prelude, [('output_image', 'magnitude_file')]),
                (dilate, prelude, [('out_file', 'mask_file')]),
                (r_params, rad2rsec, [('delta_te', 'delta_te')]),
                (prelude, rad2rsec, [('unwrapped_phase_file', 'in_file')]),
                (baseline, fmm2b0, [('out_file', 'fixed_image')]),
                (n4, fmm2b0, [('output_image', 'moving_image')]),
                (inputnode, fmm2b0, [('in_mask', 'fixed_image_mask')]),
                (dilate, fmm2b0, [('out_file', 'moving_image_mask')]),
                (baseline, applyxfm, [('out_file', 'reference_image')]),
                (rad2rsec, applyxfm, [('out_file', 'input_image')]),
                (fmm2b0, applyxfm, [('forward_transforms', 'transforms'),
                                    ('forward_invert_flags',
                                     'invert_transform_flags')]),
                (applyxfm, pre_fugue, [('output_image', 'fmap_in_file')]),
                (inputnode, pre_fugue, [('in_mask', 'mask_file')]),
                (pre_fugue, demean, [('fmap_out_file', 'in_file')]),
                (inputnode, demean, [('in_mask', 'in_mask')]),
                (demean, cleanup, [('out_file', 'inputnode.in_file')]),
                (inputnode, cleanup, [('in_mask', 'inputnode.in_mask')]),
                (cleanup, addvol, [('outputnode.out_file', 'in_file')]),
                (inputnode, vsm, [('in_mask', 'mask_file')]),
                (addvol, vsm, [('out_file', 'fmap_in_file')]),
                (r_params, vsm, [('delta_te', 'asym_se_time')]),
                (eff_echo, vsm, [('eff_echo', 'dwell_time')]),
                (inputnode, split, [('in_file', 'in_file')]),
                (split, unwarp, [('out_files', 'in_file')]),
                (vsm, unwarp, [('shift_out_file', 'shift_in_file')]),
                (r_params, unwarp, [(('enc_dir', _fix_enc_dir),
                                     'unwarp_direction')]),
                (unwarp, thres, [('unwarped_file', 'in_file')]),
                (thres, merge, [('out_file', 'in_files')]),
                (r_params, vsm2dfm, [(('enc_dir',
                                       _fix_enc_dir), 'inputnode.enc_dir')]),
                (merge, vsm2dfm, [('merged_file', 'inputnode.in_ref')]),
                (vsm, vsm2dfm, [('shift_out_file', 'inputnode.in_vsm')]),
                (merge, outputnode, [('merged_file', 'out_file')]),
                (vsm, outputnode, [('shift_out_file', 'out_vsm')]),
                (vsm2dfm, outputnode, [('outputnode.out_warp', 'out_warp')])])
    return wf
示例#14
0
def CreateTissueClassifyWorkflow(WFname, master_config, InterpolationMode,
                                 UseRegistrationMasking):
    from nipype.interfaces import ants

    CLUSTER_QUEUE = master_config['queue']
    CLUSTER_QUEUE_LONG = master_config['long_q']

    tissueClassifyWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'T1List', 'T2List', 'PDList', 'FLList', 'OtherList', 'T1_count',
        'PrimaryT1', 'atlasDefinition', 'atlasToSubjectInitialTransform',
        'atlasVolume'
    ]),
                         run_without_submitting=True,
                         name='inputspec')
    outputsSpec = pe.Node(
        interface=IdentityInterface(fields=[
            'atlasToSubjectTransform',
            'atlasToSubjectInverseTransform',
            'atlasToSubjectRegistrationState',
            'outputLabels',
            'outputHeadLabels',  # ???
            #'t1_corrected', 't2_corrected',
            't1_average',
            't2_average',
            'pd_average',
            'fl_average',
            'posteriorImages',
        ]),
        run_without_submitting=True,
        name='outputspec')

    ########################################################
    # Run BABCext on Multi-modal images
    ########################################################
    makeOutImageList = pe.Node(Function(
        function=MakeOutFileList,
        input_names=[
            'T1List', 'T2List', 'PDList', 'FLList', 'OtherList', 'postfix',
            'PrimaryT1'
        ],
        output_names=['inImageList', 'outImageList', 'imageTypeList']),
                               run_without_submitting=True,
                               name="99_makeOutImageList")
    tissueClassifyWF.connect(inputsSpec, 'T1List', makeOutImageList, 'T1List')
    tissueClassifyWF.connect(inputsSpec, 'T2List', makeOutImageList, 'T2List')
    tissueClassifyWF.connect(inputsSpec, 'PDList', makeOutImageList, 'PDList')
    tissueClassifyWF.connect(inputsSpec, 'PrimaryT1', makeOutImageList,
                             'PrimaryT1')
    makeOutImageList.inputs.FLList = []  # an emptyList HACK
    makeOutImageList.inputs.postfix = "_corrected.nii.gz"
    # HACK tissueClassifyWF.connect( inputsSpec, 'FLList', makeOutImageList, 'FLList' )
    tissueClassifyWF.connect(inputsSpec, 'OtherList', makeOutImageList,
                             'OtherList')

    ##### Initialize with ANTS Transform For AffineComponentBABC
    currentAtlasToSubjectantsRigidRegistration = 'AtlasToSubjectANTsPreABC_Rigid'
    A2SantsRegistrationPreABCRigid = pe.Node(
        interface=ants.Registration(),
        name=currentAtlasToSubjectantsRigidRegistration)
    many_cpu_ANTsRigid_options_dictionary = {
        'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 4, 2, 8),
        'overwrite': True
    }
    A2SantsRegistrationPreABCRigid.plugin_args = many_cpu_ANTsRigid_options_dictionary

    A2SantsRegistrationPreABCRigid.inputs.num_threads = -1
    A2SantsRegistrationPreABCRigid.inputs.dimension = 3
    A2SantsRegistrationPreABCRigid.inputs.transforms = [
        "Affine",
    ]
    A2SantsRegistrationPreABCRigid.inputs.transform_parameters = [[0.1]]
    A2SantsRegistrationPreABCRigid.inputs.metric = ['MI']
    A2SantsRegistrationPreABCRigid.inputs.sampling_strategy = ['Regular']
    A2SantsRegistrationPreABCRigid.inputs.sampling_percentage = [0.5]
    A2SantsRegistrationPreABCRigid.inputs.metric_weight = [1.0]
    A2SantsRegistrationPreABCRigid.inputs.radius_or_number_of_bins = [32]
    A2SantsRegistrationPreABCRigid.inputs.number_of_iterations = [[
        1000, 1000, 500, 100
    ]]

    A2SantsRegistrationPreABCRigid.inputs.convergence_threshold = [1e-8]

    A2SantsRegistrationPreABCRigid.inputs.convergence_window_size = [10]
    A2SantsRegistrationPreABCRigid.inputs.use_histogram_matching = [True]
    A2SantsRegistrationPreABCRigid.inputs.shrink_factors = [[8, 4, 2, 1]]
    A2SantsRegistrationPreABCRigid.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    A2SantsRegistrationPreABCRigid.inputs.sigma_units = ["vox"]
    A2SantsRegistrationPreABCRigid.inputs.use_estimate_learning_rate_once = [
        False
    ]
    A2SantsRegistrationPreABCRigid.inputs.write_composite_transform = True  # Required for initialize_transforms_per_stage
    A2SantsRegistrationPreABCRigid.inputs.collapse_output_transforms = False  # Mutually Exclusive with initialize_transforms_per_stage
    A2SantsRegistrationPreABCRigid.inputs.initialize_transforms_per_stage = True
    A2SantsRegistrationPreABCRigid.inputs.output_transform_prefix = 'AtlasToSubjectPreBABC_Rigid'
    A2SantsRegistrationPreABCRigid.inputs.winsorize_lower_quantile = 0.01
    A2SantsRegistrationPreABCRigid.inputs.winsorize_upper_quantile = 0.99
    A2SantsRegistrationPreABCRigid.inputs.output_warped_image = 'atlas2subjectRigid.nii.gz'
    A2SantsRegistrationPreABCRigid.inputs.output_inverse_warped_image = 'subject2atlasRigid.nii.gz'

    tissueClassifyWF.connect(inputsSpec, 'atlasToSubjectInitialTransform',
                             A2SantsRegistrationPreABCRigid,
                             'initial_moving_transform')
    tissueClassifyWF.connect(inputsSpec, 'PrimaryT1',
                             A2SantsRegistrationPreABCRigid, 'fixed_image')
    tissueClassifyWF.connect(inputsSpec, 'atlasVolume',
                             A2SantsRegistrationPreABCRigid, 'moving_image')

    ##### Initialize with ANTS Transform For SyN component BABC
    currentAtlasToSubjectantsRegistration = 'AtlasToSubjectANTsPreABC_SyN'
    A2SantsRegistrationPreABCSyN = pe.Node(
        interface=ants.Registration(),
        name=currentAtlasToSubjectantsRegistration)
    many_cpu_ANTsSyN_options_dictionary = {
        'qsub_args': modify_qsub_args(CLUSTER_QUEUE_LONG, 8, 8, 12),
        'overwrite': True
    }
    A2SantsRegistrationPreABCSyN.plugin_args = many_cpu_ANTsSyN_options_dictionary

    A2SantsRegistrationPreABCSyN.inputs.num_threads = -1
    A2SantsRegistrationPreABCSyN.inputs.dimension = 3
    A2SantsRegistrationPreABCSyN.inputs.transforms = ["SyN", "SyN"]
    A2SantsRegistrationPreABCSyN.inputs.transform_parameters = [[0.1, 3, 0],
                                                                [0.1, 3, 0]]
    A2SantsRegistrationPreABCSyN.inputs.metric = ['CC', 'CC']
    A2SantsRegistrationPreABCSyN.inputs.sampling_strategy = [None, None]
    A2SantsRegistrationPreABCSyN.inputs.sampling_percentage = [1.0, 1.0]
    A2SantsRegistrationPreABCSyN.inputs.metric_weight = [1.0, 1.0]
    A2SantsRegistrationPreABCSyN.inputs.radius_or_number_of_bins = [4, 4]
    A2SantsRegistrationPreABCSyN.inputs.number_of_iterations = [[500, 500],
                                                                [500, 70]]

    A2SantsRegistrationPreABCSyN.inputs.convergence_threshold = [1e-8, 1e-6]

    A2SantsRegistrationPreABCSyN.inputs.convergence_window_size = [12]
    A2SantsRegistrationPreABCSyN.inputs.use_histogram_matching = [True, True]
    A2SantsRegistrationPreABCSyN.inputs.shrink_factors = [[8, 4], [2, 1]]
    A2SantsRegistrationPreABCSyN.inputs.smoothing_sigmas = [[3, 2], [1, 0]]
    A2SantsRegistrationPreABCSyN.inputs.sigma_units = ["vox", "vox"]
    A2SantsRegistrationPreABCSyN.inputs.use_estimate_learning_rate_once = [
        False, False
    ]
    A2SantsRegistrationPreABCSyN.inputs.write_composite_transform = True  # Required for initialize_transforms_per_stage
    A2SantsRegistrationPreABCSyN.inputs.collapse_output_transforms = False  # Mutually Exclusive with initialize_transforms_per_stage
    A2SantsRegistrationPreABCSyN.inputs.initialize_transforms_per_stage = True
    A2SantsRegistrationPreABCSyN.inputs.save_state = 'SavedInternalSyNState.h5'
    A2SantsRegistrationPreABCSyN.inputs.output_transform_prefix = 'AtlasToSubjectPreBABC_SyN'
    A2SantsRegistrationPreABCSyN.inputs.winsorize_lower_quantile = 0.01
    A2SantsRegistrationPreABCSyN.inputs.winsorize_upper_quantile = 0.99
    A2SantsRegistrationPreABCSyN.inputs.output_warped_image = 'atlas2subject.nii.gz'
    A2SantsRegistrationPreABCSyN.inputs.output_inverse_warped_image = 'subject2atlas.nii.gz'

    ## if using Registration masking, then do ROIAuto on fixed and moving images and connect to registraitons
    if UseRegistrationMasking == True:
        from SEMTools.segmentation.specialized import BRAINSROIAuto

        fixedROIAuto = pe.Node(interface=BRAINSROIAuto(),
                               name="fixedImageROIAUTOMask")
        fixedROIAuto.inputs.ROIAutoDilateSize = 10
        fixedROIAuto.inputs.outputROIMaskVolume = "fixedImageROIAutoMask.nii.gz"

        movingROIAuto = pe.Node(interface=BRAINSROIAuto(),
                                name="movingImageROIAUTOMask")
        fixedROIAuto.inputs.ROIAutoDilateSize = 10
        movingROIAuto.inputs.outputROIMaskVolume = "movingImageROIAutoMask.nii.gz"

        tissueClassifyWF.connect(inputsSpec, 'PrimaryT1', fixedROIAuto,
                                 'inputVolume')
        tissueClassifyWF.connect(inputsSpec, 'atlasVolume', movingROIAuto,
                                 'inputVolume')

        tissueClassifyWF.connect(fixedROIAuto, 'outputROIMaskVolume',
                                 A2SantsRegistrationPreABCRigid,
                                 'fixed_image_mask')
        tissueClassifyWF.connect(movingROIAuto, 'outputROIMaskVolume',
                                 A2SantsRegistrationPreABCRigid,
                                 'moving_image_mask')

        tissueClassifyWF.connect(fixedROIAuto, 'outputROIMaskVolume',
                                 A2SantsRegistrationPreABCSyN,
                                 'fixed_image_mask')
        tissueClassifyWF.connect(movingROIAuto, 'outputROIMaskVolume',
                                 A2SantsRegistrationPreABCSyN,
                                 'moving_image_mask')

    tissueClassifyWF.connect(
        A2SantsRegistrationPreABCRigid,
        ('composite_transform', getListIndexOrNoneIfOutOfRange, 0),
        A2SantsRegistrationPreABCSyN, 'initial_moving_transform')
    tissueClassifyWF.connect(inputsSpec, 'PrimaryT1',
                             A2SantsRegistrationPreABCSyN, 'fixed_image')
    tissueClassifyWF.connect(inputsSpec, 'atlasVolume',
                             A2SantsRegistrationPreABCSyN, 'moving_image')

    BABCext = pe.Node(interface=BRAINSABCext(), name="BABC")
    many_cpu_BABC_options_dictionary = {
        'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 8, 2, 4),
        'overwrite': True
    }
    BABCext.plugin_args = many_cpu_BABC_options_dictionary
    tissueClassifyWF.connect(makeOutImageList, 'inImageList', BABCext,
                             'inputVolumes')
    tissueClassifyWF.connect(makeOutImageList, 'imageTypeList', BABCext,
                             'inputVolumeTypes')
    tissueClassifyWF.connect(makeOutImageList, 'outImageList', BABCext,
                             'outputVolumes')
    BABCext.inputs.debuglevel = 0
    BABCext.inputs.useKNN = True
    BABCext.inputs.maxIterations = 3
    BABCext.inputs.maxBiasDegree = 4
    BABCext.inputs.filterIteration = 3
    BABCext.inputs.filterMethod = 'GradientAnisotropicDiffusion'
    BABCext.inputs.atlasToSubjectTransformType = 'SyN'
    BABCext.inputs.gridSize = [10, 10, 10]
    BABCext.inputs.outputFormat = "NIFTI"
    BABCext.inputs.outputLabels = "brain_label_seg.nii.gz"
    BABCext.inputs.outputDirtyLabels = "volume_label_seg.nii.gz"
    BABCext.inputs.posteriorTemplate = "POSTERIOR_%s.nii.gz"
    BABCext.inputs.atlasToSubjectTransform = "atlas_to_subject.h5"
    # BABCext.inputs.implicitOutputs = ['t1_average_BRAINSABC.nii.gz', 't2_average_BRAINSABC.nii.gz']
    BABCext.inputs.interpolationMode = InterpolationMode
    BABCext.inputs.outputDir = './'
    BABCext.inputs.saveState = 'SavedBABCInternalSyNState.h5'

    tissueClassifyWF.connect(inputsSpec, 'atlasDefinition', BABCext,
                             'atlasDefinition')
    # NOTE: MUTUALLY EXCLUSIVE with restoreState
    #tissueClassifyWF.connect(A2SantsRegistrationPreABCSyN,
    #                         ( 'composite_transform', getListIndexOrNoneIfOutOfRange, 0 ),
    #                         BABCext, 'atlasToSubjectInitialTransform')
    tissueClassifyWF.connect(A2SantsRegistrationPreABCSyN, 'save_state',
                             BABCext, 'restoreState')
    """
    Get the first T1 and T2 corrected images from BABCext
    """
    """ HACK:  THIS IS NOT NEEDED!  We should use the averged t1 and averaged t2 images instead!
    def get_first_T1_and_T2(in_files,T1_count):
        '''
        Returns the first T1 and T2 file in in_files, based on offset in T1_count.
        '''
        return in_files[0],in_files[T1_count]
    bfc_files = pe.Node(Function(input_names=['in_files','T1_count'],
                               output_names=['t1_corrected','t2_corrected'],
                               function=get_first_T1_and_T2), run_without_submitting=True, name='99_bfc_files' )
    tissueClassifyWF.connect( inputsSpec, 'T1_count', bfc_files, 'T1_count')
    tissueClassifyWF.connect(BABCext,'outputVolumes',bfc_files, 'in_files')


    tissueClassifyWF.connect(bfc_files,'t1_corrected',outputsSpec,'t1_corrected')
    tissueClassifyWF.connect(bfc_files,'t2_corrected',outputsSpec,'t2_corrected')
    #tissueClassifyWF.connect(bfc_files,'pd_corrected',outputsSpec,'pd_corrected')
    #tissueClassifyWF.connect(bfc_files,'fl_corrected',outputsSpec,'fl_corrected')

    """

    #############
    tissueClassifyWF.connect(BABCext, 'saveState', outputsSpec,
                             'atlasToSubjectRegistrationState')

    tissueClassifyWF.connect(BABCext, 'atlasToSubjectTransform', outputsSpec,
                             'atlasToSubjectTransform')

    def MakeInverseTransformFileName(TransformFileName):
        """### HACK:  This function is to work around a deficiency in BRAINSABCext where the inverse transform name is not being computed properly
          in the list outputs"""
        fixed_inverse_name = TransformFileName.replace(".h5", "_Inverse.h5")
        return [fixed_inverse_name]

    tissueClassifyWF.connect([
        (BABCext, outputsSpec, [(('atlasToSubjectTransform',
                                  MakeInverseTransformFileName),
                                 "atlasToSubjectInverseTransform")]),
    ])
    tissueClassifyWF.connect(BABCext, 'outputLabels', outputsSpec,
                             'outputLabels')
    tissueClassifyWF.connect(BABCext, 'outputDirtyLabels', outputsSpec,
                             'outputHeadLabels')

    tissueClassifyWF.connect(BABCext, 'outputT1AverageImage', outputsSpec,
                             't1_average')
    tissueClassifyWF.connect(BABCext, 'outputT2AverageImage', outputsSpec,
                             't2_average')
    tissueClassifyWF.connect(BABCext, 'outputPDAverageImage', outputsSpec,
                             'pd_average')
    tissueClassifyWF.connect(BABCext, 'outputFLAverageImage', outputsSpec,
                             'fl_average')
    ##  remove tissueClassifyWF.connect( [ ( BABCext, outputsSpec, [ (( 'outputAverageImages', getListIndexOrNoneIfOutOfRange, 0 ), "t1_average")] ), ] )
    ##  remove tissueClassifyWF.connect( [ ( BABCext, outputsSpec, [ (( 'outputAverageImages', getListIndexOrNoneIfOutOfRange, 1 ), "t2_average")] ), ] )
    ##  remove tissueClassifyWF.connect( [ ( BABCext, outputsSpec, [ (( 'outputAverageImages', getListIndexOrNoneIfOutOfRange, 2 ), "pd_average")] ), ] )

    MakePosteriorDictionaryNode = pe.Node(Function(
        function=MakePosteriorDictionaryFunc,
        input_names=['posteriorImages'],
        output_names=['posteriorDictionary']),
                                          run_without_submitting=True,
                                          name="99_makePosteriorDictionary")
    tissueClassifyWF.connect(BABCext, 'posteriorImages',
                             MakePosteriorDictionaryNode, 'posteriorImages')

    tissueClassifyWF.connect(MakePosteriorDictionaryNode,
                             'posteriorDictionary', outputsSpec,
                             'posteriorImages')

    return tissueClassifyWF
def register(warped_dir, subject_Tws, atlas_images, atlas_segmentations,
             n_jobs):
    """Multi-atlas segmentation of a subject via ANTs joint label fusion.

    Each atlas image is registered (Affine + SyN) to the subject T1w, then
    the atlas image and its segmentation are resampled into subject space,
    and all warped atlas/segmentation pairs are combined with
    ``antsJointFusion``.  The workflow is executed immediately with the
    MultiProc plugin using ``n_jobs`` parallel processes.
    """
    # Nipype's JLF node specifically requires list-valued inputs, so wrap
    # the single subject T1w (and T2w) into one-element lists.
    t1w_list = [subject_Tws[0]]
    t2w_list = [subject_Tws[1]]  # kept for parity; only the T1w list is used below

    input_spec = pe.Node(
        utility.IdentityInterface(fields=[
            'subject_Txw', 'subject_Txw_list', 'subject_dual_Tws',
            'atlas_image', 'atlas_segmentation'
        ]),
        iterables=[('atlas_image', atlas_images),
                   ('atlas_segmentation', atlas_segmentations)],
        synchronize=True,
        name='input_spec')
    # Static inputs: the subject T1w drives the registration.
    input_spec.inputs.subject_Txw = subject_Tws[0]  # using T1w here
    input_spec.inputs.subject_Txw_list = t1w_list
    input_spec.inputs.subject_dual_Tws = subject_Tws

    # Registration recipe, mirroring the classic ANTS invocation:
    #   CC[x, x, 1, 8]: [fixed, moving, weight, radius]
    #   -t SyN[0.25]: SyN transform with a gradient step of 0.25
    #   -r Gauss[3, 0]: sigma 0
    #   -I 30x50x20
    #   use-Histogram-Matching
    #   number-of-affine-iterations 10000x10000x10000x10000: 4-level pyramid
    #   MI-option 32x16000: 32 bins, 16000 samples
    reg = pe.Node(ants.Registration(), name='calc_registration')
    reg.inputs.dimension = 3
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Affine', 'SyN']
    reg.inputs.transform_parameters = [(2.0,), (0.25,)]  # default values syn
    reg.inputs.shrink_factors = [[8, 4, 2, 1], [4, 2, 1]]
    reg.inputs.smoothing_sigmas = [[3, 2, 1, 0], [2, 1, 0]]
    reg.inputs.sigma_units = ['vox', 'vox']
    reg.inputs.sampling_percentage = [0.05, None]
    reg.inputs.sampling_strategy = ['Random', 'None']
    reg.inputs.number_of_iterations = [[10000, 10000, 10000, 10000],
                                       [30, 50, 20]]
    reg.inputs.metric = ['MI', 'CC']
    reg.inputs.metric_weight = [1, 1]
    reg.inputs.radius_or_number_of_bins = [32, 8]
    reg.inputs.verbose = True
    reg.inputs.use_histogram_matching = [True, True]

    # Resample the atlas intensity image into subject space.
    warp_atlas = pe.Node(
        ants.ApplyTransforms(interpolation='BSpline', dimension=3),
        name='apply_warpfield_atlas')

    # Resample the atlas labels; nearest neighbour keeps labels integral.
    warp_segs = pe.Node(
        ants.ApplyTransforms(interpolation='NearestNeighbor', dimension=3),
        name='apply_warpfield_segs')

    # Collect every warped atlas/segmentation pair and fuse the labels.
    jlf = pe.JoinNode(
        ants.AntsJointFusion(
            dimension=3,
            alpha=0.1,
            beta=2.0,
            patch_radius=[2, 2, 2],
            search_radius=[3, 3, 3],
            out_label_fusion='out_label_fusion.nii.gz',
        ),
        joinsource='input_spec',
        joinfield=['atlas_image', 'atlas_segmentation_image'],
        name='joint_label_fusion')

    wf = pe.Workflow(name='wf', base_dir=warped_dir)
    wf.connect([
        (input_spec, reg, [('subject_Txw', 'fixed_image'),
                           ('atlas_image', 'moving_image')]),
        (reg, warp_atlas, [('forward_transforms', 'transforms')]),
        (input_spec, warp_atlas, [('atlas_image', 'input_image'),
                                  ('subject_Txw', 'reference_image')]),
        (reg, warp_segs, [('forward_transforms', 'transforms')]),
        (input_spec, warp_segs, [('atlas_segmentation', 'input_image'),
                                 ('subject_Txw', 'reference_image')]),
        (input_spec, jlf, [('subject_dual_Tws', 'target_image')]),
        (warp_atlas, jlf, [('output_image', 'atlas_image')]),
        (warp_segs, jlf, [('output_image', 'atlas_segmentation_image')]),
    ])

    wf.config['execution']['parameterize_dirs'] = False

    # Emit the workflow graph for inspection.
    wf.write_graph()

    # Nipype plugins specify how the workflow should be executed.
    wf.run(plugin='MultiProc', plugin_args={'n_procs': n_jobs})
示例#16
0
def create_workflow(config: AttrDict, resource_pool: ResourcePool,
                    context: Context):
    """Skull-strip reoriented T1w images with a pretrained 2-D U-Net, then
    refine the resulting brain mask via template registration.

    For every ``label-reorient_T1w`` entry in *resource_pool* this builds a
    chain of jobs (U-Net prediction, FSL FLIRT alignments, an ANTs SyN
    registration, and mask back-projection) and registers the refined mask
    and brain under ``desc='skullstrip-unet'``.
    """
    for _, rp in resource_pool[['label-reorient_T1w']]:
        anat = rp[R('T1w', label='reorient')]
        # Rebuild the U-Net graph and load pretrained weights; the model may
        # live on S3, in which case it is fetched into a temp directory.
        train_model = UNet2d(dim_in=config.dim_in,
                             num_conv_block=config.num_conv_block,
                             kernel_root=config.kernel_root)
        if config.unet_model.lower().startswith('s3://'):
            unet_path = S3Resource(config.unet_model,
                                   working_dir=tempfile.mkdtemp())()
        else:
            unet_path = config.unet_model
        # Map GPU-trained checkpoints onto CPU so inference works anywhere.
        checkpoint = torch.load(unet_path, map_location={'cuda:0': 'cpu'})
        train_model.load_state_dict(checkpoint['state_dict'])
        model = nn.Sequential(train_model, nn.Softmax2d())

        # create a node called unet_mask
        unet_mask = PythonJob(function=predict_volumes, reference='unet_mask')
        unet_mask.model = Resource(model)
        unet_mask.cimg_in = anat

        # Refine the U-Net mask with ANTs/FSL below.
        # fslmaths <whole head> -mul <mask> brain.nii.gz
        unet_masked_brain = NipypeJob(
            interface=fsl.MultiImageMaths(op_string="-mul %s"),
            reference='unet_masked_brain')
        unet_masked_brain.in_file = anat
        unet_masked_brain.operand_files = unet_mask.output_path

        # flirt -v -dof 6 -in brain.nii.gz -ref NMT_SS_0.5mm.nii.gz -o brain_rot2atl -omat brain_rot2atl.mat -interp sinc
        # TODO change it to ANTs linear transform
        native_brain_to_template_brain = NipypeJob(
            interface=fsl.FLIRT(reference=config.template_brain_only_for_anat,
                                dof=6,
                                interp='sinc'),
            reference='native_brain_to_template_brain')
        native_brain_to_template_brain.in_file = unet_masked_brain.out_file

        # flirt -in head.nii.gz -ref NMT_0.5mm.nii.gz -o head_rot2atl -applyxfm -init brain_rot2atl.mat
        # TODO change it to ANTs linear transform
        native_head_to_template_head = NipypeJob(
            interface=fsl.FLIRT(reference=config.template_skull_for_anat,
                                apply_xfm=True),
            reference='native_head_to_template_head')
        native_head_to_template_head.in_file = anat
        native_head_to_template_head.in_matrix_file = native_brain_to_template_brain.out_matrix_file

        # fslmaths NMT_SS_0.5mm.nii.gz -bin templateMask.nii.gz
        template_brain_mask = NipypeJob(
            interface=fsl.maths.MathsCommand(args='-bin'),
            reference='template_brain_mask')
        template_brain_mask.in_file = config.template_brain_only_for_anat

        # ANTS 3 -m  CC[head_rot2atl.nii.gz,NMT_0.5mm.nii.gz,1,5] -t SyN[0.25] -r Gauss[3,0] -o atl2T1rot -i 60x50x20 --use-Histogram-Matching  --number-of-affine-iterations 10000x10000x10000x10000x10000 --MI-option 32x16000
        ants_template_head_to_template = NipypeJob(
            interface=ants.Registration(),
            reference='template_head_to_template')
        ants_template_head_to_template.metric = ['CC']
        # In the command above, CC[fixed, moving, 1, 5] means metric weight 1
        # and cross-correlation radius 5.  The previous code set
        # metric_weight = [1, 5], conflating the weight with the radius and
        # leaving the radius unset.
        ants_template_head_to_template.metric_weight = [1]
        ants_template_head_to_template.radius_or_number_of_bins = [5]
        ants_template_head_to_template.moving_image = config.template_skull_for_anat
        ants_template_head_to_template.transforms = ['SyN']
        ants_template_head_to_template.transform_parameters = [(0.25, )]
        ants_template_head_to_template.interpolation = 'NearestNeighbor'
        ants_template_head_to_template.number_of_iterations = [[60, 50, 20]]
        ants_template_head_to_template.smoothing_sigmas = [[0.6, 0.2, 0.0]]
        ants_template_head_to_template.shrink_factors = [[4, 2, 1]]
        ants_template_head_to_template.convergence_threshold = [1.e-8]

        ants_template_head_to_template.fixed_image = native_head_to_template_head.out_file

        # antsApplyTransforms -d 3 -i templateMask.nii.gz -t atl2T1rotWarp.nii.gz atl2T1rotAffine.txt -r brain_rot2atl.nii.gz -o brain_rot2atl_mask.nii.gz
        template_head_transform_to_template = NipypeJob(
            interface=ants.ApplyTransforms(dimension=3),
            reference='template_head_transform_to_template')
        template_head_transform_to_template.input_image = template_brain_mask.out_file
        template_head_transform_to_template.reference_image = native_brain_to_template_brain.out_file
        template_head_transform_to_template.transforms = ants_template_head_to_template.forward_transforms

        # convert_xfm -omat brain_rot2native.mat -inverse brain_rot2atl.mat
        invt = NipypeJob(interface=fsl.ConvertXFM(invert_xfm=True),
                         reference='convert_xfm')
        invt.in_file = native_brain_to_template_brain.out_matrix_file

        # flirt -in brain_rot2atl_mask.nii.gz -ref brain.nii.gz -o brain_mask.nii.gz -applyxfm -init brain_rot2native.mat
        template_brain_to_native_brain = NipypeJob(
            interface=fsl.FLIRT(apply_xfm=True),
            reference='template_brain_to_native_brain')
        template_brain_to_native_brain.in_file = template_head_transform_to_template.output_image
        template_brain_to_native_brain.reference = unet_masked_brain.out_file
        template_brain_to_native_brain.in_matrix_file = invt.out_file

        # fslmaths brain_mask.nii.gz -thr .5 -bin brain_mask_thr.nii.gz
        refined_mask = NipypeJob(interface=fsl.Threshold(thresh=0.5,
                                                         args='-bin'),
                                 reference='refined_mask')
        refined_mask.in_file = template_brain_to_native_brain.out_file

        # get a new brain with mask
        refined_brain = NipypeJob(
            interface=fsl.MultiImageMaths(op_string="-mul %s"),
            reference='refined_brain')
        refined_brain.in_file = anat
        refined_brain.operand_files = refined_mask.out_file

        # Publish the refined outputs back into the resource pool.
        rp[R('T1w', desc='skullstrip-unet',
             suffix='mask')] = refined_mask.out_file
        rp[R('T1w', desc='skullstrip-unet',
             suffix='brain')] = refined_brain.out_file
示例#17
0
文件: nodes.py 项目: dancebean/SAMRI
def functional_registration(
    template,
    mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
    num_threads=4,
    phase_dictionary=GENERIC_PHASES,
    f_phases=["s_rigid", "affine", "syn"],
):
    """Build the functional-to-template registration nodes.

    Assembles an ANTs ``Registration`` node whose per-phase parameters are
    taken from *phase_dictionary* for each phase named in *f_phases*, plus
    an ``ApplyTransforms`` node for warping 4-D functional data.

    Returns
    -------
    (f_registration, warp) : tuple of nipype nodes
        The registration node and the transform-application node; the
        caller is expected to wire them into a workflow.
    """
    template = path.abspath(path.expanduser(template))

    # One parameter dict per registration phase, in the requested order.
    f_parameters = [phase_dictionary[selection] for selection in f_phases]

    f_registration = pe.Node(ants.Registration(), name="f_register")
    f_registration.inputs.fixed_image = template
    f_registration.inputs.output_transform_prefix = "output_"
    f_registration.inputs.transforms = [i["transforms"]
                                        for i in f_parameters]  ##
    f_registration.inputs.transform_parameters = [
        i["transform_parameters"] for i in f_parameters
    ]  ##
    f_registration.inputs.number_of_iterations = [
        i["number_of_iterations"] for i in f_parameters
    ]  #
    f_registration.inputs.dimension = 3
    f_registration.inputs.write_composite_transform = True
    f_registration.inputs.collapse_output_transforms = True
    f_registration.inputs.initial_moving_transform_com = True
    f_registration.inputs.metric = [i["metric"] for i in f_parameters]
    f_registration.inputs.metric_weight = [
        i["metric_weight"] for i in f_parameters
    ]
    f_registration.inputs.radius_or_number_of_bins = [
        i["radius_or_number_of_bins"] for i in f_parameters
    ]
    f_registration.inputs.sampling_strategy = [
        i["sampling_strategy"] for i in f_parameters
    ]
    f_registration.inputs.sampling_percentage = [
        i["sampling_percentage"] for i in f_parameters
    ]
    f_registration.inputs.convergence_threshold = [
        i["convergence_threshold"] for i in f_parameters
    ]
    f_registration.inputs.convergence_window_size = [
        i["convergence_window_size"] for i in f_parameters
    ]
    f_registration.inputs.smoothing_sigmas = [
        i["smoothing_sigmas"] for i in f_parameters
    ]
    f_registration.inputs.sigma_units = [
        i["sigma_units"] for i in f_parameters
    ]
    f_registration.inputs.shrink_factors = [
        i["shrink_factors"] for i in f_parameters
    ]
    f_registration.inputs.use_estimate_learning_rate_once = [
        i["use_estimate_learning_rate_once"] for i in f_parameters
    ]
    f_registration.inputs.use_histogram_matching = [
        i["use_histogram_matching"] for i in f_parameters
    ]
    f_registration.inputs.winsorize_lower_quantile = 0.05
    f_registration.inputs.winsorize_upper_quantile = 0.95
    f_registration.inputs.args = '--float'
    if mask:
        f_registration.inputs.fixed_image_masks = [
            path.abspath(path.expanduser(mask))
        ]
    f_registration.inputs.num_threads = num_threads

    warp = pe.Node(ants.ApplyTransforms(), name="f_warp")
    warp.inputs.reference_image = template
    # input_image_type=3 tells ANTs the input is a time series (4-D).
    warp.inputs.input_image_type = 3
    warp.inputs.interpolation = 'NearestNeighbor'
    warp.inputs.invert_transform_flags = [False]
    warp.inputs.terminal_output = 'file'
    # Honour the num_threads parameter (this was previously hardcoded to 4,
    # silently ignoring the caller's setting for the warp node).
    warp.num_threads = num_threads

    return f_registration, warp
示例#18
0
# fsl.FSLCommand.set_default_output_type('NIFTI')


#========================================================================================================
# In[6]:

# Fixed inputs for normalising subject FA maps to a common space:
# the study-specific FA template and the precomputed template->JHU-atlas
# composite transform (applied in a later step).
template_brain = '/home/in/aeed/ohbm/FA_template/adni_FAtemplate.nii.gz'
template_2_atlas_trans = '/home/in/aeed/ohbm/data_ohbm/adni_2_JHU_Composite.h5'
#========================================================================================================


## normalizing the anatomical_bias_corrected image to the common anatomical template
## Here only we are calculating the paramters, we apply them later.

# Three-stage ANTs registration (Rigid -> Affine -> SyN) of subject FA maps
# to the FA template.  Only parameters are computed here; warps are applied
# downstream.
reg_FA_2_temp = Node(ants.Registration(), name = 'reg_FA_2_temp')
reg_FA_2_temp.inputs.args='--float'
reg_FA_2_temp.inputs.collapse_output_transforms=True
reg_FA_2_temp.inputs.fixed_image=template_brain
reg_FA_2_temp.inputs.initial_moving_transform_com=True
reg_FA_2_temp.inputs.num_threads=8
reg_FA_2_temp.inputs.output_inverse_warped_image=True
reg_FA_2_temp.inputs.output_warped_image=True
# One entry per stage for the per-stage list inputs below.
reg_FA_2_temp.inputs.sigma_units=['vox']*3
reg_FA_2_temp.inputs.transforms= ['Rigid', 'Affine', 'SyN']
reg_FA_2_temp.inputs.winsorize_lower_quantile=0.005
reg_FA_2_temp.inputs.winsorize_upper_quantile=0.995
reg_FA_2_temp.inputs.convergence_threshold=[1e-06]
reg_FA_2_temp.inputs.convergence_window_size=[10]
reg_FA_2_temp.inputs.metric=['MI', 'MI', 'CC']
# NOTE(review): smoothing_sigmas, shrink_factors, number_of_iterations and
# radius_or_number_of_bins are not set in this visible span — presumably
# configured further down in the script; confirm before relying on defaults.
reg_FA_2_temp.inputs.metric_weight=[1.0]*3
示例#19
0
def structural_rigid(
    template="/Users/marksm/GitHub/mriPipeline/ants_test/template4.nii.gz",
    input_image="/Users/marksm/GitHub/mriPipeline/ants_test/source.nii",
    output_image='hard2_new_32_rigid_affine_more_rigid_CC.nii.gz',
):
    """N4-bias-correct *input_image* and register it to *template*.

    NOTE: despite the function name, the registration uses a single ANTs
    ``Similarity`` stage (rigid body plus isotropic scaling), not a pure
    rigid-body transform.

    Returns
    -------
    The nipype result object of the registration run (previously the result
    was computed but silently discarded).
    """

    template = os.path.abspath(os.path.expanduser(template))
    input_image = os.path.abspath(os.path.expanduser(input_image))
    output_image = os.path.abspath(os.path.expanduser(output_image))

    # Bias-field correction before registration.
    n4 = ants.N4BiasFieldCorrection()
    n4.inputs.dimension = 3
    n4.inputs.input_image = input_image
    # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
    n4.inputs.bspline_fitting_distance = 100
    n4.inputs.shrink_factor = 2
    n4.inputs.n_iterations = [200, 200, 200, 200]
    n4.inputs.convergence_threshold = 1e-11
    # n4.inputs.output_image = 'ss_n4_{}_ofM{}.nii.gz'.format(participant,i)
    n4.inputs.output_image = 'hard.nii.gz'
    n4_res = n4.run()

    struct_registration = ants.Registration()
    struct_registration.inputs.fixed_image = template
    struct_registration.inputs.output_transform_prefix = "output_"
    struct_registration.inputs.transforms = ['Similarity']  ##
    struct_registration.inputs.transform_parameters = [
        (60., ),
    ]  ##
    struct_registration.inputs.number_of_iterations = [
        [1000, 1000, 200],
    ]  #
    struct_registration.inputs.dimension = 3
    struct_registration.inputs.write_composite_transform = True
    struct_registration.inputs.collapse_output_transforms = True
    struct_registration.inputs.initial_moving_transform_com = True
    # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
    struct_registration.inputs.metric = [
        'MI',
    ]
    struct_registration.inputs.metric_weight = [
        1,
    ]
    struct_registration.inputs.radius_or_number_of_bins = [
        10,
    ]  #
    struct_registration.inputs.sampling_strategy = [
        'Random',
    ]
    struct_registration.inputs.sampling_percentage = [
        0.3,
    ]
    struct_registration.inputs.convergence_threshold = [
        1.e-16,
    ]  #
    struct_registration.inputs.convergence_window_size = [
        10,
    ]
    struct_registration.inputs.smoothing_sigmas = [
        [4, 2, 1],
    ]
    struct_registration.inputs.sigma_units = [
        'vox',
    ]
    struct_registration.inputs.shrink_factors = [
        [4, 2, 1],
    ]
    struct_registration.inputs.use_estimate_learning_rate_once = [
        True,
    ]
    # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
    struct_registration.inputs.use_histogram_matching = [
        False,
    ]
    struct_registration.inputs.winsorize_lower_quantile = 0.005
    struct_registration.inputs.winsorize_upper_quantile = 0.98
    struct_registration.inputs.args = '--float'
    struct_registration.inputs.num_threads = 6

    # Register the bias-corrected image, not the raw input.
    struct_registration.inputs.moving_image = n4_res.outputs.output_image
    # struct_registration.inputs.output_warped_image = 'ss_{}_ofM{}.nii.gz'.format(participant,i)
    struct_registration.inputs.output_warped_image = output_image
    res = struct_registration.run()
    return res
示例#20
0
def sdc_unwarp(name=SDC_UNWARP_NAME, ref_vol=None, method='jac'):
    """
    This workflow takes an estimated fieldmap and a target image and applies TOPUP,
    an :abbr:`SDC (susceptibility-derived distortion correction)` method in FSL to
    unwarp the target image.

    Input fields:
    ~~~~~~~~~~~~~

      inputnode.in_file - the image(s) to which this correction will be applied
      inputnode.in_mask - a brain mask corresponding to the in_file image
      inputnode.fmap_ref - the fieldmap reference (generally, a *magnitude* image or the
                           resulting SE image)
      inputnode.fmap_mask - a brain mask in fieldmap-space
      inputnode.fmap - a fieldmap in Hz
      inputnode.hmc_movpar - the head motion parameters (iff inputnode.in_file is only
                             one 4D file)

    Output fields:
    ~~~~~~~~~~~~~~

      outputnode.out_file - the in_file after susceptibility-distortion correction.

    """

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_file', 'fmap_ref', 'fmap_mask', 'fmap',
                'hmc_movpar']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']), name='outputnode')

    # Compute movpar file iff we have several images with different
    # PE directions.
    # 'in_ref' must be declared in input_names: nipype Function inputs are
    # dynamic traits, and assigning an undeclared input (as done just below)
    # raises a TraitError.  It was previously missing from this list.
    align = pe.Node(niu.Function(
        input_names=['in_files', 'in_movpar', 'in_ref'],
        output_names=['out_file', 'ref_vol', 'ref_mask', 'out_movpar'],
        function=_multiple_pe_hmc), name='AlignMultiplePE')
    align.inputs.in_ref = ref_vol

    # Read metadata
    meta = pe.MapNode(niu.Function(
        input_names=['in_file'], output_names=['out_dict'], function=_get_metadata),
        iterfield=['in_file'], name='metadata')

    encfile = pe.Node(interface=niu.Function(
        input_names=['input_images', 'in_dict'], output_names=['parameters_file'],
        function=create_encoding_file), name='TopUp_encfile', updatehash=True)

    fslsplit = pe.Node(fsl.Split(dimension='t'), name='ImageHMCSplit')

    # Register the reference of the fieldmap to the reference
    # of the target image (the one that shall be corrected)
    fmap2ref = pe.Node(ants.Registration(output_warped_image=True),
                       name='Fieldmap2ImageRegistration')

    # Registration settings are read from a packaged JSON file; disable
    # the grabber's always-run behaviour so cached results are reused.
    grabber = nio.JSONFileGrabber()
    setattr(grabber, '_always_run', False)
    fmap2ref_params = pe.Node(grabber, name='Fieldmap2ImageRegistration_params')
    fmap2ref_params.inputs.in_file = (
        pkgr.resource_filename('fmriprep', 'data/fmap-any_registration.json'))

    applyxfm = pe.Node(ants.ApplyTransforms(
        dimension=3, interpolation='Linear'), name='Fieldmap2ImageApply')

    # Convert the fieldmap into TOPUP's spline-coefficient representation.
    topup_adapt = pe.Node(niu.Function(
        input_names=['in_file', 'in_ref', 'in_movpar'],
        output_names=['out_fieldcoef', 'out_movpar'],
        function=_gen_coeff), name='TopUpAdapt')

    # Use the least-squares method to correct the dropout of the SBRef images
    unwarp = pe.Node(fsl.ApplyTOPUP(method=method), name='TopUpApply')


    workflow.connect([
        (inputnode, meta, [('in_file', 'in_file')]),
        (inputnode, align, [('in_file', 'in_files'),
                            ('hmc_movpar', 'in_movpar')]),
        (inputnode, applyxfm, [('fmap', 'input_image')]),
        (inputnode, encfile, [('in_file', 'input_images')]),
        (inputnode, fmap2ref, [('fmap_ref', 'moving_image'),
                               ('fmap_mask', 'moving_image_mask')]),

        (align, fmap2ref, [('ref_vol', 'fixed_image'),
                           ('ref_mask', 'fixed_image_mask')]),
        (align, applyxfm, [('ref_vol', 'reference_image')]),
        (align, topup_adapt, [('ref_vol', 'in_ref'),
                              ('out_movpar', 'in_movpar')]),

        (meta, encfile, [('out_dict', 'in_dict')]),

        (fmap2ref, applyxfm, [
            ('forward_transforms', 'transforms'),
            ('forward_invert_flags', 'invert_transform_flags')]),
        (align, fslsplit, [('out_file', 'in_file')]),
        (applyxfm, topup_adapt, [('output_image', 'in_file')]),
        (fslsplit, unwarp, [('out_files', 'in_files'),
                            (('out_files', gen_list), 'in_index')]),
        (topup_adapt, unwarp, [('out_fieldcoef', 'in_topup_fieldcoef'),
                               ('out_movpar', 'in_topup_movpar')]),
        (encfile, unwarp, [('parameters_file', 'encoding_file')]),
        (unwarp, outputnode, [('out_corrected', 'out_file')])
    ])

    # Connect registration settings in the end, not to clutter the code
    workflow.connect([
        (fmap2ref_params, fmap2ref, [
            ('transforms', 'transforms'),
            ('transform_parameters', 'transform_parameters'),
            ('number_of_iterations', 'number_of_iterations'),
            ('dimension', 'dimension'),
            ('metric', 'metric'),
            ('metric_weight', 'metric_weight'),
            ('radius_or_number_of_bins', 'radius_or_number_of_bins'),
            ('sampling_strategy', 'sampling_strategy'),
            ('sampling_percentage', 'sampling_percentage'),
            ('convergence_threshold', 'convergence_threshold'),
            ('convergence_window_size', 'convergence_window_size'),
            ('smoothing_sigmas', 'smoothing_sigmas'),
            ('sigma_units', 'sigma_units'),
            ('shrink_factors', 'shrink_factors'),
            ('use_estimate_learning_rate_once', 'use_estimate_learning_rate_once'),
            ('use_histogram_matching', 'use_histogram_matching'),
            ('initial_moving_transform_com', 'initial_moving_transform_com'),
            ('collapse_output_transforms', 'collapse_output_transforms'),
            ('winsorize_upper_quantile', 'winsorize_upper_quantile'),
            ('winsorize_lower_quantile', 'winsorize_lower_quantile')
        ])
    ])

    return workflow
示例#21
0
def structural_to_functional_per_participant_test(
    subjects_sessions,
    template="~/GitHub/mriPipeline/templates/waxholm/new/WHS_SD_masked.nii.gz",
    f_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_SE_EPI/f_bru2nii/",
    s_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_T2_TurboRARE/s_bru2nii/",
    num_threads=3,
):
    """Register each session's structural image to *template*, then apply the
    resulting transform to the matching functional image.

    Per session the pipeline is: N4 bias correction (twice, with different
    settings for registration vs. masking), intensity cutoff, BET brain
    extraction, masking, ANTs Affine+SyN registration of the masked
    structural, and finally warping of the functional image with the
    composite transform. Intermediate and final volumes are written to the
    current working directory, named from the subject/session values.

    Parameters
    ----------
    subjects_sessions : iterable of dict
        Each dict supplies the ``subject`` and ``session`` keys used to
        format ``f_file_format``/``s_file_format``; its values also name the
        output files.
    template : str
        Path (``~`` allowed) to the fixed registration target.
    f_file_format, s_file_format : str
        Format strings resolving to the directories that hold the functional
        and structural NIfTI images, respectively.
    num_threads : int
        Thread count handed to the ANTs registration and transform steps.
    """
    template = os.path.expanduser(template)
    for subject_session in subjects_sessions:
        func_image_dir = os.path.expanduser(
            f_file_format.format(**subject_session))
        struct_image_dir = os.path.expanduser(
            s_file_format.format(**subject_session))
        # BUGFIX: these names were previously bound only when a file matched,
        # so a directory that existed but held no NIfTI file triggered a
        # NameError below. Initialise them and skip such sessions instead.
        func_image = None
        struct_image = None
        try:
            # If several NIfTI files are present, the last one listed wins
            # (original behaviour preserved).
            for myfile in os.listdir(func_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    func_image = os.path.join(func_image_dir, myfile)
            for myfile in os.listdir(struct_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    struct_image = os.path.join(struct_image_dir, myfile)
        except FileNotFoundError:
            # Missing session directory: silently skip, as before.
            continue
        if func_image is None or struct_image is None:
            # Directory existed but contained no usable image; skip session.
            continue

        # Bias correction of the copy used as the registration moving image.
        n4 = ants.N4BiasFieldCorrection()
        n4.inputs.dimension = 3
        n4.inputs.input_image = struct_image
        # correction bias is introduced (along the z-axis) if the following
        # value is set to under 85. This is likely contingent on resolution.
        n4.inputs.bspline_fitting_distance = 100
        n4.inputs.shrink_factor = 2
        n4.inputs.n_iterations = [200, 200, 200, 200]
        n4.inputs.convergence_threshold = 1e-11
        n4.inputs.output_image = '{}_{}_1_biasCorrection_forRegistration.nii.gz'.format(
            *subject_session.values())
        n4_res = n4.run()

        # Second, more aggressive bias correction used only for mask creation.
        _n4 = ants.N4BiasFieldCorrection()
        _n4.inputs.dimension = 3
        _n4.inputs.input_image = struct_image
        # correction bias is introduced (along the z-axis) if the following
        # value is set to under 85. This is likely contingent on resolution.
        _n4.inputs.bspline_fitting_distance = 95
        _n4.inputs.shrink_factor = 2
        _n4.inputs.n_iterations = [500, 500, 500, 500]
        _n4.inputs.convergence_threshold = 1e-14
        _n4.inputs.output_image = '{}_{}_1_biasCorrection_forMasking.nii.gz'.format(
            *subject_session.values())
        _n4_res = _n4.run()

        # We do this on a separate bias-corrected image to remove
        # hyperintensities, which we have to create in order to prevent brain
        # regions being caught by the negative threshold.
        struct_cutoff = ImageMaths()
        struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
        struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
        struct_cutoff_res = struct_cutoff.run()

        # Brain extraction on the thresholded image.
        struct_BET = BET()
        struct_BET.inputs.mask = True
        struct_BET.inputs.frac = 0.3
        struct_BET.inputs.robust = True
        struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
        struct_BET.inputs.out_file = '{}_{}_2_brainExtraction.nii.gz'.format(
            *subject_session.values())
        struct_BET_res = struct_BET.run()

        # We need/can not apply a fill, because the "holes", if any, will be
        # at the rostral edge (touching it, and thus not counting as holes).
        struct_mask = ApplyMask()
        struct_mask.inputs.in_file = n4_res.outputs.output_image
        struct_mask.inputs.mask_file = struct_BET_res.outputs.mask_file
        struct_mask.inputs.out_file = '{}_{}_3_brainMasked.nii.gz'.format(
            *subject_session.values())
        struct_mask_res = struct_mask.run()

        # Two-stage ANTs registration: Affine initialisation followed by SyN.
        struct_registration = ants.Registration()
        struct_registration.inputs.fixed_image = template
        struct_registration.inputs.output_transform_prefix = "output_"
        struct_registration.inputs.transforms = ['Affine', 'SyN']  ##
        struct_registration.inputs.transform_parameters = [(1.0, ),
                                                           (1.0, 3.0, 5.0)
                                                           ]  ##
        struct_registration.inputs.number_of_iterations = [[
            2000, 1000, 500
        ], [100, 100, 100]]  #
        struct_registration.inputs.dimension = 3
        struct_registration.inputs.write_composite_transform = True
        struct_registration.inputs.collapse_output_transforms = True
        struct_registration.inputs.initial_moving_transform_com = True
        # Tested on Affine transform: CC takes too long; Demons does not
        # tilt, but moves the slices too far caudally; GC tilts too much.
        struct_registration.inputs.metric = ['MeanSquares', 'Mattes']
        struct_registration.inputs.metric_weight = [1, 1]
        struct_registration.inputs.radius_or_number_of_bins = [16, 32]  #
        struct_registration.inputs.sampling_strategy = ['Random', None]
        struct_registration.inputs.sampling_percentage = [0.3, 0.3]
        struct_registration.inputs.convergence_threshold = [1.e-11,
                                                            1.e-8]  #
        struct_registration.inputs.convergence_window_size = [20, 20]
        struct_registration.inputs.smoothing_sigmas = [[4, 2, 1],
                                                       [4, 2, 1]]
        struct_registration.inputs.sigma_units = ['vox', 'vox']
        struct_registration.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]]
        struct_registration.inputs.use_estimate_learning_rate_once = [
            True, True
        ]
        # If the fixed_image is not acquired similarly to the moving_image
        # (e.g. RARE to histological (e.g. AMBMC)) this should be False.
        struct_registration.inputs.use_histogram_matching = [False, False]
        struct_registration.inputs.winsorize_lower_quantile = 0.005
        struct_registration.inputs.winsorize_upper_quantile = 0.98
        struct_registration.inputs.args = '--float'
        struct_registration.inputs.num_threads = num_threads

        struct_registration.inputs.moving_image = struct_mask_res.outputs.out_file
        struct_registration.inputs.output_warped_image = '{}_{}_4_structuralRegistration.nii.gz'.format(
            *subject_session.values())
        struct_registration_res = struct_registration.run()

        # Apply the structural composite transform to the functional image.
        warp = ants.ApplyTransforms()
        warp.inputs.reference_image = template
        warp.inputs.input_image_type = 3
        warp.inputs.interpolation = 'Linear'
        warp.inputs.invert_transform_flags = [False]
        warp.inputs.terminal_output = 'file'
        warp.inputs.output_image = '{}_{}_5_functionalWarp.nii.gz'.format(
            *subject_session.values())
        warp.num_threads = num_threads

        warp.inputs.input_image = func_image
        warp.inputs.transforms = struct_registration_res.outputs.composite_transform
        warp.run()
    return odi, ficvf


# Wrap the NODDI fitting routine (the `NODDI` function defined elsewhere in
# this file) in a nipype Function node so it can be wired into a workflow:
# it takes a brain image and its mask, and exposes the fitted ODI and FICVF
# maps as node outputs. Note the node deliberately shadows the function name.
NODDI = Node(name='NODDI',
             interface=Function(input_names=['brain_nii', 'brain_mask_nii'],
                                output_names=['odi', 'ficvf'],
                                function=NODDI))

#-----------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------
# In[9]: Transform maps to the Waxholm template.
# Originally the transformations from the Kurtosis pipeline were reused.
# Update: that did not work, so the ODI map is now registered directly to the
# Waxholm / study-based ODI template.
#>>>>>>>>>>>>>>>>>>>>>>>>>ODI

# Three-stage (Rigid -> Affine -> SyN) ANTs registration of the ODI map onto
# the Waxholm FA template (Wax_FA_Template is defined elsewhere in this file).
# Single-element convergence settings are broadcast across stages by nipype.
ODI_to_WAX_Temp = Node(ants.Registration(), name='ODI_To_WAX_Template')
ODI_to_WAX_Temp.inputs.args = '--float'
ODI_to_WAX_Temp.inputs.collapse_output_transforms = True
ODI_to_WAX_Temp.inputs.initial_moving_transform_com = True
ODI_to_WAX_Temp.inputs.fixed_image = Wax_FA_Template
ODI_to_WAX_Temp.inputs.num_threads = 4
ODI_to_WAX_Temp.inputs.output_inverse_warped_image = True
ODI_to_WAX_Temp.inputs.output_warped_image = True
ODI_to_WAX_Temp.inputs.sigma_units = ['vox'] * 3
ODI_to_WAX_Temp.inputs.transforms = ['Rigid', 'Affine', 'SyN']
# ODI_to_WAX_Temp.inputs.terminal_output='file' #returns an error
ODI_to_WAX_Temp.inputs.winsorize_lower_quantile = 0.005
ODI_to_WAX_Temp.inputs.winsorize_upper_quantile = 0.995
ODI_to_WAX_Temp.inputs.convergence_threshold = [1e-6]
ODI_to_WAX_Temp.inputs.convergence_window_size = [10]
ODI_to_WAX_Temp.inputs.metric = ['MI', 'MI', 'CC']
示例#23
0
def structural_rigid_flirt_nonlin_syn(
    template="/Users/marksm/GitHub/mriPipeline/ants_test/template4.nii.gz",
    input_image="/Users/marksm/GitHub/mriPipeline/ants_test/source_add.nii.gz",
    output_image='final_test.nii.gz',
):
    """Experimental registration: rigid-body alignment with FSL's FLIRT
    (including coordinate-system transformation), followed by ANTs SyN for
    the non-linear part of the registration.

    The chain is N4 bias correction -> FLIRT (6 DOF, full search range) ->
    SyN. Intermediate files ('hard_flirt.nii.gz', 'after_flirt.nii.gz',
    'subject_to_template.mat') and the final *output_image* are written to
    the current working directory. Returns nothing.
    """

    template = os.path.abspath(os.path.expanduser(template))
    input_image = os.path.abspath(os.path.expanduser(input_image))
    output_image = os.path.abspath(os.path.expanduser(output_image))

    # N4 bias-field correction of the moving image before registration.
    n4 = ants.N4BiasFieldCorrection()
    n4.inputs.dimension = 3
    n4.inputs.input_image = input_image
    # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
    n4.inputs.bspline_fitting_distance = 10
    # n4.inputs.shrink_factor = 2
    n4.inputs.n_iterations = [200, 200, 200, 200]
    n4.inputs.convergence_threshold = 1e-11
    # n4.inputs.output_image = 'ss_n4_{}_ofM{}.nii.gz'.format(participant,i)
    n4.inputs.output_image = 'hard_flirt.nii.gz'
    n4_res = n4.run()

    # Rigid (6 DOF) FLIRT with exhaustive rotational search in all axes.
    flt = fsl.FLIRT(cost_func='corratio',
                    dof=6,
                    searchr_x=[-180, 180],
                    searchr_y=[-180, 180],
                    searchr_z=[-180, 180],
                    force_scaling=True)
    flt.inputs.in_file = n4_res.outputs.output_image
    flt.inputs.reference = template
    flt.inputs.out_file = 'after_flirt.nii.gz'
    flt.inputs.out_matrix_file = 'subject_to_template.mat'
    flt_res = flt.run()

    # Single-stage SyN on the already rigidly-aligned image.
    struct_registration = ants.Registration()
    struct_registration.inputs.fixed_image = template
    struct_registration.inputs.output_transform_prefix = "output_"
    struct_registration.inputs.transforms = ['SyN']  ##
    struct_registration.inputs.transform_parameters = [(0.1, 3.0, 0.0)]  ##
    struct_registration.inputs.number_of_iterations = [[2000, 1000, 500]]  #
    struct_registration.inputs.dimension = 3
    struct_registration.inputs.write_composite_transform = True
    struct_registration.inputs.collapse_output_transforms = True
    struct_registration.inputs.initial_moving_transform_com = True
    # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
    struct_registration.inputs.metric = ['Mattes']
    struct_registration.inputs.metric_weight = [1]
    struct_registration.inputs.radius_or_number_of_bins = [8]  #
    struct_registration.inputs.sampling_strategy = ['Random']
    struct_registration.inputs.sampling_percentage = [0.3]
    struct_registration.inputs.convergence_threshold = [1.e-10]  #
    struct_registration.inputs.convergence_window_size = [4]
    struct_registration.inputs.smoothing_sigmas = [[4, 2, 1]]
    struct_registration.inputs.sigma_units = ['vox']
    struct_registration.inputs.shrink_factors = [[4, 2, 1]]
    struct_registration.inputs.use_estimate_learning_rate_once = [True]
    # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
    struct_registration.inputs.use_histogram_matching = [
        False,
    ]
    struct_registration.inputs.winsorize_lower_quantile = 0.005
    struct_registration.inputs.winsorize_upper_quantile = 0.98
    struct_registration.inputs.args = '--float'
    struct_registration.inputs.num_threads = 6

    struct_registration.inputs.moving_image = flt_res.outputs.out_file
    # struct_registration.inputs.output_warped_image = 'ss_{}_ofM{}.nii.gz'.format(participant,i)
    struct_registration.inputs.output_warped_image = output_image
    # NOTE(review): the registration result is discarded; callers only get
    # the file written to *output_image*.
    res = struct_registration.run()
示例#24
0
def bmap_registration(name="Bmap_Registration"):
    """
    A workflow to register a source B0 map to the T1w image of a real subject.

    Inputs (``inputnode``): ``mag`` and ``pha`` (fieldmap magnitude/phase),
    ``t1w_brain``, and ``dwi_mask``. Outputs (``outputnode``): the warped
    ``magnitude`` and ``mag_brain`` images, plus the phase as ``unwrapped``
    and re-``wrapped`` (noise-injected) magnitude+phase pairs, all regridded
    onto the ``dwi_mask`` grid.
    """
    workflow = pe.Workflow(name=name)

    # Setup i/o
    inputnode = pe.Node(
        niu.IdentityInterface(fields=['mag', 'pha', 't1w_brain', 'dwi_mask']),
        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['magnitude', 'wrapped', 'unwrapped', 'mag_brain']),
                         name='outputnode')

    # Setup initial nodes: take the first magnitude volume, reorient both
    # magnitude and (unwrapped) phase to RAS.
    fslroi = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name='GetFirst')

    mag2RAS = pe.Node(fs.MRIConvert(out_type="niigz", out_orientation="RAS"),
                      name='MagToRAS')
    unwrap = pe.Node(PhaseUnwrap(), name='PhaseUnwrap')
    pha2RAS = pe.Node(fs.MRIConvert(out_type="niigz", out_orientation="RAS"),
                      name='PhaToRAS')

    n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias')
    bet = pe.Node(fsl.BET(frac=0.4, mask=True), name='BrainExtraction')

    # Sigmoid intensity filters make the fieldmap magnitude and the T1w
    # histograms comparable before the Mattes-metric registration.
    enh_mag = pe.Node(SigmoidFilter(upper_perc=98.8, lower_perc=10.0),
                      name='enh_mag')
    enh_t1w = pe.Node(SigmoidFilter(upper_perc=78.0, lower_perc=15.0),
                      name='enh_t1w')

    # Setup ANTS and registration
    def _aslist(tname):
        # Helper: wrap a transform name into a list, as expected downstream.
        import numpy as np
        return np.atleast_1d(tname).tolist()

    # Two-stage rigid registration of the fieldmap magnitude to the T1w.
    fmm2t1w = pe.Node(ants.Registration(output_warped_image=True),
                      name="FMm_to_T1w")
    fmm2t1w.inputs.transforms = ['Rigid'] * 2
    fmm2t1w.inputs.transform_parameters = [(1.0, )] * 2
    fmm2t1w.inputs.number_of_iterations = [[250], [100]]
    fmm2t1w.inputs.dimension = 3
    fmm2t1w.inputs.metric = ['Mattes', 'Mattes']
    fmm2t1w.inputs.metric_weight = [1.0] * 2
    fmm2t1w.inputs.radius_or_number_of_bins = [64, 64]
    fmm2t1w.inputs.sampling_strategy = ['Regular', 'Random']
    fmm2t1w.inputs.sampling_percentage = [None, 0.1]
    fmm2t1w.inputs.convergence_threshold = [1.e-5, 1.e-7]
    fmm2t1w.inputs.convergence_window_size = [10, 5]
    fmm2t1w.inputs.smoothing_sigmas = [[6.0], [2.0]]
    fmm2t1w.inputs.sigma_units = ['vox'] * 2
    fmm2t1w.inputs.shrink_factors = [[6], [1]]  # ,[1] ]
    fmm2t1w.inputs.use_estimate_learning_rate_once = [True] * 2
    fmm2t1w.inputs.use_histogram_matching = [True] * 2
    fmm2t1w.inputs.initial_moving_transform_com = 0
    fmm2t1w.inputs.collapse_output_transforms = True

    binarize = pe.Node(fs.Binarize(min=0.1), name='BinT1')

    # Apply the forward transforms to phase and magnitude separately.
    warpPhase = pe.Node(ants.ApplyTransforms(dimension=3,
                                             interpolation='BSpline'),
                        name='WarpPhase')
    warpMag = pe.Node(ants.ApplyTransforms(dimension=3,
                                           interpolation='BSpline'),
                      name='WarpMag')

    # Final regrids and phase re-wrapping
    regrid_mag = pe.Node(fs.MRIConvert(resample_type='cubic',
                                       out_datatype='float'),
                         name='Regrid_mag')
    regrid_bmg = pe.Node(fs.MRIConvert(resample_type='cubic',
                                       out_datatype='float'),
                         name='Regrid_mag_brain')
    regrid_pha = pe.Node(fs.MRIConvert(resample_type='cubic',
                                       out_datatype='float'),
                         name='Regrid_pha')

    # denoise = pe.Node(niu.Function(
    #     input_names=['in_file', 'in_mask'], output_names=['out_file'],
    #     function=filter_fmap), name='SmoothBmap')

    addnoise = pe.Node(AddNoise(snr=30), name='PhaseAddNoise')
    wrap_pha = pe.Node(niu.Function(input_names=['in_file'],
                                    output_names=['out_file'],
                                    function=rads_ph_wrap),
                       name='PhaseWrap')

    # Merge brain-magnitude + phase into the two output pairs.
    mwrapped = pe.Node(niu.Merge(2), name='MergeWrapped')
    munwrapped = pe.Node(niu.Merge(2), name='MergeUnwrapped')

    workflow.connect([
        (inputnode, binarize, [('t1w_brain', 'in_file')]),
        (inputnode, fslroi, [('mag', 'in_file')]),
        # Connect first nodes
        (fslroi, mag2RAS, [('roi_file', 'in_file')]),
        (mag2RAS, n4, [('out_file', 'input_image')]),
        (n4, bet, [('output_image', 'in_file')]),
        (inputnode, enh_t1w, [('t1w_brain', 'in_file')]),
        (binarize, enh_t1w, [('binary_file', 'in_mask')]),
        (n4, enh_mag, [('output_image', 'in_file')]),
        (bet, enh_mag, [('mask_file', 'in_mask')]),
        # ANTs
        (enh_t1w, fmm2t1w, [('out_file', 'fixed_image')]),
        (enh_mag, fmm2t1w, [('out_file', 'moving_image')]),
        (binarize, fmm2t1w, [('binary_file', 'fixed_image_mask')]),
        (bet, fmm2t1w, [('mask_file', 'moving_image_mask')]),

        # Unwrap
        (inputnode, unwrap, [('pha', 'in_file')]),
        (bet, unwrap, [('mask_file', 'in_mask')]),
        (unwrap, pha2RAS, [('out_file', 'in_file')]),

        # Transforms
        (inputnode, warpPhase, [('t1w_brain', 'reference_image')]),
        (pha2RAS, warpPhase, [('out_file', 'input_image')]),
        (fmm2t1w, warpPhase, [('forward_transforms', 'transforms'),
                              ('forward_invert_flags',
                               'invert_transform_flags')]),
        (inputnode, warpMag, [('t1w_brain', 'reference_image')]),
        (n4, warpMag, [('output_image', 'input_image')]),
        (fmm2t1w, warpMag, [('forward_transforms', 'transforms'),
                            ('forward_invert_flags', 'invert_transform_flags')
                            ]),
        (warpMag, regrid_mag, [('output_image', 'in_file')]),
        (inputnode, regrid_mag, [('dwi_mask', 'reslice_like')]),
        (fmm2t1w, regrid_bmg, [('warped_image', 'in_file')]),
        (inputnode, regrid_bmg, [('dwi_mask', 'reslice_like')]),
        (warpPhase, regrid_pha, [('output_image', 'in_file')]),
        (inputnode, regrid_pha, [('dwi_mask', 'reslice_like')]),
        (regrid_pha, addnoise, [('out_file', 'in_file')]),
        # (regrid_pha,        denoise, [('out_file', 'in_file')]),
        # (inputnode,         denoise, [('dwi_mask', 'in_mask')]),
        # (denoise,          addnoise, [('out_file', 'in_file')]),
        (inputnode, addnoise, [('dwi_mask', 'in_mask')]),
        (addnoise, wrap_pha, [('out_file', 'in_file')]),
        (regrid_bmg, munwrapped, [('out_file', 'in1')]),
        (regrid_pha, munwrapped, [('out_file', 'in2')]),
        # (denoise,        munwrapped, [('out_file', 'in2')]),
        (regrid_bmg, mwrapped, [('out_file', 'in1')]),
        (wrap_pha, mwrapped, [('out_file', 'in2')]),
        (regrid_mag, outputnode, [('out_file', 'magnitude')]),
        (regrid_bmg, outputnode, [('out_file', 'mag_brain')]),
        (munwrapped, outputnode, [('out', 'unwrapped')]),
        (mwrapped, outputnode, [('out', 'wrapped')]),
    ])
    return workflow
示例#25
0
def create_fs_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------

        name : name of workflow (default: 'registration')

    Inputs::

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.subject_id : FreeSurfer subject id
        inputspec.subjects_dir : FreeSurfer SUBJECTS_DIR
        inputspec.target_image : registration target

    Outputs::

        outputspec.func2anat_transform : BBRegister FSL-style transform
        outputspec.anat2target_transform : ANTs composite transform
        outputspec.transformed_files : transformed files in target space
        outputspec.transformed_mean : mean image in target space
    """

    register = Workflow(name=name)

    inputnode = Node(interface=IdentityInterface(fields=[
        'source_files', 'mean_image', 'subject_id', 'subjects_dir',
        'target_image'
    ]),
                     name='inputspec')

    outputnode = Node(interface=IdentityInterface(fields=[
        'func2anat_transform', 'out_reg_file', 'anat2target_transform',
        'transforms', 'transformed_mean', 'transformed_files', 'min_cost_file',
        'anat2target', 'aparc', 'mean2anat_mask'
    ]),
                      name='outputspec')

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(), name='fssource')
    fssource.run_without_submitting = True
    register.connect(inputnode, 'subject_id', fssource, 'subject_id')
    register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')

    convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert")
    register.connect(fssource, 'T1', convert, 'in_file')

    # Coregister the median to the surface
    bbregister = Node(freesurfer.BBRegister(registered_file=True),
                      name='bbregister')
    bbregister.inputs.init = 'fsl'
    bbregister.inputs.contrast_type = 't2'
    bbregister.inputs.out_fsl_file = True
    bbregister.inputs.epi_mask = True
    register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
    register.connect(inputnode, 'mean_image', bbregister, 'source_file')
    register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')

    # Create a mask of the median coregistered to the anatomical image
    mean2anat_mask = Node(fsl.BET(mask=True), name='mean2anat_mask')
    register.connect(bbregister, 'registered_file', mean2anat_mask, 'in_file')
    """
    use aparc+aseg's brain mask
    """

    binarize = Node(fs.Binarize(min=0.5, out_type="nii.gz", dilate=1),
                    name="binarize_aparc")
    register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize,
                     "in_file")

    # Skull-strip the converted T1 with the binarized aparc+aseg mask.
    stripper = Node(fsl.ApplyMask(), name='stripper')
    register.connect(binarize, "binary_file", stripper, "mask_file")
    register.connect(convert, 'out_file', stripper, 'in_file')
    """
    Apply inverse transform to aparc file
    """

    aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
                                                 interp='nearest'),
                    name='aparc_inverse_transform')
    register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
    register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm,
                     'target_file')
    register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')
    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """

    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[
        100, 30, 20
    ]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    # Final SyN stage combines Mattes MI with cross-correlation (CC).
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.float = True
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    # Cluster resource hints for SGE / SLURM execution plugins.
    reg.plugin_args = {
        'qsub_args': '-pe orte 4',
        'sbatch_args': '--mem=6G -c 4'
    }
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image', reg, 'fixed_image')
    """
    Concatenate the affine and ants transforms into a list
    """

    merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, 'composite_transform', merge, 'in1')
    """
    Transform the mean image. First to anatomical and then to target
    """

    warpmean = Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 0
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.terminal_output = 'file'
    warpmean.inputs.args = '--float'
    # warpmean.inputs.num_threads = 4
    # warpmean.plugin_args = {'sbatch_args': '--mem=4G -c 4'}
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = pe.MapNode(ants.ApplyTransforms(),
                         iterfield=['input_image'],
                         name='warpall')
    warpall.inputs.input_image_type = 0
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.terminal_output = 'file'
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 2
    warpall.plugin_args = {'sbatch_args': '--mem=6G -c 2'}
    """
    Assign all the output files
    """

    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(warpall, 'output_image', outputnode, 'transformed_files')

    register.connect(inputnode, 'target_image', warpmean, 'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')
    register.connect(inputnode, 'target_image', warpall, 'reference_image')
    register.connect(inputnode, 'source_files', warpall, 'input_image')
    register.connect(merge, 'out', warpall, 'transforms')
    """
    Assign all the output files
    """

    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc')
    register.connect(bbregister, 'out_fsl_file', outputnode,
                     'func2anat_transform')
    register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file')
    register.connect(bbregister, 'min_cost_file', outputnode, 'min_cost_file')
    register.connect(mean2anat_mask, 'mask_file', outputnode, 'mean2anat_mask')
    register.connect(reg, 'composite_transform', outputnode,
                     'anat2target_transform')
    register.connect(merge, 'out', outputnode, 'transforms')

    return register
示例#26
0
def CreateMALFWorkflow(WFname, onlyT1, master_config, BASE_DATA_GRABBER_DIR=None, runFixFusionLabelMap=True):
    """Build a MALF (Multi-Atlas Label Fusion) nipype workflow.

    For every atlas subject in the MALF atlas database, this workflow:
      1. seeds an atlas-to-subject transform with BRAINSLandmarkInitializer,
      2. runs a multi-stage ANTs (Affine, Affine, SyN, SyN) registration
         (optionally masked with BRAINSROIAuto-derived ROIs),
      3. warps the atlas intensity image(s) and label map to the subject,
    then fuses all warped labels with ``ants.JointFusion``.  The fused label
    map is optionally post-processed (CSF / venous-blood injection), recoded
    to FreeSurfer-standard labels and, if configured, to lobar labels.

    :param WFname: name for the returned ``pe.Workflow``
    :param onlyT1: if True use only T1 (n_modality = 1); otherwise T1 + T2
    :param master_config: dict providing at least 'queue', 'long_q',
        'malf_atlas_db_base' and 'relabel2lobes_filename'
    :param BASE_DATA_GRABBER_DIR: unused here; kept for call-site
        compatibility
    :param runFixFusionLabelMap: if True, inject surface CSF and VB into the
        fused label map before recoding
    :return: the assembled ``pe.Workflow``
    """
    from nipype.interfaces import ants
    # Hoisted out of the per-atlas loop: the import is loop-invariant.
    from nipype.interfaces.semtools.segmentation.specialized import BRAINSROIAuto

    if onlyT1:
        n_modality = 1
    else:
        n_modality = 2
    CLUSTER_QUEUE = master_config['queue']
    CLUSTER_QUEUE_LONG = master_config['long_q']

    MALFWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['subj_t1_image', #Desired image to create label map for
                                                             'subj_t2_image', #Desired image to create label map for
                                                             'subj_lmks', #The landmarks corresponding to t1_image
                                                             'subj_fixed_head_labels', #The fixed head labels from BABC
                                                             'subj_left_hemisphere', #The warped left hemisphere mask
                                                             'atlasWeightFilename',  #The static weights file name
                                                             'labelBaseFilename' #Atlas label base name ex) neuro_lbls.nii.gz
                                                            ]),
                         run_without_submitting=True,
                         name='inputspec')
    outputsSpec = pe.Node(interface=IdentityInterface(fields=['MALF_HDAtlas20_2015_label',
                                                       'MALF_HDAtlas20_2015_CSFVBInjected_label',
                                                       'MALF_HDAtlas20_2015_fs_standard_label',
                                                       'MALF_HDAtlas20_2015_lobar_label',
                                                       'MALF_extended_snapshot']),
                          run_without_submitting=True,
                          name='outputspec')

    # Per-atlas node registries, keyed by atlas subject id.
    BLICreator = dict()
    A2SantsRegistrationPreMALF_SyN = dict()
    fixedROIAuto = dict()
    movingROIAuto = dict()
    labelMapResample = dict()
    NewlabelMapResample = dict()

    malf_atlas_mergeindex = 0
    merge_input_offset = 1 #Merge nodes are indexed from 1, not zero!
    """
    multimodal ants registration if t2 exists
    """
    sessionMakeMultimodalInput = pe.Node(Function(function=MakeVector,
                                                  input_names=['inFN1', 'inFN2'],
                                                  output_names=['outFNs']),
                                         run_without_submitting=True, name="sessionMakeMultimodalInput")
    MALFWF.connect(inputsSpec, 'subj_t1_image', sessionMakeMultimodalInput, 'inFN1')
    # inFN2 stays unconnected in the T1-only case; MakeVector handles a
    # single input.
    if not onlyT1:
        MALFWF.connect(inputsSpec, 'subj_t2_image', sessionMakeMultimodalInput, 'inFN2')

    #print('malf_atlas_db_base')
    #print(master_config['malf_atlas_db_base'])
    malfAtlasDict = readMalfAtlasDbBase( master_config['malf_atlas_db_base'] )
    number_of_atlas_sources = len(malfAtlasDict)
    malfAtlases = dict()
    atlasMakeMultimodalInput = dict()
    t2Resample = dict()
    warpedAtlasLblMergeNode = pe.Node(interface=Merge(number_of_atlas_sources),name="LblMergeAtlas")
    NewwarpedAtlasLblMergeNode = pe.Node(interface=Merge(number_of_atlas_sources),name="fswmLblMergeAtlas")
    warpedAtlasesMergeNode = pe.Node(interface=Merge(number_of_atlas_sources*n_modality),name="MergeAtlases")

    for malf_atlas_subject in list(malfAtlasDict.keys()):
        ## Need DataGrabber Here For the Atlas
        malfAtlases[malf_atlas_subject] = pe.Node(interface = IdentityInterface(
                                                                  fields=['t1', 't2', 'label', 'lmks']),
                                                                  name='malfAtlasInput'+malf_atlas_subject)
        malfAtlases[malf_atlas_subject].inputs.t1 = malfAtlasDict[malf_atlas_subject]['t1']
        malfAtlases[malf_atlas_subject].inputs.t2 = malfAtlasDict[malf_atlas_subject]['t2']
        malfAtlases[malf_atlas_subject].inputs.label = malfAtlasDict[malf_atlas_subject]['label']
        malfAtlases[malf_atlas_subject].inputs.lmks = malfAtlasDict[malf_atlas_subject]['lmks']
        ## Create BLI first
        ########################################################
        # Run BLI atlas_to_subject
        ########################################################
        BLICreator[malf_atlas_subject] = pe.Node(interface=BRAINSLandmarkInitializer(), name="BLI_"+malf_atlas_subject)
        BLICreator[malf_atlas_subject].inputs.outputTransformFilename = "landmarkInitializer_{0}_to_subject_transform.h5".format(malf_atlas_subject)

        MALFWF.connect(inputsSpec, 'atlasWeightFilename', BLICreator[malf_atlas_subject], 'inputWeightFilename')
        MALFWF.connect(malfAtlases[malf_atlas_subject], 'lmks', BLICreator[malf_atlas_subject], 'inputMovingLandmarkFilename')
        MALFWF.connect(inputsSpec, 'subj_lmks', BLICreator[malf_atlas_subject], 'inputFixedLandmarkFilename')

        ##### Initialize with ANTS Transform For SyN
        currentAtlasToSubjectantsRegistration = 'SyN_AtlasToSubjectANTsPreMALF_'+malf_atlas_subject
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject] = pe.Node(interface=ants.Registration(), name=currentAtlasToSubjectantsRegistration)
        many_cpu_ANTsSyN_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE_LONG,4,2,16), 'overwrite': True}
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].plugin_args = many_cpu_ANTsSyN_options_dictionary

        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.num_threads   = -1
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.dimension = 3
        # Four-stage registration: Affine, Affine, SyN, SyN.
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.transforms = ["Affine","Affine","SyN","SyN"]
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.transform_parameters = [[0.1],[0.1],[0.1, 3, 0],[0.1, 3, 0]]
        # Multimodal stages use per-modality metric lists when T2 is present.
        if onlyT1:
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.metric = ['MI','MI','CC','CC']
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.metric_weight = [1.0,1.0,1.0,1.0]
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.sampling_percentage = [.5,.5,1.0,1.0]
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.radius_or_number_of_bins = [32,32,4,4]
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.sampling_strategy = ['Regular','Regular',None,None]
        else:
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.metric = ['MI',['MI','MI'],'CC',['CC','CC']]
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.metric_weight = [1.0,[1.0,1.0],1.0,[1.0,1.0]]
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.sampling_percentage = [.5,[.5,0.5],1.0,[1.0,1.0]]
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.radius_or_number_of_bins = [32,[32,32],4,[4,4]]
            A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.sampling_strategy = ['Regular',['Regular','Regular'],None,[None,None]]

        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.number_of_iterations = [[1000,1000,500],[500,500],[500,500],[500,70]]

        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.convergence_threshold = [1e-8,1e-6,1e-8,1e-6]

        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.convergence_window_size = [12]
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.use_histogram_matching = [True,True,True,True]
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.shrink_factors = [[8, 4, 2],[2, 1],[8, 4],[2, 1]]
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.smoothing_sigmas = [[3, 2, 1],[1, 0],[3, 2],[1, 0]]
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.sigma_units = ["vox","vox","vox","vox"]
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.use_estimate_learning_rate_once = [False,False,False,False]
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.write_composite_transform = True # Required for initialize_transforms_per_stage
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.collapse_output_transforms = False # Mutually Exclusive with initialize_transforms_per_stage
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.initialize_transforms_per_stage = True
        ## NO NEED FOR THIS A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.save_state = 'SavedInternalSyNState.h5'
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.output_transform_prefix = malf_atlas_subject+'_ToSubjectPreMALF_SyN'
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.winsorize_lower_quantile = 0.01
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.winsorize_upper_quantile = 0.99
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.output_warped_image = malf_atlas_subject + '_2subject.nii.gz'
        ## NO NEED FOR THIS A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.output_inverse_warped_image = 'subject2atlas.nii.gz'
        A2SantsRegistrationPreMALF_SyN[malf_atlas_subject].inputs.float = True

        ## if using Registration masking, then do ROIAuto on fixed and moving images and connect to registraitons
        UseRegistrationMasking = True
        if UseRegistrationMasking:
            fixedROIAuto[malf_atlas_subject] = pe.Node(interface=BRAINSROIAuto(), name="fixedROIAUTOMask_"+malf_atlas_subject)
            fixedROIAuto[malf_atlas_subject].inputs.ROIAutoDilateSize = 10
            fixedROIAuto[malf_atlas_subject].inputs.outputROIMaskVolume = "fixedImageROIAutoMask.nii.gz"

            movingROIAuto[malf_atlas_subject] = pe.Node(interface=BRAINSROIAuto(), name="movingROIAUTOMask_"+malf_atlas_subject)
            # BUGFIX: this dilate size was previously (mistakenly) assigned to
            # fixedROIAuto a second time, leaving movingROIAuto at its default.
            movingROIAuto[malf_atlas_subject].inputs.ROIAutoDilateSize = 10
            movingROIAuto[malf_atlas_subject].inputs.outputROIMaskVolume = "movingImageROIAutoMask.nii.gz"

            MALFWF.connect(inputsSpec, 'subj_t1_image',fixedROIAuto[malf_atlas_subject],'inputVolume')
            MALFWF.connect(malfAtlases[malf_atlas_subject], 't1', movingROIAuto[malf_atlas_subject],'inputVolume')

            MALFWF.connect(fixedROIAuto[malf_atlas_subject], 'outputROIMaskVolume',A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'fixed_image_mask')
            MALFWF.connect(movingROIAuto[malf_atlas_subject], 'outputROIMaskVolume',A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'moving_image_mask')

        MALFWF.connect(BLICreator[malf_atlas_subject],'outputTransformFilename',
                       A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'initial_moving_transform')

        """
        make multimodal input for atlases
        """
        atlasMakeMultimodalInput[malf_atlas_subject] = pe.Node(Function(function=MakeVector, input_names=['inFN1', 'inFN2'], output_names=['outFNs']),
                                  run_without_submitting=True, name="atlasMakeMultimodalInput"+malf_atlas_subject)
        MALFWF.connect(malfAtlases[malf_atlas_subject], 't1', atlasMakeMultimodalInput[malf_atlas_subject], 'inFN1')
        if not onlyT1:
            MALFWF.connect(malfAtlases[malf_atlas_subject], 't2', atlasMakeMultimodalInput[malf_atlas_subject], 'inFN2')

        MALFWF.connect(sessionMakeMultimodalInput, 'outFNs',
                       A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'fixed_image')
        MALFWF.connect(atlasMakeMultimodalInput[malf_atlas_subject], 'outFNs',
                       A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'moving_image')
        MALFWF.connect(A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'warped_image',
                       warpedAtlasesMergeNode,'in'+str(merge_input_offset + malf_atlas_mergeindex*n_modality) )

        """
        Original t2 resampling
        """
        # Only executes when n_modality == 2 (i.e. a T2 exists).
        for modality_index in range(1,n_modality):
            t2Resample[malf_atlas_subject] = pe.Node(interface=ants.ApplyTransforms(),name="resampledT2"+malf_atlas_subject)
            many_cpu_t2Resample_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,1,1,1), 'overwrite': True}
            t2Resample[malf_atlas_subject].plugin_args = many_cpu_t2Resample_options_dictionary
            t2Resample[malf_atlas_subject].inputs.dimension=3
            t2Resample[malf_atlas_subject].inputs.output_image=malf_atlas_subject+'_t2.nii.gz'
            t2Resample[malf_atlas_subject].inputs.interpolation='BSpline'
            t2Resample[malf_atlas_subject].inputs.default_value=0
            t2Resample[malf_atlas_subject].inputs.invert_transform_flags=[False]

            MALFWF.connect( A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'composite_transform',
                            t2Resample[malf_atlas_subject],'transforms')
            MALFWF.connect( inputsSpec, 'subj_t1_image',
                            t2Resample[malf_atlas_subject],'reference_image')
            MALFWF.connect( malfAtlases[malf_atlas_subject], 't2',
                            t2Resample[malf_atlas_subject],'input_image')
            MALFWF.connect(t2Resample[malf_atlas_subject],'output_image',
                           warpedAtlasesMergeNode,'in'+str(merge_input_offset + malf_atlas_mergeindex*n_modality+modality_index) )

        """
        Original labelmap resampling
        """
        labelMapResample[malf_atlas_subject] = pe.Node(interface=ants.ApplyTransforms(),name="resampledLabel"+malf_atlas_subject)
        many_cpu_labelMapResample_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,1,1,1), 'overwrite': True}
        labelMapResample[malf_atlas_subject].plugin_args = many_cpu_labelMapResample_options_dictionary
        labelMapResample[malf_atlas_subject].inputs.dimension=3
        labelMapResample[malf_atlas_subject].inputs.output_image=malf_atlas_subject+'_2_subj_lbl.nii.gz'
        labelMapResample[malf_atlas_subject].inputs.interpolation='MultiLabel'
        labelMapResample[malf_atlas_subject].inputs.default_value=0
        labelMapResample[malf_atlas_subject].inputs.invert_transform_flags=[False]

        MALFWF.connect( A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'composite_transform',
                        labelMapResample[malf_atlas_subject],'transforms')
        MALFWF.connect( inputsSpec, 'subj_t1_image',
                        labelMapResample[malf_atlas_subject],'reference_image')
        MALFWF.connect( malfAtlases[malf_atlas_subject], 'label',
                        labelMapResample[malf_atlas_subject],'input_image')

        MALFWF.connect(labelMapResample[malf_atlas_subject],'output_image',warpedAtlasLblMergeNode,'in'+str(merge_input_offset + malf_atlas_mergeindex) )

        ### New labelmap resampling
        NewlabelMapResample[malf_atlas_subject] = pe.Node(interface=ants.ApplyTransforms(),name="FSWM_WLABEL_"+malf_atlas_subject)
        many_cpu_NewlabelMapResample_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,1,1,1), 'overwrite': True}
        NewlabelMapResample[malf_atlas_subject].plugin_args = many_cpu_NewlabelMapResample_options_dictionary
        NewlabelMapResample[malf_atlas_subject].inputs.dimension=3
        NewlabelMapResample[malf_atlas_subject].inputs.output_image=malf_atlas_subject+'fswm_2_subj_lbl.nii.gz'
        NewlabelMapResample[malf_atlas_subject].inputs.interpolation='MultiLabel'
        NewlabelMapResample[malf_atlas_subject].inputs.default_value=0
        NewlabelMapResample[malf_atlas_subject].inputs.invert_transform_flags=[False]

        MALFWF.connect( A2SantsRegistrationPreMALF_SyN[malf_atlas_subject],'composite_transform',
                        NewlabelMapResample[malf_atlas_subject],'transforms')
        MALFWF.connect( inputsSpec, 'subj_t1_image',
                        NewlabelMapResample[malf_atlas_subject],'reference_image')
        MALFWF.connect( malfAtlases[malf_atlas_subject], 'label',
                        NewlabelMapResample[malf_atlas_subject],'input_image')

        MALFWF.connect(NewlabelMapResample[malf_atlas_subject],'output_image',NewwarpedAtlasLblMergeNode,'in'+str(merge_input_offset + malf_atlas_mergeindex) )

        malf_atlas_mergeindex += 1

    ## Now work on cleaning up the label maps
    from .FixLabelMapsTools import FixLabelMapFromNeuromorphemetrics2012
    from .FixLabelMapsTools import RecodeLabelMap

    ### Original NeuroMorphometrica merged fusion
    jointFusion = pe.Node(interface=ants.JointFusion(),name="JointFusion")
    many_cpu_JointFusion_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,8,4,4), 'overwrite': True}
    jointFusion.plugin_args = many_cpu_JointFusion_options_dictionary
    jointFusion.inputs.dimension=3
    jointFusion.inputs.method='Joint[0.1,2]'
    jointFusion.inputs.output_label_image='MALF_HDAtlas20_2015_label.nii.gz'

    MALFWF.connect(warpedAtlasesMergeNode,'out',jointFusion,'warped_intensity_images')
    MALFWF.connect(warpedAtlasLblMergeNode,'out',jointFusion,'warped_label_images')
    #MALFWF.connect(inputsSpec, 'subj_t1_image',jointFusion,'target_image')
    MALFWF.connect(sessionMakeMultimodalInput, 'outFNs',jointFusion,'target_image')
    MALFWF.connect(jointFusion, 'output_label_image', outputsSpec,'MALF_HDAtlas20_2015_label')

    jointFusion.inputs.modalities = n_modality

    ## We need to recode values to ensure that the labels match FreeSurer as close as possible by merging
    ## some labels together to standard FreeSurfer confenventions (i.e. for WMQL)
    RECODE_LABELS_2_Standard_FSWM = [
                                (15071,47),(15072,47),(15073,47),(15145,1011),(15157,1011),(15161,1011),
                                (15179,1012),(15141,1014),(15151,1017),(15163,1018),(15165,1019),(15143,1027),
                                (15191,1028),(15193,1028),(15185,1030),(15201,1030),(15175,1031),(15195,1031),
                                (15173,1035),(15144,2011),(15156,2011),(15160,2011),(15178,2012),(15140,2014),
                                (15150,2017),(15162,2018),(15164,2019),(15142,2027),(15190,2028),(15192,2028),
                                (15184,2030),(15174,2031),(15194,2031),(15172,2035),(15200,2030)]
    ## def RecodeLabelMap(InputFileName,OutputFileName,RECODE_TABLE):
    RecodeToStandardFSWM = pe.Node(Function(function=RecodeLabelMap,
                                                   input_names=['InputFileName','OutputFileName','RECODE_TABLE'],
                                                   output_names=['OutputFileName']),
                                                   name="RecodeToStandardFSWM")
    RecodeToStandardFSWM.inputs.RECODE_TABLE = RECODE_LABELS_2_Standard_FSWM
    RecodeToStandardFSWM.inputs.OutputFileName = 'MALF_HDAtlas20_2015_fs_standard_label.nii.gz'

    MALFWF.connect(RecodeToStandardFSWM,'OutputFileName',outputsSpec,'MALF_HDAtlas20_2015_fs_standard_label')

    ## MALF_SNAPSHOT_WRITER for Segmented result checking:
#    MALF_SNAPSHOT_WRITERNodeName = "MALF_ExtendedMALF_SNAPSHOT_WRITER"
#    MALF_SNAPSHOT_WRITER = pe.Node(interface=BRAINSSnapShotWriter(), name=MALF_SNAPSHOT_WRITERNodeName)

#    MALF_SNAPSHOT_WRITER.inputs.outputFilename = 'MALF_HDAtlas20_2015_CSFVBInjected_label.png'  # output specification
#    MALF_SNAPSHOT_WRITER.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0]
#    MALF_SNAPSHOT_WRITER.inputs.inputSliceToExtractInPhysicalPoint = [-3, -7, -3, 5, 7, 22, -22]

#    MALFWF.connect(MALF_SNAPSHOT_WRITER,'outputFilename',outputsSpec,'MALF_extended_snapshot')

    if runFixFusionLabelMap:
        ## post processing of jointfusion
        injectSurfaceCSFandVBIntoLabelMap = pe.Node(Function(function=FixLabelMapFromNeuromorphemetrics2012,
                                                      input_names=['fusionFN',
                                                        'FixedHeadFN',
                                                        'LeftHemisphereFN',
                                                        'outFN',
                                                        'OUT_DICT'],
                                                      output_names=['fixedFusionLabelFN']),
                                               name="injectSurfaceCSFandVBIntoLabelMap")
        injectSurfaceCSFandVBIntoLabelMap.inputs.outFN = 'MALF_HDAtlas20_2015_CSFVBInjected_label.nii.gz'
        FREESURFER_DICT = { 'BRAINSTEM': 16, 'RH_CSF':24, 'LH_CSF':24, 'BLOOD': 15000, 'UNKNOWN': 999,
                            'CONNECTED': [11,12,13,9,17,26,50,51,52,48,53,58]
                          }
        injectSurfaceCSFandVBIntoLabelMap.inputs.OUT_DICT = FREESURFER_DICT
        MALFWF.connect(jointFusion, 'output_label_image', injectSurfaceCSFandVBIntoLabelMap, 'fusionFN')
        MALFWF.connect(inputsSpec, 'subj_fixed_head_labels', injectSurfaceCSFandVBIntoLabelMap, 'FixedHeadFN')
        MALFWF.connect(inputsSpec, 'subj_left_hemisphere', injectSurfaceCSFandVBIntoLabelMap, 'LeftHemisphereFN')

        MALFWF.connect(injectSurfaceCSFandVBIntoLabelMap, 'fixedFusionLabelFN',
                       RecodeToStandardFSWM,'InputFileName')

        MALFWF.connect(injectSurfaceCSFandVBIntoLabelMap,'fixedFusionLabelFN',
                       outputsSpec,'MALF_HDAtlas20_2015_CSFVBInjected_label')
#        MALFWF.connect([(inputsSpec, MALF_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]),
#                    (injectSurfaceCSFandVBIntoLabelMap, MALF_SNAPSHOT_WRITER,
#                      [('fixedFusionLabelFN', 'inputBinaryVolumes')])
#                   ])
    else:
        MALFWF.connect(jointFusion, 'output_label_image',
                       RecodeToStandardFSWM,'InputFileName')
        MALFWF.connect(jointFusion, 'output_label_image',
                       outputsSpec,'MALF_HDAtlas20_2015_CSFVBInjected_label')
#        MALFWF.connect([(inputsSpec, MALF_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]),
#                    (jointFusion, MALF_SNAPSHOT_WRITER,
#                      [('output_label_image', 'inputBinaryVolumes')])
#                   ])

    ## Lobar Pacellation by recoding
    if master_config['relabel2lobes_filename'] is not None:
        #print("Generate relabeled version based on {0}".format(master_config['relabel2lobes_filename']))

        RECODE_LABELS_2_LobarPacellation = readRecodingList( master_config['relabel2lobes_filename'] )
        RecordToFSLobes = pe.Node(Function(function=RecodeLabelMap,
                                                    input_names=['InputFileName','OutputFileName','RECODE_TABLE'],
                                                    output_names=['OutputFileName']),
                                                    name="RecordToFSLobes")
        RecordToFSLobes.inputs.RECODE_TABLE = RECODE_LABELS_2_LobarPacellation
        RecordToFSLobes.inputs.OutputFileName = 'MALF_HDAtlas20_2015_lobar_label.nii.gz'
        MALFWF.connect(RecodeToStandardFSWM, 'OutputFileName',RecordToFSLobes,'InputFileName')
        MALFWF.connect(RecordToFSLobes,'OutputFileName',outputsSpec,'MALF_HDAtlas20_2015_lobar_label')

    return MALFWF
# HighresToTemplate.inputs.shrink_factors=[[8, 4, 2, 1]]*3
# HighresToTemplate.inputs.smoothing_sigmas=[[3, 2, 1, 0]]*3
# HighresToTemplate.inputs.transform_parameters=[(0.1,),
#                                                  (0.1,),
#                                                  (0.1, 3.0, 0.0)]
# HighresToTemplate.inputs.use_histogram_matching=True
# HighresToTemplate.inputs.write_composite_transform=True
# HighresToTemplate.inputs.verbose=True
# HighresToTemplate.inputs.output_warped_image=True
# HighresToTemplate.inputs.float=True

#-----------------------------------------------------------------------------------------------------


# In[9]:
CoReg = Node(ants.Registration(), name = 'CoReg')
CoReg.inputs.args='--float'
CoReg.inputs.collapse_output_transforms=True
CoReg.inputs.initial_moving_transform_com=True
CoReg.inputs.num_threads=8
CoReg.inputs.output_inverse_warped_image=True
CoReg.inputs.output_warped_image=True
CoReg.inputs.sigma_units=['vox']*3
CoReg.inputs.transforms= ['Rigid']
CoReg.inputs.winsorize_lower_quantile=0.005
CoReg.inputs.winsorize_upper_quantile=0.995
CoReg.inputs.convergence_threshold=[1e-06]
CoReg.inputs.convergence_window_size=[10]
CoReg.inputs.metric=['MI', 'MI', 'CC']
CoReg.inputs.metric_weight=[1.0]*3
CoReg.inputs.number_of_iterations=[[1000, 500, 250, 100],
# 示例#28 (Example #28 — scraped-source separator; not executable code)
# 0
# Five generic DataSink nodes for storing structured outputs; built in one
# pass, then unpacked so each keeps its original module-level name.
io_DataSink_1, io_DataSink_2, io_DataSink_3, io_DataSink_4, io_DataSink_5 = (
    pe.Node(interface=io.DataSink(), name='io_DataSink_{0}'.format(idx))
    for idx in range(1, 6)
)

# Node wrapping the ``antsRegistration`` executable.
ants_Registration = pe.Node(interface=ants.Registration(),
                            name='ants_Registration')

# Node wrapping the ``Atropos`` executable.
ants_Atropos = pe.Node(interface=ants.Atropos(), name='ants_Atropos')

# Node wrapping the ``AverageImages`` executable.
ants_AverageImages = pe.Node(interface=ants.AverageImages(),
                             name='ants_AverageImages')

# Node wrapping the Camino ``modelfit`` executable.
camino_ModelFit = pe.Node(interface=camino.ModelFit(), name='camino_ModelFit')

# Node wrapping the Camino ``vtkstreamlines`` executable.
camino_VtkStreamlines = pe.Node(interface=camino.VtkStreamlines(),
                                name='camino_VtkStreamlines')
def CreateJointFusionWorkflow(WFname,
                              onlyT1,
                              master_config,
                              runFixFusionLabelMap=True):
    """Build the multi-atlas ANTs JointFusion labeling workflow.

    For every atlas listed in ``master_config['jointfusion_atlas_db_base']``
    the atlas T1 (and T2, unless ``onlyT1``) is registered to the subject
    (BRAINSLandmarkInitializer seed + ANTs SyN), its label map is resampled
    into subject space, and ``antsJointFusion`` fuses the warped labels.
    The fused map is then (optionally) fixed up via
    ``FixLabelMapFromNeuromorphemetrics2012``, dust-cleaned, recoded to
    FreeSurfer-standard labels, optionally lobe-parcellated, and label
    volumes are measured.

    Parameters
    ----------
    WFname : str
        Name for the returned nipype Workflow.
    onlyT1 : bool
        If True, only T1 images are used; otherwise the subject T2 is
        resampled into T1 space and carried along.
    master_config : dict
        Must provide at least 'queue', 'long_q',
        'jointfusion_atlas_db_base' and 'relabel2lobes_filename'.
    runFixFusionLabelMap : bool
        If True, inject surface CSF / venous-blood fixes into the fused
        label map before dust cleanup.

    Returns
    -------
    pe.Workflow
        The assembled workflow, with 'inputspec' / 'outputspec'
        IdentityInterface nodes as its external interface.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (MakeVector, adjustMergeList, readMalfAtlasDbBase, readRecodingList,
    CommonANTsRegistrationSettings, modify_qsub_args,
    CreateDustCleanupWorkflow, CreateVolumeMeasureWorkflow, BRAINSResample,
    BRAINSLandmarkInitializer) -- verify availability at the call site.
    """
    from nipype.interfaces import ants

    if onlyT1:
        n_modality = 1
    else:
        n_modality = 2
    CLUSTER_QUEUE = master_config['queue']
    CLUSTER_QUEUE_LONG = master_config['long_q']

    JointFusionWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(
        interface=IdentityInterface(fields=[
            'subj_t1_image',  # Desired image to create label map for
            'subj_t2_image',  # Desired image to create label map for
            'subj_lmks',  # The landmarks corresponding to t1_image
            'subj_fixed_head_labels',
            # The fixed head labels from BABC
            'subj_posteriors',  # The BABC posteriors
            'subj_left_hemisphere',  # The warped left hemisphere mask
            'atlasWeightFilename',  # The static weights file name
            'labelBaseFilename'
            # Atlas label base name ex) neuro_lbls.nii.gz
        ]),
        run_without_submitting=True,
        name='inputspec')
    outputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'JointFusion_HDAtlas20_2015_label',
        'JointFusion_HDAtlas20_2015_CSFVBInjected_label',
        'JointFusion_HDAtlas20_2015_fs_standard_label',
        'JointFusion_HDAtlas20_2015_lobe_label',
        'JointFusion_extended_snapshot',
        'JointFusion_HDAtlas20_2015_dustCleaned_label',
        'JointFusion_volumes_csv', 'JointFusion_volumes_json',
        'JointFusion_lobe_volumes_csv', 'JointFusion_lobe_volumes_json'
    ]),
                          run_without_submitting=True,
                          name='outputspec')

    from collections import OrderedDict  # Need OrderedDict internally to ensure consistent ordering
    # Per-atlas node registries, keyed by atlas subject id.
    BLICreator = OrderedDict()
    A2SantsRegistrationPreJointFusion_SyN = OrderedDict()
    movingROIAuto = OrderedDict()
    labelMapResample = OrderedDict()
    NewlabelMapResample = OrderedDict()

    jointFusion_atlas_mergeindex = 0
    merge_input_offset = 1  # Merge nodes are indexed from 1, not zero!
    """
    multimodal ants registration if t2 exists
    """
    sessionMakeMultimodalInput = pe.Node(Function(
        function=MakeVector,
        input_names=['inFN1', 'inFN2', 'jointFusion'],
        output_names=['outFNs']),
                                         run_without_submitting=True,
                                         name="sessionMakeMultimodalInput")
    sessionMakeMultimodalInput.inputs.jointFusion = False
    JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                          sessionMakeMultimodalInput, 'inFN1')
    """
    T2 resample to T1 average image
    :: BRAINSABC changed its behavior to retain image's original spacing & origin
    :: Since antsJointFusion only works for the identical origin images for targets,
    :: Resampling is placed at this stage
    """
    subjectT2Resample = pe.Node(interface=BRAINSResample(),
                                name="BRAINSResample_T2_forAntsJointFusion")
    if not onlyT1:
        subjectT2Resample.plugin_args = {
            'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1),
            'overwrite': True
        }
        subjectT2Resample.inputs.pixelType = 'short'
        subjectT2Resample.inputs.interpolationMode = 'Linear'
        subjectT2Resample.inputs.outputVolume = "t2_resampled_in_t1.nii.gz"
        # subjectT2Resample.inputs.warpTransform= "Identity" # Default is "Identity"

        JointFusionWF.connect(inputsSpec, 'subj_t1_image', subjectT2Resample,
                              'referenceVolume')
        JointFusionWF.connect(inputsSpec, 'subj_t2_image', subjectT2Resample,
                              'inputVolume')

        JointFusionWF.connect(subjectT2Resample, 'outputVolume',
                              sessionMakeMultimodalInput, 'inFN2')
    else:
        pass

    # print('jointFusion_atlas_db_base')
    print("master_config")
    print(master_config)
    print("master_config['jointfusion_atlas_db_base']")
    print((master_config['jointfusion_atlas_db_base']))
    jointFusionAtlasDict = readMalfAtlasDbBase(
        master_config['jointfusion_atlas_db_base'])
    number_of_atlas_sources = len(jointFusionAtlasDict)
    jointFusionAtlases = OrderedDict()
    atlasMakeMultimodalInput = OrderedDict()
    t2Resample = OrderedDict()
    warpedAtlasLblMergeNode = pe.Node(interface=Merge(number_of_atlas_sources),
                                      name="LblMergeAtlas")
    NewwarpedAtlasLblMergeNode = pe.Node(
        interface=Merge(number_of_atlas_sources), name="fswmLblMergeAtlas")
    # "HACK NOT to use T2 for JointFusion only"
    # warpedAtlasesMergeNode = pe.Node(interface=Merge(number_of_atlas_sources*n_modality),name="MergeAtlases")
    warpedAtlasesMergeNode = pe.Node(interface=Merge(number_of_atlas_sources *
                                                     1),
                                     name="MergeAtlases")

    ## if using Registration masking, then do ROIAuto on fixed and moving images and connect to registraitons
    UseRegistrationMasking = True
    if UseRegistrationMasking == True:
        from nipype.interfaces.semtools.segmentation.specialized import BRAINSROIAuto

        fixedROIAuto = pe.Node(interface=BRAINSROIAuto(),
                               name="fixedROIAUTOMask")
        fixedROIAuto.inputs.ROIAutoDilateSize = 10
        fixedROIAuto.inputs.outputROIMaskVolume = "fixedImageROIAutoMask.nii.gz"
        JointFusionWF.connect(inputsSpec, 'subj_t1_image', fixedROIAuto,
                              'inputVolume')

    # Per-atlas pipeline: landmark-initialized SyN registration to the
    # subject, then label-map (and optionally T2) resampling into subject
    # space, feeding the Merge nodes consumed by AntsJointFusion below.
    for jointFusion_atlas_subject in list(jointFusionAtlasDict.keys()):
        ## Need DataGrabber Here For the Atlas
        jointFusionAtlases[jointFusion_atlas_subject] = pe.Node(
            interface=IdentityInterface(
                fields=['t1', 't2', 'label', 'lmks', 'registration_mask']),
            name='jointFusionAtlasInput' + jointFusion_atlas_subject)
        jointFusionAtlases[
            jointFusion_atlas_subject].inputs.t1 = jointFusionAtlasDict[
                jointFusion_atlas_subject]['t1']
        jointFusionAtlases[
            jointFusion_atlas_subject].inputs.t2 = jointFusionAtlasDict[
                jointFusion_atlas_subject]['t2']
        jointFusionAtlases[
            jointFusion_atlas_subject].inputs.label = jointFusionAtlasDict[
                jointFusion_atlas_subject]['label']
        jointFusionAtlases[
            jointFusion_atlas_subject].inputs.lmks = jointFusionAtlasDict[
                jointFusion_atlas_subject]['lmks']
        jointFusionAtlases[jointFusion_atlas_subject].inputs.registration_mask = \
        jointFusionAtlasDict[jointFusion_atlas_subject]['registration_mask']
        ## Create BLI first
        ########################################################
        # Run BLI atlas_to_subject
        ########################################################
        BLICreator[jointFusion_atlas_subject] = pe.Node(
            interface=BRAINSLandmarkInitializer(),
            name="BLI_" + jointFusion_atlas_subject)
        BLICreator[
            jointFusion_atlas_subject].inputs.outputTransformFilename = "landmarkInitializer_{0}_to_subject_transform.h5".format(
                jointFusion_atlas_subject)

        JointFusionWF.connect(inputsSpec, 'atlasWeightFilename',
                              BLICreator[jointFusion_atlas_subject],
                              'inputWeightFilename')
        JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject],
                              'lmks', BLICreator[jointFusion_atlas_subject],
                              'inputMovingLandmarkFilename')
        JointFusionWF.connect(inputsSpec, 'subj_lmks',
                              BLICreator[jointFusion_atlas_subject],
                              'inputFixedLandmarkFilename')

        ##### Initialize with ANTS Transform For SyN
        currentAtlasToSubjectantsRegistration = 'SyN_AtlasToSubjectANTsPreJointFusion_' + jointFusion_atlas_subject
        A2SantsRegistrationPreJointFusion_SyN[
            jointFusion_atlas_subject] = pe.Node(
                interface=ants.Registration(),
                name=currentAtlasToSubjectantsRegistration)
        many_cpu_ANTsSyN_options_dictionary = {
            'qsub_args': modify_qsub_args(CLUSTER_QUEUE_LONG, 4, 2, 16),
            'overwrite': True
        }
        A2SantsRegistrationPreJointFusion_SyN[
            jointFusion_atlas_subject].plugin_args = many_cpu_ANTsSyN_options_dictionary
        if onlyT1:
            JFregistrationTypeDescription = "FiveStageAntsRegistrationT1Only"
        else:
            JFregistrationTypeDescription = "FiveStageAntsRegistrationMultiModal"
        # Shared registration-parameter setup defined elsewhere in this file.
        CommonANTsRegistrationSettings(
            antsRegistrationNode=A2SantsRegistrationPreJointFusion_SyN[
                jointFusion_atlas_subject],
            registrationTypeDescription=JFregistrationTypeDescription,
            output_transform_prefix=jointFusion_atlas_subject +
            '_ToSubjectPreJointFusion_SyN',
            output_warped_image=jointFusion_atlas_subject + '_2subject.nii.gz',
            output_inverse_warped_image=None,  # NO NEED FOR THIS
            save_state=None,  # NO NEED FOR THIS
            invert_initial_moving_transform=False,
            initial_moving_transform=None)

        ## if using Registration masking, then do ROIAuto on fixed and moving images and connect to registraitons
        if UseRegistrationMasking == True:
            from nipype.interfaces.semtools.segmentation.specialized import BRAINSROIAuto
            JointFusionWF.connect(
                fixedROIAuto, 'outputROIMaskVolume',
                A2SantsRegistrationPreJointFusion_SyN[
                    jointFusion_atlas_subject], 'fixed_image_masks')
            # JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels',
            #                       A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'fixed_image_masks')

            # NOTE: Moving image mask can be taken from Atlas directly so that it does not need to be read in
            # movingROIAuto[jointFusion_atlas_subject] = pe.Node(interface=BRAINSROIAuto(), name="movingROIAUTOMask_"+jointFusion_atlas_subject)
            # movingROIAuto.inputs.ROIAutoDilateSize=10
            # movingROIAuto[jointFusion_atlas_subject].inputs.outputROIMaskVolume = "movingImageROIAutoMask.nii.gz"
            # JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject], 't1', movingROIAuto[jointFusion_atlas_subject],'inputVolume')
            # JointFusionWF.connect(movingROIAuto[jointFusion_atlas_subject], 'outputROIMaskVolume',A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'moving_image_masks')
            JointFusionWF.connect(
                jointFusionAtlases[jointFusion_atlas_subject],
                'registration_mask', A2SantsRegistrationPreJointFusion_SyN[
                    jointFusion_atlas_subject], 'moving_image_masks')

        JointFusionWF.connect(
            BLICreator[jointFusion_atlas_subject], 'outputTransformFilename',
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'initial_moving_transform')
        """
        make multimodal input for atlases
        """
        atlasMakeMultimodalInput[jointFusion_atlas_subject] = pe.Node(
            Function(function=MakeVector,
                     input_names=['inFN1', 'inFN2', 'jointFusion'],
                     output_names=['outFNs']),
            run_without_submitting=True,
            name="atlasMakeMultimodalInput" + jointFusion_atlas_subject)
        atlasMakeMultimodalInput[
            jointFusion_atlas_subject].inputs.jointFusion = False
        JointFusionWF.connect(
            jointFusionAtlases[jointFusion_atlas_subject], 't1',
            atlasMakeMultimodalInput[jointFusion_atlas_subject], 'inFN1')

        if not onlyT1:
            JointFusionWF.connect(
                jointFusionAtlases[jointFusion_atlas_subject], 't2',
                atlasMakeMultimodalInput[jointFusion_atlas_subject], 'inFN2')
        else:
            pass

        JointFusionWF.connect(
            sessionMakeMultimodalInput, 'outFNs',
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'fixed_image')
        JointFusionWF.connect(
            atlasMakeMultimodalInput[jointFusion_atlas_subject], 'outFNs',
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'moving_image')
        "HACK NOT to use T2 for JointFusion"
        # JointFusionWF.connect(A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'warped_image',
        #               warpedAtlasesMergeNode,'in'+str(merge_input_offset + jointFusion_atlas_mergeindex*n_modality) )
        JointFusionWF.connect(
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'warped_image', warpedAtlasesMergeNode,
            'in' + str(merge_input_offset + jointFusion_atlas_mergeindex * 1))
        """
        Original t2 resampling
        """
        # Runs only when n_modality == 2 (i.e. not onlyT1).
        for modality_index in range(1, n_modality):
            t2Resample[jointFusion_atlas_subject] = pe.Node(
                interface=ants.ApplyTransforms(),
                name="resampledT2" + jointFusion_atlas_subject)
            many_cpu_t2Resample_options_dictionary = {
                'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1),
                'overwrite': True
            }
            t2Resample[
                jointFusion_atlas_subject].plugin_args = many_cpu_t2Resample_options_dictionary
            t2Resample[jointFusion_atlas_subject].inputs.num_threads = -1
            t2Resample[jointFusion_atlas_subject].inputs.dimension = 3
            t2Resample[
                jointFusion_atlas_subject].inputs.output_image = jointFusion_atlas_subject + '_t2.nii.gz'
            t2Resample[
                jointFusion_atlas_subject].inputs.interpolation = 'BSpline'
            t2Resample[jointFusion_atlas_subject].inputs.default_value = 0
            t2Resample[
                jointFusion_atlas_subject].inputs.invert_transform_flags = [
                    False
                ]

            JointFusionWF.connect(
                A2SantsRegistrationPreJointFusion_SyN[
                    jointFusion_atlas_subject], 'composite_transform',
                t2Resample[jointFusion_atlas_subject], 'transforms')
            JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                                  t2Resample[jointFusion_atlas_subject],
                                  'reference_image')
            JointFusionWF.connect(
                jointFusionAtlases[jointFusion_atlas_subject], 't2',
                t2Resample[jointFusion_atlas_subject], 'input_image')
            "HACK NOT to use T2 for JointFusion only"
            # JointFusionWF.connect(t2Resample[jointFusion_atlas_subject],'output_image',
            #               warpedAtlasesMergeNode,'in'+str(merge_input_offset + jointFusion_atlas_mergeindex*n_modality+modality_index) )
        """
        Original labelmap resampling
        """
        labelMapResample[jointFusion_atlas_subject] = pe.Node(
            interface=ants.ApplyTransforms(),
            name="resampledLabel" + jointFusion_atlas_subject)
        many_cpu_labelMapResample_options_dictionary = {
            'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1),
            'overwrite': True
        }
        labelMapResample[
            jointFusion_atlas_subject].plugin_args = many_cpu_labelMapResample_options_dictionary
        labelMapResample[jointFusion_atlas_subject].inputs.num_threads = -1
        labelMapResample[jointFusion_atlas_subject].inputs.dimension = 3
        labelMapResample[
            jointFusion_atlas_subject].inputs.output_image = jointFusion_atlas_subject + '_2_subj_lbl.nii.gz'
        labelMapResample[
            jointFusion_atlas_subject].inputs.interpolation = 'MultiLabel'
        labelMapResample[jointFusion_atlas_subject].inputs.default_value = 0
        labelMapResample[
            jointFusion_atlas_subject].inputs.invert_transform_flags = [False]

        JointFusionWF.connect(
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'composite_transform', labelMapResample[jointFusion_atlas_subject],
            'transforms')
        JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                              labelMapResample[jointFusion_atlas_subject],
                              'reference_image')
        JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject],
                              'label',
                              labelMapResample[jointFusion_atlas_subject],
                              'input_image')

        JointFusionWF.connect(
            labelMapResample[jointFusion_atlas_subject], 'output_image',
            warpedAtlasLblMergeNode,
            'in' + str(merge_input_offset + jointFusion_atlas_mergeindex))

        ### New labelmap resampling
        NewlabelMapResample[jointFusion_atlas_subject] = pe.Node(
            interface=ants.ApplyTransforms(),
            name="FSWM_WLABEL_" + jointFusion_atlas_subject)
        many_cpu_NewlabelMapResample_options_dictionary = {
            'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1),
            'overwrite': True
        }
        NewlabelMapResample[
            jointFusion_atlas_subject].plugin_args = many_cpu_NewlabelMapResample_options_dictionary
        NewlabelMapResample[jointFusion_atlas_subject].inputs.num_threads = -1
        NewlabelMapResample[jointFusion_atlas_subject].inputs.dimension = 3
        NewlabelMapResample[
            jointFusion_atlas_subject].inputs.output_image = jointFusion_atlas_subject + 'fswm_2_subj_lbl.nii.gz'
        NewlabelMapResample[
            jointFusion_atlas_subject].inputs.interpolation = 'MultiLabel'
        NewlabelMapResample[jointFusion_atlas_subject].inputs.default_value = 0
        NewlabelMapResample[
            jointFusion_atlas_subject].inputs.invert_transform_flags = [False]

        JointFusionWF.connect(
            A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],
            'composite_transform',
            NewlabelMapResample[jointFusion_atlas_subject], 'transforms')
        JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                              NewlabelMapResample[jointFusion_atlas_subject],
                              'reference_image')
        JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject],
                              'label',
                              NewlabelMapResample[jointFusion_atlas_subject],
                              'input_image')

        JointFusionWF.connect(
            NewlabelMapResample[jointFusion_atlas_subject], 'output_image',
            NewwarpedAtlasLblMergeNode,
            'in' + str(merge_input_offset + jointFusion_atlas_mergeindex))

        jointFusion_atlas_mergeindex += 1

    ## Now work on cleaning up the label maps
    from .FixLabelMapsTools import FixLabelMapFromNeuromorphemetrics2012
    from .FixLabelMapsTools import RecodeLabelMap

    ### Original NeuroMorphometrica merged fusion
    jointFusion = pe.Node(interface=ants.AntsJointFusion(),
                          name="AntsJointFusion")
    many_cpu_JointFusion_options_dictionary = {
        'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 10, 8, 16),
        'overwrite': True
    }
    jointFusion.plugin_args = many_cpu_JointFusion_options_dictionary
    jointFusion.inputs.num_threads = -1
    jointFusion.inputs.dimension = 3
    jointFusion.inputs.search_radius = [3]
    # jointFusion.inputs.method='Joint[0.1,2]'
    jointFusion.inputs.out_label_fusion = 'JointFusion_HDAtlas20_2015_label.nii.gz'
    # JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels', jointFusion, 'mask_image')
    JointFusionWF.connect(fixedROIAuto, 'outputROIMaskVolume', jointFusion,
                          'mask_image')

    JointFusionWF.connect(warpedAtlasLblMergeNode, 'out', jointFusion,
                          'atlas_segmentation_image')

    AdjustMergeListNode = pe.Node(Function(
        function=adjustMergeList,
        input_names=['allList', 'n_modality'],
        output_names=['out']),
                                  name="AdjustMergeListNode")
    "*** HACK JointFusion only uses T1"
    # AdjustMergeListNode.inputs.n_modality = n_modality
    AdjustMergeListNode.inputs.n_modality = 1

    JointFusionWF.connect(warpedAtlasesMergeNode, 'out', AdjustMergeListNode,
                          'allList')
    JointFusionWF.connect(AdjustMergeListNode, 'out', jointFusion,
                          'atlas_image')

    AdjustTargetImageListNode = pe.Node(Function(
        function=adjustMergeList,
        input_names=['allList', 'n_modality'],
        output_names=['out']),
                                        name="AdjustTargetImageListNode")
    AdjustTargetImageListNode.inputs.n_modality = n_modality

    "*** HACK JointFusion only uses T1"
    """ Once JointFusion works with T2 properly,
        delete sessionMakeListSingleModalInput and use sessionMakeMultimodalInput instead
    """
    sessionMakeListSingleModalInput = pe.Node(
        Function(function=MakeVector,
                 input_names=['inFN1', 'inFN2', 'jointFusion'],
                 output_names=['outFNs']),
        run_without_submitting=True,
        name="sessionMakeListSingleModalInput")
    sessionMakeListSingleModalInput.inputs.jointFusion = False
    JointFusionWF.connect(inputsSpec, 'subj_t1_image',
                          sessionMakeListSingleModalInput, 'inFN1')
    JointFusionWF.connect(sessionMakeListSingleModalInput, 'outFNs',
                          jointFusion, 'target_image')

    JointFusionWF.connect(jointFusion, 'out_label_fusion', outputsSpec,
                          'JointFusion_HDAtlas20_2015_label')

    ## We need to recode values to ensure that the labels match FreeSurer as close as possible by merging
    ## some labels together to standard FreeSurfer confenventions (i.e. for WMQL)
    RECODE_LABELS_2_Standard_FSWM = [
        (15071, 47), (15072, 47), (15073, 47), (15145, 1011), (15157, 1011),
        (15161, 1011), (15179, 1012), (15141, 1014), (15151, 1017),
        (15163, 1018), (15165, 1019), (15143, 1027), (15191, 1028),
        (15193, 1028), (15185, 1030), (15201, 1030), (15175, 1031),
        (15195, 1031), (15173, 1035), (15144, 2011), (15156, 2011),
        (15160, 2011), (15178, 2012), (15140, 2014), (15150, 2017),
        (15162, 2018), (15164, 2019), (15142, 2027), (15190, 2028),
        (15192, 2028), (15184, 2030), (15174, 2031), (15194, 2031),
        (15172, 2035), (15200, 2030)
    ]
    ## def RecodeLabelMap(InputFileName,OutputFileName,RECODE_TABLE):
    RecodeToStandardFSWM = pe.Node(Function(
        function=RecodeLabelMap,
        input_names=['InputFileName', 'OutputFileName', 'RECODE_TABLE'],
        output_names=['OutputFileName']),
                                   name="RecodeToStandardFSWM")
    RecodeToStandardFSWM.inputs.RECODE_TABLE = RECODE_LABELS_2_Standard_FSWM
    RecodeToStandardFSWM.inputs.OutputFileName = 'JointFusion_HDAtlas20_2015_fs_standard_label.nii.gz'

    JointFusionWF.connect(RecodeToStandardFSWM, 'OutputFileName', outputsSpec,
                          'JointFusion_HDAtlas20_2015_fs_standard_label')

    ## JointFusion_SNAPSHOT_WRITER for Segmented result checking:
    #    JointFusion_SNAPSHOT_WRITERNodeName = "JointFusion_ExtendedJointFusion_SNAPSHOT_WRITER"
    #    JointFusion_SNAPSHOT_WRITER = pe.Node(interface=BRAINSSnapShotWriter(), name=JointFusion_SNAPSHOT_WRITERNodeName)

    #    JointFusion_SNAPSHOT_WRITER.inputs.outputFilename = 'JointFusion_HDAtlas20_2015_CSFVBInjected_label.png'  # output specification
    #    JointFusion_SNAPSHOT_WRITER.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0]
    #    JointFusion_SNAPSHOT_WRITER.inputs.inputSliceToExtractInPhysicalPoint = [-3, -7, -3, 5, 7, 22, -22]

    #    JointFusionWF.connect(JointFusion_SNAPSHOT_WRITER,'outputFilename',outputsSpec,'JointFusion_extended_snapshot')

    # Post-fusion cleanup: dust cleanup, then (optionally) CSF/VB injection.
    myLocalDustCleanup = CreateDustCleanupWorkflow("DUST_CLEANUP", onlyT1,
                                                   master_config)
    JointFusionWF.connect(inputsSpec, 'subj_t1_image', myLocalDustCleanup,
                          'inputspec.subj_t1_image')
    if not onlyT1:
        JointFusionWF.connect(subjectT2Resample, 'outputVolume',
                              myLocalDustCleanup, 'inputspec.subj_t2_image')
    if runFixFusionLabelMap:
        ## post processing of jointfusion
        injectSurfaceCSFandVBIntoLabelMap = pe.Node(
            Function(function=FixLabelMapFromNeuromorphemetrics2012,
                     input_names=[
                         'fusionFN', 'FixedHeadFN', 'posteriorListOfTuples',
                         'LeftHemisphereFN', 'outFN', 'OUT_DICT'
                     ],
                     output_names=['fixedFusionLabelFN']),
            name="injectSurfaceCSFandVBIntoLabelMap")
        injectSurfaceCSFandVBIntoLabelMap.inputs.outFN = 'JointFusion_HDAtlas20_2015_CSFVBInjected_label.nii.gz'
        from collections import OrderedDict  # Need OrderedDict internally to ensure consistent ordering
        FREESURFER_DICT = OrderedDict({
            'BRAINSTEM':
            16,
            'RH_CSF':
            24,
            'LH_CSF':
            24,
            'BLOOD':
            15000,
            'UNKNOWN':
            999,
            'CONNECTED': [11, 12, 13, 9, 17, 26, 50, 51, 52, 48, 53, 58]
        })
        injectSurfaceCSFandVBIntoLabelMap.inputs.OUT_DICT = FREESURFER_DICT
        JointFusionWF.connect(jointFusion, 'out_label_fusion',
                              injectSurfaceCSFandVBIntoLabelMap, 'fusionFN')
        JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels',
                              injectSurfaceCSFandVBIntoLabelMap, 'FixedHeadFN')
        JointFusionWF.connect(inputsSpec, 'subj_posteriors',
                              injectSurfaceCSFandVBIntoLabelMap,
                              'posteriorListOfTuples')
        JointFusionWF.connect(inputsSpec, 'subj_left_hemisphere',
                              injectSurfaceCSFandVBIntoLabelMap,
                              'LeftHemisphereFN')

        JointFusionWF.connect(injectSurfaceCSFandVBIntoLabelMap,
                              'fixedFusionLabelFN', myLocalDustCleanup,
                              'inputspec.subj_label_atlas')

        JointFusionWF.connect(
            injectSurfaceCSFandVBIntoLabelMap, 'fixedFusionLabelFN',
            outputsSpec, 'JointFusion_HDAtlas20_2015_CSFVBInjected_label')

        JointFusionWF.connect(
            myLocalDustCleanup,
            'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
            RecodeToStandardFSWM, 'InputFileName')

        JointFusionWF.connect(
            myLocalDustCleanup,
            'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
            outputsSpec, 'JointFusion_HDAtlas20_2015_dustCleaned_label')

    #        JointFusionWF.connect([(inputsSpec, JointFusion_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]),
    #                    (injectSurfaceCSFandVBIntoLabelMap, JointFusion_SNAPSHOT_WRITER,
    #                      [('fixedFusionLabelFN', 'inputBinaryVolumes')])
    #                   ])
    else:
        JointFusionWF.connect(jointFusion, 'output_label_image',
                              myLocalDustCleanup, 'inputspec.subj_label_atlas')

        JointFusionWF.connect(
            jointFusion, 'output_label_image', outputsSpec,
            'JointFusion_HDAtlas20_2015_CSFVBInjected_label')

        JointFusionWF.connect(
            myLocalDustCleanup,
            'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
            RecodeToStandardFSWM, 'InputFileName')

        JointFusionWF.connect(
            myLocalDustCleanup,
            'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
            outputsSpec, 'JointFusion_HDAtlas20_2015_dustCleaned_label')

    #        JointFusionWF.connect([(inputsSpec, JointFusion_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]),
    #                    (jointFusion, JointFusion_SNAPSHOT_WRITER,
    #                      [('output_label_image', 'inputBinaryVolumes')])
    #                   ])
    """
    Compute label volumes
    """
    computeLabelVolumes = CreateVolumeMeasureWorkflow("LabelVolume",
                                                      master_config)
    JointFusionWF.connect(inputsSpec, 'subj_t1_image', computeLabelVolumes,
                          'inputspec.subj_t1_image')
    JointFusionWF.connect(
        myLocalDustCleanup,
        'outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label',
        computeLabelVolumes, 'inputspec.subj_label_image')
    JointFusionWF.connect(computeLabelVolumes, 'outputspec.csvFilename',
                          outputsSpec, 'JointFusion_volumes_csv')
    JointFusionWF.connect(computeLabelVolumes, 'outputspec.jsonFilename',
                          outputsSpec, 'JointFusion_volumes_json')

    ## Lobe Pacellation by recoding
    if master_config['relabel2lobes_filename'] != None:
        # print("Generate relabeled version based on {0}".format(master_config['relabel2lobes_filename']))

        RECODE_LABELS_2_LobePacellation = readRecodingList(
            master_config['relabel2lobes_filename'])
        RecordToFSLobes = pe.Node(Function(
            function=RecodeLabelMap,
            input_names=['InputFileName', 'OutputFileName', 'RECODE_TABLE'],
            output_names=['OutputFileName']),
                                  name="RecordToFSLobes")
        RecordToFSLobes.inputs.RECODE_TABLE = RECODE_LABELS_2_LobePacellation
        RecordToFSLobes.inputs.OutputFileName = 'JointFusion_HDAtlas20_2015_lobe_label.nii.gz'
        JointFusionWF.connect(RecodeToStandardFSWM, 'OutputFileName',
                              RecordToFSLobes, 'InputFileName')
        JointFusionWF.connect(RecordToFSLobes, 'OutputFileName', outputsSpec,
                              'JointFusion_HDAtlas20_2015_lobe_label')
        """
        Compute lobe volumes
        """
        computeLobeVolumes = CreateVolumeMeasureWorkflow(
            "LobeVolume", master_config)
        JointFusionWF.connect(inputsSpec, 'subj_t1_image', computeLobeVolumes,
                              'inputspec.subj_t1_image')
        JointFusionWF.connect(RecordToFSLobes, 'OutputFileName',
                              computeLobeVolumes, 'inputspec.subj_label_image')
        JointFusionWF.connect(computeLobeVolumes, 'outputspec.csvFilename',
                              outputsSpec, 'JointFusion_lobe_volumes_csv')
        JointFusionWF.connect(computeLobeVolumes, 'outputspec.jsonFilename',
                              outputsSpec, 'JointFusion_lobe_volumes_json')

    return JointFusionWF
# ---- 示例#30 (snippet-boundary marker from the scraped source) ----
def segmentation(projectid,
                 subjectid,
                 sessionid,
                 master_config,
                 onlyT1=True,
                 pipeline_name=''):
    """Build the per-session BRAINSCut subcortical segmentation workflow.

    Wires up a nipype Workflow that:
      1. clips the T1 average image with the BABC brain labels,
      2. refines the atlas-to-subject registration with an ANTs SyN stage
         restored from the saved BRAINSABC registration state,
      3. runs the BRAINSCut random-forest segmentation of 12 subcortical
         structures (left/right accumben, caudate, putamen, globus,
         thalamus, hippocampus),
      4. sinks the cleaned segmentations, a QA snapshot image, and the
         subject-to-atlas warped images and transforms to DataSinks.

    Parameters
    ----------
    projectid, subjectid, sessionid :
        Identifiers used to build unique node names and output paths
        (each is passed through ``str()`` where used).
    master_config : dict
        Pipeline configuration. Keys read here: 'long_q', 'queue',
        'ds_overwrite', 'resultdir', 'plugin_name', 'plugin_args', plus
        whatever ``nipype.config.update_config`` consumes.
    onlyT1 : bool
        If True only the T1 channel feeds BRAINSCut and the atlas-space
        resampling; if False the T2 average is connected as well and one
        extra file is merged for the subject-to-atlas warp.
    pipeline_name : str
        Name of the returned top-level Workflow.

    Returns
    -------
    nipype.pipeline.engine.Workflow
        The fully wired (not yet executed) workflow ``baw200``.
    """
    import os.path
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces import ants
    from nipype.interfaces.utility import IdentityInterface, Function, Merge
    # Set universal pipeline options
    from nipype import config
    config.update_config(master_config)

    from PipeLineFunctionHelpers import ClipT1ImageWithBrainMask
    from .WorkupT1T2BRAINSCut import CreateBRAINSCutWorkflow
    from utilities.distributed import modify_qsub_args
    from nipype.interfaces.semtools import BRAINSSnapShotWriter

    #CLUSTER_QUEUE=master_config['queue']
    CLUSTER_QUEUE_LONG = master_config['long_q']
    baw200 = pe.Workflow(name=pipeline_name)

    # HACK: print for debugging — dumps the entire master_config to stdout.
    # (Note: loop variable 'itme' is a typo for 'item'; harmless since only
    # its value is printed.)
    for key, itme in list(master_config.items()):
        print("-" * 30)
        print(key, ":", itme)
    print("-" * 30)
    #END HACK

    # All upstream products this sub-workflow consumes: averaged anatomicals,
    # the template, BABC labels/posteriors, the saved registration state, the
    # spherical-coordinate images (rho/phi/theta), the per-structure
    # probability maps, and the trained BRAINSCut model file.
    inputsSpec = pe.Node(interface=IdentityInterface(fields=[
        't1_average',
        't2_average',
        'template_t1',
        'hncma_atlas',
        'LMIatlasToSubject_tx',
        'inputLabels',
        'inputHeadLabels',
        'posteriorImages',
        'UpdatedPosteriorsList',
        'atlasToSubjectRegistrationState',
        'rho',
        'phi',
        'theta',
        'l_caudate_ProbabilityMap',
        'r_caudate_ProbabilityMap',
        'l_hippocampus_ProbabilityMap',
        'r_hippocampus_ProbabilityMap',
        'l_putamen_ProbabilityMap',
        'r_putamen_ProbabilityMap',
        'l_thalamus_ProbabilityMap',
        'r_thalamus_ProbabilityMap',
        'l_accumben_ProbabilityMap',
        'r_accumben_ProbabilityMap',
        'l_globus_ProbabilityMap',
        'r_globus_ProbabilityMap',
        'trainModelFile_txtD0060NT0060_gz',
    ]),
                         run_without_submitting=True,
                         name='inputspec')

    # outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
    #                       run_without_submitting=True, name='outputspec')

    # Clip the T1 average to the brain region defined by the BABC labels
    # before it is used downstream.
    currentClipT1ImageWithBrainMaskName = 'ClipT1ImageWithBrainMask_' + str(
        subjectid) + "_" + str(sessionid)
    ClipT1ImageWithBrainMaskNode = pe.Node(
        interface=Function(
            function=ClipT1ImageWithBrainMask,
            input_names=['t1_image', 'brain_labels', 'clipped_file_name'],
            output_names=['clipped_file']),
        name=currentClipT1ImageWithBrainMaskName)
    ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = 'clipped_from_BABC_labels_t1.nii.gz'

    baw200.connect([(inputsSpec, ClipT1ImageWithBrainMaskNode,
                     [('t1_average', 't1_image'),
                      ('inputLabels', 'brain_labels')])])

    currentA2SantsRegistrationPostABCSyN = 'A2SantsRegistrationPostABCSyN_' + str(
        subjectid) + "_" + str(sessionid)
    ## TODO: It would be great to update the BRAINSABC atlasToSubjectTransform at this point, but
    ##       That requires more testing, and fixes to ANTS to properly collapse transforms.
    ##       For now we are simply creating a dummy node to pass through

    A2SantsRegistrationPostABCSyN = pe.Node(
        interface=ants.Registration(),
        name=currentA2SantsRegistrationPostABCSyN)

    # Resource request for the ANTs SyN stage on the long cluster queue.
    many_cpu_ANTsSyN_options_dictionary = {
        'qsub_args': modify_qsub_args(CLUSTER_QUEUE_LONG, 8, 8, 16),
        'overwrite': True
    }
    A2SantsRegistrationPostABCSyN.plugin_args = many_cpu_ANTsSyN_options_dictionary
    # Shared project helper applies the standard ANTs registration parameters;
    # see CommonANTsRegistrationSettings for the actual stage configuration.
    CommonANTsRegistrationSettings(
        antsRegistrationNode=A2SantsRegistrationPostABCSyN,
        registrationTypeDescription="A2SantsRegistrationPostABCSyN",
        output_transform_prefix='AtlasToSubjectPostBABC_SyN',
        output_warped_image='atlas2subjectPostBABC.nii.gz',
        output_inverse_warped_image='subject2atlasPostBABC.nii.gz',
        save_state='SavedInternalSyNStatePostBABC.h5',
        invert_initial_moving_transform=None)

    ## TODO: Try multi-modal registration here
    # Restore the saved BRAINSABC registration state and continue refining
    # the atlas(moving) -> subject T1(fixed) alignment.
    baw200.connect([(inputsSpec, A2SantsRegistrationPostABCSyN,
                     [('atlasToSubjectRegistrationState', 'restore_state'),
                      ('t1_average', 'fixed_image'),
                      ('template_t1', 'moving_image')])])

    myLocalSegWF = CreateBRAINSCutWorkflow(projectid, subjectid, sessionid,
                                           master_config['queue'],
                                           master_config['long_q'],
                                           "Segmentation", onlyT1)
    # Average images merged here feed the QA snapshot writer below
    # (in1 = T1; in2 = T2 when onlyT1 is False).
    MergeStage2AverageImagesName = "99_mergeAvergeStage2Images_" + str(
        sessionid)
    MergeStage2AverageImages = pe.Node(interface=Merge(2),
                                       run_without_submitting=True,
                                       name=MergeStage2AverageImagesName)

    baw200.connect([(inputsSpec, myLocalSegWF, [
        ('t1_average', 'inputspec.T1Volume'),
        ('template_t1', 'inputspec.template_t1'),
        ('posteriorImages', "inputspec.posteriorDictionary"),
        ('inputLabels', 'inputspec.RegistrationROI'),
    ]), (inputsSpec, MergeStage2AverageImages, [('t1_average', 'in1')]),
                    (A2SantsRegistrationPostABCSyN, myLocalSegWF,
                     [('composite_transform',
                       'inputspec.atlasToSubjectTransform')])])

    # Spherical-coordinate images, per-structure probability maps, and the
    # trained model file all pass straight through to BRAINSCut.
    baw200.connect([(inputsSpec, myLocalSegWF, [
        ('rho', 'inputspec.rho'), ('phi', 'inputspec.phi'),
        ('theta', 'inputspec.theta'),
        ('l_caudate_ProbabilityMap', 'inputspec.l_caudate_ProbabilityMap'),
        ('r_caudate_ProbabilityMap', 'inputspec.r_caudate_ProbabilityMap'),
        ('l_hippocampus_ProbabilityMap',
         'inputspec.l_hippocampus_ProbabilityMap'),
        ('r_hippocampus_ProbabilityMap',
         'inputspec.r_hippocampus_ProbabilityMap'),
        ('l_putamen_ProbabilityMap', 'inputspec.l_putamen_ProbabilityMap'),
        ('r_putamen_ProbabilityMap', 'inputspec.r_putamen_ProbabilityMap'),
        ('l_thalamus_ProbabilityMap', 'inputspec.l_thalamus_ProbabilityMap'),
        ('r_thalamus_ProbabilityMap', 'inputspec.r_thalamus_ProbabilityMap'),
        ('l_accumben_ProbabilityMap', 'inputspec.l_accumben_ProbabilityMap'),
        ('r_accumben_ProbabilityMap', 'inputspec.r_accumben_ProbabilityMap'),
        ('l_globus_ProbabilityMap', 'inputspec.l_globus_ProbabilityMap'),
        ('r_globus_ProbabilityMap', 'inputspec.r_globus_ProbabilityMap'),
        ('trainModelFile_txtD0060NT0060_gz',
         'inputspec.trainModelFile_txtD0060NT0060_gz')
    ])])

    if not onlyT1:
        baw200.connect([
            (inputsSpec, myLocalSegWF, [('t2_average', 'inputspec.T2Volume')]),
            (inputsSpec, MergeStage2AverageImages, [('t2_average', 'in2')])
        ])
        file_count = 15  # Count of files to merge into MergeSessionSubjectToAtlas
    else:
        file_count = 14  # Count of files to merge into MergeSessionSubjectToAtlas

    ## NOTE: Element 0 of AccumulatePriorsList is the accumulated GM tissue
    # baw200.connect([(AccumulateLikeTissuePosteriorsNode, myLocalSegWF,
    #               [(('AccumulatePriorsList', getListIndex, 0), "inputspec.TotalGM")]),
    #               ])

    ### Now define where the final organized outputs should go.
    DataSink = pe.Node(nio.DataSink(),
                       name="CleanedDenoisedSegmentation_DS_" +
                       str(subjectid) + "_" + str(sessionid))
    DataSink.overwrite = master_config['ds_overwrite']
    DataSink.inputs.base_directory = master_config['resultdir']
    # DataSink.inputs.regexp_substitutions = GenerateOutputPattern(projectid, subjectid, sessionid,'BRAINSCut')
    # DataSink.inputs.regexp_substitutions = GenerateBRAINSCutImagesOutputPattern(projectid, subjectid, sessionid)
    # Filename rewrites applied in order; NOTE(review): the chained
    # '.nii.gz' -> '_seg.nii.gz' -> '_seg_seg' -> '_seg' substitutions look
    # deliberately order-dependent — confirm before touching.
    DataSink.inputs.substitutions = [
        ('Segmentations',
         os.path.join(projectid, subjectid, sessionid,
                      'CleanedDenoisedRFSegmentations')),
        ('subjectANNLabel_', ''), ('ANNContinuousPrediction', ''),
        ('subject.nii.gz', '.nii.gz'), ('_seg.nii.gz', '_seg.nii.gz'),
        ('.nii.gz', '_seg.nii.gz'), ('_seg_seg', '_seg')
    ]

    baw200.connect([
        (myLocalSegWF, DataSink,
         [('outputspec.outputBinaryLeftCaudate', 'Segmentations.@LeftCaudate'),
          ('outputspec.outputBinaryRightCaudate',
           'Segmentations.@RightCaudate'),
          ('outputspec.outputBinaryLeftHippocampus',
           'Segmentations.@LeftHippocampus'),
          ('outputspec.outputBinaryRightHippocampus',
           'Segmentations.@RightHippocampus'),
          ('outputspec.outputBinaryLeftPutamen', 'Segmentations.@LeftPutamen'),
          ('outputspec.outputBinaryRightPutamen',
           'Segmentations.@RightPutamen'),
          ('outputspec.outputBinaryLeftThalamus',
           'Segmentations.@LeftThalamus'),
          ('outputspec.outputBinaryRightThalamus',
           'Segmentations.@RightThalamus'),
          ('outputspec.outputBinaryLeftAccumben',
           'Segmentations.@LeftAccumben'),
          ('outputspec.outputBinaryRightAccumben',
           'Segmentations.@RightAccumben'),
          ('outputspec.outputBinaryLeftGlobus', 'Segmentations.@LeftGlobus'),
          ('outputspec.outputBinaryRightGlobus', 'Segmentations.@RightGlobus'),
          ('outputspec.outputLabelImageName', 'Segmentations.@LabelImageName'),
          ('outputspec.outputCSVFileName', 'Segmentations.@CSVFileName')]),
        # (myLocalSegWF, DataSink, [('outputspec.cleaned_labels', 'Segmentations.@cleaned_labels')])
    ])

    # Collect the 12 binary structure masks for the QA snapshot writer.
    MergeStage2BinaryVolumesName = "99_MergeStage2BinaryVolumes_" + str(
        sessionid)
    MergeStage2BinaryVolumes = pe.Node(interface=Merge(12),
                                       run_without_submitting=True,
                                       name=MergeStage2BinaryVolumesName)

    baw200.connect([(myLocalSegWF, MergeStage2BinaryVolumes,
                     [('outputspec.outputBinaryLeftAccumben', 'in1'),
                      ('outputspec.outputBinaryLeftCaudate', 'in2'),
                      ('outputspec.outputBinaryLeftPutamen', 'in3'),
                      ('outputspec.outputBinaryLeftGlobus', 'in4'),
                      ('outputspec.outputBinaryLeftThalamus', 'in5'),
                      ('outputspec.outputBinaryLeftHippocampus', 'in6'),
                      ('outputspec.outputBinaryRightAccumben', 'in7'),
                      ('outputspec.outputBinaryRightCaudate', 'in8'),
                      ('outputspec.outputBinaryRightPutamen', 'in9'),
                      ('outputspec.outputBinaryRightGlobus', 'in10'),
                      ('outputspec.outputBinaryRightThalamus', 'in11'),
                      ('outputspec.outputBinaryRightHippocampus', 'in12')])])

    ## SnapShotWriter for Segmented result checking:
    SnapShotWriterNodeName = "SnapShotWriter_" + str(sessionid)
    SnapShotWriter = pe.Node(interface=BRAINSSnapShotWriter(),
                             name=SnapShotWriterNodeName)

    SnapShotWriter.inputs.outputFilename = 'snapShot' + str(
        sessionid) + '.png'  # output specification
    # NOTE(review): plane directions and physical-point slice locations appear
    # hand-tuned for subcortical QA views — confirm against BRAINSSnapShotWriter docs.
    SnapShotWriter.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0]
    SnapShotWriter.inputs.inputSliceToExtractInPhysicalPoint = [
        -3, -7, -3, 5, 7, 22, -22
    ]

    baw200.connect([(MergeStage2AverageImages, SnapShotWriter,
                     [('out', 'inputVolumes')]),
                    (MergeStage2BinaryVolumes, SnapShotWriter,
                     [('out', 'inputBinaryVolumes')]),
                    (SnapShotWriter, DataSink,
                     [('outputFilename', 'Segmentations.@outputSnapShot')])])

    # currentAntsLabelWarpToSubject = 'AntsLabelWarpToSubject' + str(subjectid) + "_" + str(sessionid)
    # AntsLabelWarpToSubject = pe.Node(interface=ants.ApplyTransforms(), name=currentAntsLabelWarpToSubject)
    #
    # AntsLabelWarpToSubject.inputs.num_threads = -1
    # AntsLabelWarpToSubject.inputs.dimension = 3
    # AntsLabelWarpToSubject.inputs.output_image = 'warped_hncma_atlas_seg.nii.gz'
    # AntsLabelWarpToSubject.inputs.interpolation = "MultiLabel"
    #
    # baw200.connect([(A2SantsRegistrationPostABCSyN, AntsLabelWarpToSubject, [('composite_transform', 'transforms')]),
    #                 (inputsSpec, AntsLabelWarpToSubject, [('t1_average', 'reference_image'),
    #                                                       ('hncma_atlas', 'input_image')])
    #                 ])
    # #####
    # ### Now define where the final organized outputs should go.
    # AntsLabelWarpedToSubject_DSName = "AntsLabelWarpedToSubject_DS_" + str(sessionid)
    # AntsLabelWarpedToSubject_DS = pe.Node(nio.DataSink(), name=AntsLabelWarpedToSubject_DSName)
    # AntsLabelWarpedToSubject_DS.overwrite = master_config['ds_overwrite']
    # AntsLabelWarpedToSubject_DS.inputs.base_directory = master_config['resultdir']
    # AntsLabelWarpedToSubject_DS.inputs.substitutions = [('AntsLabelWarpedToSubject', os.path.join(projectid, subjectid, sessionid, 'AntsLabelWarpedToSubject'))]
    #
    # baw200.connect([(AntsLabelWarpToSubject, AntsLabelWarpedToSubject_DS, [('output_image', 'AntsLabelWarpedToSubject')])])

    # Gather everything to be resampled into atlas space with linear
    # interpolation: 12 binary masks, the posteriors list, the T1 average,
    # and (when available) the T2 average — matching file_count above.
    MergeSessionSubjectToAtlasName = "99_MergeSessionSubjectToAtlas_" + str(
        sessionid)
    MergeSessionSubjectToAtlas = pe.Node(interface=Merge(file_count),
                                         run_without_submitting=True,
                                         name=MergeSessionSubjectToAtlasName)

    baw200.connect([
        (myLocalSegWF, MergeSessionSubjectToAtlas,
         [('outputspec.outputBinaryLeftAccumben', 'in1'),
          ('outputspec.outputBinaryLeftCaudate', 'in2'),
          ('outputspec.outputBinaryLeftPutamen', 'in3'),
          ('outputspec.outputBinaryLeftGlobus', 'in4'),
          ('outputspec.outputBinaryLeftThalamus', 'in5'),
          ('outputspec.outputBinaryLeftHippocampus', 'in6'),
          ('outputspec.outputBinaryRightAccumben', 'in7'),
          ('outputspec.outputBinaryRightCaudate', 'in8'),
          ('outputspec.outputBinaryRightPutamen', 'in9'),
          ('outputspec.outputBinaryRightGlobus', 'in10'),
          ('outputspec.outputBinaryRightThalamus', 'in11'),
          ('outputspec.outputBinaryRightHippocampus', 'in12')]),
        # (FixWMPartitioningNode, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList', 'in13')]),
        (inputsSpec, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList',
                                                   'in13')]),
        (inputsSpec, MergeSessionSubjectToAtlas, [('t1_average', 'in14')])
    ])

    if not onlyT1:
        assert file_count == 15
        baw200.connect([(inputsSpec, MergeSessionSubjectToAtlas,
                         [('t2_average', 'in15')])])

    # Resample each merged image into atlas space using the inverse of the
    # refined atlas-to-subject transform (Linear interpolation).
    LinearSubjectToAtlasANTsApplyTransformsName = 'LinearSubjectToAtlasANTsApplyTransforms_' + str(
        sessionid)
    LinearSubjectToAtlasANTsApplyTransforms = pe.MapNode(
        interface=ants.ApplyTransforms(),
        iterfield=['input_image'],
        name=LinearSubjectToAtlasANTsApplyTransformsName)
    LinearSubjectToAtlasANTsApplyTransforms.inputs.num_threads = -1
    LinearSubjectToAtlasANTsApplyTransforms.inputs.interpolation = 'Linear'

    baw200.connect([
        (A2SantsRegistrationPostABCSyN,
         LinearSubjectToAtlasANTsApplyTransforms, [
             ('inverse_composite_transform', 'transforms')
         ]),
        (inputsSpec, LinearSubjectToAtlasANTsApplyTransforms,
         [('template_t1', 'reference_image')]),
        (MergeSessionSubjectToAtlas, LinearSubjectToAtlasANTsApplyTransforms,
         [('out', 'input_image')])
    ])

    # Label images go through a separate MapNode using MultiLabel
    # interpolation so label values are preserved.
    MergeMultiLabelSessionSubjectToAtlasName = "99_MergeMultiLabelSessionSubjectToAtlas_" + str(
        sessionid)
    MergeMultiLabelSessionSubjectToAtlas = pe.Node(
        interface=Merge(2),
        run_without_submitting=True,
        name=MergeMultiLabelSessionSubjectToAtlasName)

    baw200.connect([(inputsSpec, MergeMultiLabelSessionSubjectToAtlas,
                     [('inputLabels', 'in1'), ('inputHeadLabels', 'in2')])])

    ### This is taking this sessions RF label map back into NAC atlas space.
    #{
    MultiLabelSubjectToAtlasANTsApplyTransformsName = 'MultiLabelSubjectToAtlasANTsApplyTransforms_' + str(
        sessionid) + '_map'
    MultiLabelSubjectToAtlasANTsApplyTransforms = pe.MapNode(
        interface=ants.ApplyTransforms(),
        iterfield=['input_image'],
        name=MultiLabelSubjectToAtlasANTsApplyTransformsName)
    MultiLabelSubjectToAtlasANTsApplyTransforms.inputs.num_threads = -1
    MultiLabelSubjectToAtlasANTsApplyTransforms.inputs.interpolation = 'MultiLabel'

    baw200.connect([
        (A2SantsRegistrationPostABCSyN,
         MultiLabelSubjectToAtlasANTsApplyTransforms, [
             ('inverse_composite_transform', 'transforms')
         ]),
        (inputsSpec, MultiLabelSubjectToAtlasANTsApplyTransforms,
         [('template_t1', 'reference_image')]),
        (MergeMultiLabelSessionSubjectToAtlas,
         MultiLabelSubjectToAtlasANTsApplyTransforms, [('out', 'input_image')])
    ])
    #}
    ### Now we must take the sessions to THIS SUBJECTS personalized atlas.
    #{
    #}

    ### Now define where the final organized outputs should go.
    Subj2Atlas_DSName = "SubjectToAtlas_DS_" + str(sessionid)
    Subj2Atlas_DS = pe.Node(nio.DataSink(), name=Subj2Atlas_DSName)
    Subj2Atlas_DS.overwrite = master_config['ds_overwrite']
    Subj2Atlas_DS.inputs.base_directory = master_config['resultdir']
    # Strip the MapNode iteration directory from sunk paths, replacing it
    # with the session id.
    Subj2Atlas_DS.inputs.regexp_substitutions = [
        (r'_LinearSubjectToAtlasANTsApplyTransforms_[^/]*',
         r'' + sessionid + '/')
    ]

    baw200.connect([(LinearSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS, [
        ('output_image', 'SubjectToAtlasWarped.@linear_output_images')
    ])])

    Subj2AtlasTransforms_DSName = "SubjectToAtlasTransforms_DS_" + str(
        sessionid)
    Subj2AtlasTransforms_DS = pe.Node(nio.DataSink(),
                                      name=Subj2AtlasTransforms_DSName)
    Subj2AtlasTransforms_DS.overwrite = master_config['ds_overwrite']
    Subj2AtlasTransforms_DS.inputs.base_directory = master_config['resultdir']
    Subj2AtlasTransforms_DS.inputs.regexp_substitutions = [
        (r'SubjectToAtlasWarped', r'SubjectToAtlasWarped/' + sessionid + '/')
    ]

    baw200.connect([(A2SantsRegistrationPostABCSyN, Subj2AtlasTransforms_DS, [
        ('composite_transform', 'SubjectToAtlasWarped.@composite_transform'),
        ('inverse_composite_transform',
         'SubjectToAtlasWarped.@inverse_composite_transform')
    ])])
    # baw200.connect([(MultiLabelSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS, [('output_image', 'SubjectToAtlasWarped.@multilabel_output_images')])])

    if master_config['plugin_name'].startswith(
            'SGE'
    ):  # for some nodes, the qsub call needs to be modified on the cluster
        # NOTE: this replaces (not merges with) the plugin_args set on
        # A2SantsRegistrationPostABCSyN earlier in this function.
        A2SantsRegistrationPostABCSyN.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'overwrite': True,
            'qsub_args': modify_qsub_args(master_config['queue'], 8, 8, 24)
        }
        SnapShotWriter.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'overwrite': True,
            'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1)
        }
        LinearSubjectToAtlasANTsApplyTransforms.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'overwrite': True,
            'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1)
        }
        MultiLabelSubjectToAtlasANTsApplyTransforms.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'overwrite': True,
            'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1)
        }

    return baw200