Example #1
from nipype.interfaces.ants import Registration  # assumed import; the examples below use this interface


def register_ants(moving_image, atlas, output_image):
    reg = Registration()

    reg.inputs.fixed_image = atlas
    reg.inputs.moving_image = moving_image
    reg.inputs.output_warped_image = output_image
    reg.inputs.output_transform_prefix = "stx-152"
    reg.inputs.transforms = ['Translation']
    reg.inputs.transform_parameters = [(0.1, )]
    reg.inputs.number_of_iterations = ([[10000, 111110, 11110]])
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.metric = ['Mattes']
    reg.inputs.metric_weight = [1]
    reg.inputs.radius_or_number_of_bins = [32]
    reg.inputs.sampling_strategy = ['Regular']
    reg.inputs.sampling_percentage = [0.3]
    reg.inputs.convergence_threshold = [1.e-6]
    reg.inputs.convergence_window_size = [20]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.shrink_factors = [[32, 16, 4]]
    reg.inputs.use_estimate_learning_rate_once = [True]
    reg.inputs.use_histogram_matching = [False]
    reg.inputs.initial_moving_transform_com = True

    reg.run()
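
A minimal usage sketch for register_ants above; the file paths are placeholders and a working ANTs installation on the PATH is assumed.

moving = '/data/sub-01/anat/sub-01_T1w.nii.gz'            # placeholder input
atlas = '/data/templates/mni_icbm152_t1.nii.gz'           # placeholder fixed/atlas image
warped = '/data/sub-01/anat/sub-01_T1w_to-atlas.nii.gz'   # placeholder output

register_ants(moving, atlas, warped)
# The transform files are written in the working directory with the
# "stx-152" prefix set inside the function.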
Example #2
def calculateRigidTransforms(frame1, frame2, saveFn):
    """
    Given the pair of images, calculate the rigid transformation from frame2
    to frame1 and save it using the saveFn prefix.

    Inputs:
    - frame1: image at timepoint n
    - frame2: image at timepoint n+1
    - saveFn: the prefix filename where the transform will be saved
    """
    # set up the registration
    reg = Registration()
    reg.inputs.fixed_image = frame1
    reg.inputs.moving_image = frame2
    reg.inputs.output_transform_prefix = saveFn
    reg.inputs.interpolation = 'NearestNeighbor'

    reg.inputs.transforms = ['Rigid']
    reg.inputs.transform_parameters = [(0.1, )]
    reg.inputs.number_of_iterations = [[100, 20]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = False
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initialize_transforms_per_stage = False
    reg.inputs.metric = ['CC']
    reg.inputs.metric_weight = [1]
    reg.inputs.radius_or_number_of_bins = [5]
    reg.inputs.sampling_strategy = ['Random']
    reg.inputs.sampling_percentage = [0.05]
    reg.inputs.convergence_threshold = [1.e-2]
    reg.inputs.convergence_window_size = [20]
    reg.inputs.smoothing_sigmas = [[2, 1]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.shrink_factors = [[2, 1]]

    reg.inputs.use_estimate_learning_rate_once = [True]
    reg.inputs.use_histogram_matching = [True]
    reg.inputs.output_warped_image = False
    reg.inputs.num_threads = 50

    # run the registration
    reg.run()
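
The transform written by calculateRigidTransforms can then be used to resample frame2 onto frame1's grid. This is a hedged sketch using nipype's ApplyTransforms; the exact transform filename depends on the Registration settings (with collapse_output_transforms=True ANTs typically writes '<prefix>0GenericAffine.mat'), and the image paths are placeholders.

from nipype.interfaces.ants import ApplyTransforms

at = ApplyTransforms()
at.inputs.dimension = 3
at.inputs.input_image = 'frame_t1.nii.gz'                # placeholder: image at timepoint n+1
at.inputs.reference_image = 'frame_t0.nii.gz'            # placeholder: image at timepoint n
at.inputs.transforms = ['pairwise_0GenericAffine.mat']   # assumed output of the registration above
at.inputs.interpolation = 'NearestNeighbor'
at.inputs.output_image = 'frame_t1_aligned.nii.gz'
at.run()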
Example #3
def rigidRegFunc(path,fileName,input_fixed,input_moving):
    os.chdir(path)
    reg = Registration()
    # ants-registration parameters:
    reg.inputs.fixed_image = input_fixed  # fixed image
    reg.inputs.moving_image = input_moving  # moving image
    reg.inputs.output_transform_prefix = path  # file path
    reg.inputs.transforms = ['Rigid']  # list of transformations
    reg.inputs.transform_parameters = [(.5,)]
    reg.inputs.number_of_iterations = [[40, 20, 10]]
#    reg.inputs.number_of_iterations = [[1, 1, 1]]
    reg.inputs.dimension = 3
    reg.inputs.initial_moving_transform_com = True
    #reg.inputs.invert_initial_moving_transform = True
    reg.inputs.output_warped_image = True
    reg.inputs.output_inverse_warped_image = True
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.metric = ['MI']  # mutual information
    reg.inputs.metric_weight = [1]
    reg.inputs.radius_or_number_of_bins = [64]
    reg.inputs.sampling_strategy = ['Regular']
    reg.inputs.sampling_percentage = [0.5]
    reg.inputs.terminal_output = 'allatonce'
    reg.inputs.convergence_threshold = [1.e-6]
    reg.inputs.convergence_window_size = [10]
    reg.inputs.smoothing_sigmas = [[3, 1, 0]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.shrink_factors = [[ 2, 1, 0]]
    reg.inputs.use_estimate_learning_rate_once = [True]
    reg.inputs.use_histogram_matching = [True]
    reg.terminal_output = 'none'
    reg.inputs.num_threads = 4  # ?
    reg.inputs.winsorize_lower_quantile = 0.025
    reg.inputs.winsorize_upper_quantile = 0.95
    #reg.inputs.collapse_linear_transforms_to_fixed_image_header = False
    reg.inputs.output_warped_image = path + 'rigid_reg_' + fileName
    reg.cmdline
    reg.run()
    return
Example #4
    def __init__(self,
                 moving_image=['path'],
                 fixed_image=['path'],
                 metric=['CC', 'MeanSquares', 'Demons'],
                 metric_weight=[1.0],
                 transforms=['Affine', 'BSplineSyN'],
                 shrink_factors=[[2, 1], [3, 2, 1]],
                 smoothing_sigmas=[[1, 0], [2, 1, 0]], **options):

        from nipype.interfaces.ants import Registration
        reg = Registration()
        reg.inputs.moving_image = moving_image
        reg.inputs.fixed_image = fixed_image
        reg.inputs.metric = metric
        reg.inputs.metric_weight = metric_weight
        reg.inputs.transforms = transforms
        reg.inputs.shrink_factors = shrink_factors
        reg.inputs.smoothing_sigmas = smoothing_sigmas
        for ef in options:
            setattr(reg.inputs, ef, options[ef])
        self.res = reg.run()
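
Because the extra keyword arguments are forwarded to reg.inputs via setattr, any other Registration input can be supplied at construction time. A sketch, assuming the (unnamed) class above is called AntsReg and using placeholder paths:

result = AntsReg(
    moving_image=['moving.nii.gz'],
    fixed_image=['fixed.nii.gz'],
    metric=['MI', 'MI'],
    metric_weight=[1.0, 1.0],
    transforms=['Affine', 'SyN'],
    shrink_factors=[[2, 1], [3, 2, 1]],
    smoothing_sigmas=[[1, 0], [2, 1, 0]],
    # anything not in the signature is applied with setattr(reg.inputs, ...):
    transform_parameters=[(0.1,), (0.1, 3.0, 0.0)],
    radius_or_number_of_bins=[32, 32],
    number_of_iterations=[[1000, 500], [100, 50, 30]],
    dimension=3,
).res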
Example #5
def registerToTemplate(fixedImgFn,
                       movingImgFn,
                       outFn,
                       outDir,
                       transformPrefix,
                       initialize=False,
                       initialRegFile=0,
                       regType='nonlinear'):
    """
    Register 2 images taken at different timepoints.

    Inputs:
    - fixedImgFn: filename of the fixed image (should be the template image)
    - movingImgFn: filename of the moving image (should be the Jn image)
    - outFn: name of the file to write the transformed image to.
    - outDir: path to the tmp directory
    - transformPrefix: prefix for the transform function
    - initialize: optional flag indicating whether to initialize the registration
        with the transformation matrix from a previous registration
    - initialRegFile: optional parameter to be used with the initialize parameter;
        specifies which output_#Affine.mat file to use
    - regType: optional parameter to specify the type of registration to use
        (affine ['Affine'] or nonlinear ['SyN'])

    Outputs:
    - None

    Effects:
    - Saves the registered image and the registration files
    """
    # Set up the registration
    # For both Affine and SyN transforms
    reg = Registration()
    reg.inputs.fixed_image = fixedImgFn
    reg.inputs.moving_image = movingImgFn
    reg.inputs.output_transform_prefix = transformPrefix
    reg.inputs.interpolation = 'NearestNeighbor'
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = False
    reg.inputs.collapse_output_transforms = False
    reg.inputs.initialize_transforms_per_stage = False

    # Specify certain parameters for the nonlinear/['SyN'] registration
    if regType == 'nonlinear':
        reg.inputs.transforms = ['Affine', 'SyN']
        reg.inputs.transform_parameters = [(2.0, ), (0.25, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]]
        reg.inputs.metric = ['CC'] * 2
        reg.inputs.metric_weight = [1] * 2
        reg.inputs.radius_or_number_of_bins = [5] * 2
        reg.inputs.convergence_threshold = [1.e-8, 1.e-9]
        reg.inputs.convergence_window_size = [20] * 2
        reg.inputs.smoothing_sigmas = [[1, 0], [2, 1, 0]]
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.shrink_factors = [[2, 1], [3, 2, 1]]
        reg.inputs.use_estimate_learning_rate_once = [True, True]
        reg.inputs.use_histogram_matching = [
            True, True
        ]  # This is the default value, but specify it anyway

    # Specify certain parameters for the affine/['Affine'] registration
    elif regType == 'affine':
        reg.inputs.transforms = ['Affine']
        reg.inputs.transform_parameters = [(2.0, )]
        reg.inputs.number_of_iterations = [[1500, 200]]
        reg.inputs.metric = ['CC']
        reg.inputs.metric_weight = [1]
        reg.inputs.radius_or_number_of_bins = [5]
        reg.inputs.convergence_threshold = [1.e-8]
        reg.inputs.convergence_window_size = [20]
        reg.inputs.smoothing_sigmas = [[1, 0]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.shrink_factors = [[2, 1]]
        reg.inputs.use_estimate_learning_rate_once = [True]
        reg.inputs.use_histogram_matching = [
            True
        ]  # This is the default, but specify it anyway

    reg.inputs.output_warped_image = outFn
    reg.inputs.num_threads = 50

    # If the registration is initialized, set a few more parameters
    if initialize is True:
        reg.inputs.initial_moving_transform = transformPrefix + str(
            initialRegFile) + 'Affine.mat'
        reg.inputs.invert_initial_moving_transform = False

    # Keep the user updated with the status of the registration
    print("Starting", regType, "registration for", outFn)
    # Run the registration
    reg.run()
    # Keep the user updated with the status of the registration
    print("Finished", regType, "registration for", outFn)
Example #6
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 3 +
                                       [[100, 50, 30]])
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 3 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 4
    reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 4
    reg.inputs.use_histogram_matching = [False] * 3 + [True]
    reg.inputs.initial_moving_transform_com = True

"""

print(reg.cmdline)
"""
3. Run the registration
"""

reg.run()
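
Example #6 is truncated: it assumes a Registration object named reg has already been created and pointed at a fixed and a moving image (the parameter block matches the multi-stage setup from the nipype documentation example). A minimal sketch of the assumed preamble, with placeholder filenames:

from nipype.interfaces.ants import Registration

reg = Registration()
reg.inputs.fixed_image = 'fixed1.nii'    # placeholder
reg.inputs.moving_image = 'moving1.nii'  # placeholder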
Example #7
def prep_for_fmriprep(bidsdir, rawdir, substr):
    #make subject dir, anat and func
    subid = substr.replace('-', '_').replace('_', '')
    anatdir = bidsdir + '/sub-' + subid + '/anat/'
    funcdir = bidsdir + '/sub-' + subid + '/func/'
    os.makedirs(anatdir, exist_ok=True)
    os.makedirs(funcdir, exist_ok=True)

    # get t1brain and MNI template
    t1brain = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/highres.nii.gz' % substr
    template = str(
        get_template('MNI152NLin2009cAsym',
                     resolution=2,
                     desc='brain',
                     suffix='T1w',
                     extension=['.nii', '.nii.gz']))

    ## register T1w to the template for fMRIPrep-style standard space
    ### these registration files may not be used

    tranformfile = tempfile.mkdtemp()
    reg = Registration()
    reg.inputs.fixed_image = template
    reg.inputs.moving_image = t1brain
    reg.inputs.output_transform_prefix = tranformfile + '/t12mni_'
    reg.inputs.transforms = ['Affine', 'SyN']
    reg.inputs.transform_parameters = [(2.0, ), (0.25, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]]
    reg.inputs.dimension = 3
    reg.inputs.num_threads = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.initialize_transforms_per_stage = True
    reg.inputs.metric = ['Mattes'] * 2
    reg.inputs.metric_weight = [
        1
    ] * 2  # Default (value ignored currently by ANTs)
    reg.inputs.radius_or_number_of_bins = [32] * 2
    reg.inputs.sampling_strategy = ['Random', None]
    reg.inputs.sampling_percentage = [0.05, None]
    reg.inputs.convergence_threshold = [1.e-8, 1.e-9]
    reg.inputs.convergence_window_size = [20] * 2
    reg.inputs.smoothing_sigmas = [[1, 0], [2, 1, 0]]
    reg.inputs.sigma_units = ['vox'] * 2
    reg.inputs.shrink_factors = [[2, 1], [3, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True, True]
    reg.inputs.use_histogram_matching = [True, True]  # This is the default
    #reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.cmdline
    reg.run()

    ## copy transform file to fmriprep directory
    mni2twtransform = anatdir + '/sub-' + subid + '_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5'
    t1w2mnitransform = anatdir + '/sub-' + subid + '_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5'
    copyfile(tranformfile + '/t12mni_Composite.h5', t1w2mnitransform)
    copyfile(tranformfile + '/t12mni_InverseComposite.h5', mni2twtransform)

    ### warp the unprocessed (non-filtered, non-smoothed) BOLD to the fMRIPrep-style standard space

    ### now functional

    boldmask = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/mask.nii.gz' % substr
    boldref = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI_SBREF.nii.gz' % substr
    boldprep = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.nii.gz' % substr

    reffile = tempfile.mkdtemp() + '/reffile.nii.gz'
    boldstd = tempfile.mkdtemp() + '/boldstd.nii.gz'
    maskstd = tempfile.mkdtemp() + '/maskstd.nii.gz'
    aw = fsl.ApplyWarp()
    aw.inputs.in_file = boldref
    aw.inputs.ref_file = template
    aw.inputs.field_file = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/example_func2standard_warp.nii.gz' % substr
    aw.inputs.out_file = reffile
    aw.inputs.output_type = 'NIFTI_GZ'
    res = aw.run()

    aw1 = fsl.ApplyWarp()
    aw1.inputs.interp = 'spline'
    aw1.inputs.ref_file = template
    aw1.inputs.field_file = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/example_func2standard_warp.nii.gz' % substr
    aw1.inputs.in_file = boldprep
    aw1.inputs.out_file = boldstd
    aw1.inputs.output_type = 'NIFTI_GZ'
    res1 = aw1.run()

    aw2 = fsl.ApplyWarp()
    aw2.inputs.in_file = boldmask
    aw2.inputs.ref_file = template
    aw2.inputs.field_file = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/example_func2standard_warp.nii.gz' % substr
    aw2.inputs.out_file = maskstd
    aw2.inputs.output_type = 'NIFTI_GZ'
    res2 = aw2.run()

    tr = nb.load(boldprep).header.get_zooms()[-1]

    jsontis = {
        "RepetitionTime": np.float64(tr),
        "TaskName": 'rest',
        "SkullStripped": False,
    }

    jsmaks = {"mask": True}

    #newname
    preprocbold = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
    preprocboldjson = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_desc-preproc_bold.json'
    preprocboldref = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_boldref.nii.gz'
    preprocmask = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
    preprocmaskjson = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_desc-brain_mask.json'

    copyfile(maskstd, preprocmask)
    copyfile(reffile, preprocboldref)
    copyfile(boldstd, preprocbold)
    writejson(jsontis, preprocboldjson)
    writejson(jsmaks, preprocmaskjson)

    # get wm and csf mask to extract mean signals for regressors
    ### first warp the anatomical to bold space
    wmask = rawdir + '/UKB_Pipeline/%s/T1/T1_fast/T1_brain_pve_2.nii.gz' % substr
    csfmask = rawdir + '/UKB_Pipeline/%s/T1/T1_fast/T1_brain_pve_0.nii.gz' % substr

    t2funcwmask = tempfile.mkdtemp() + '/wmask.nii.gz'
    t2funcwcsf = tempfile.mkdtemp() + '/csf.nii.gz'

    aw = fsl.preprocess.ApplyXFM()
    aw.inputs.in_file = wmask
    aw.inputs.reference = boldref
    aw.inputs.in_matrix_file = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/highres2example_func.mat' % substr
    aw.inputs.out_file = t2funcwmask
    aw.inputs.apply_xfm = True
    aw.inputs.interp = 'nearestneighbour'
    aw.inputs.output_type = 'NIFTI_GZ'
    res = aw.run()

    aw2 = fsl.preprocess.ApplyXFM()
    aw2.inputs.in_file = csfmask
    aw2.inputs.reference = boldref
    aw2.inputs.in_matrix_file = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/highres2example_func.mat' % substr
    aw2.inputs.out_file = t2funcwcsf
    aw2.inputs.apply_xfm = True
    aw2.inputs.interp = 'nearestneighbour'
    aw2.inputs.output_type = 'NIFTI_GZ'
    res2 = aw2.run()

    # binarize the tissue masks and extract mean signals
    wmbin = nb.load(t2funcwmask).get_fdata()
    wmbin[wmbin < 0.99999] = 0

    csfbin = nb.load(t2funcwcsf).get_fdata()
    csfbin[csfbin < 0.99999] = 0

    maskbin = nb.load(boldmask).get_fdata()

    bolddata = nb.load(boldprep).get_fdata()
    wm_mean = bolddata[wmbin > 0, :].mean(axis=0)
    csf_mean = bolddata[csfbin > 0, :].mean(axis=0)
    global_mean = bolddata[maskbin > 0, :].mean(axis=0)

    #### combine all the regressors

    mcfile = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/mc/prefiltered_func_data_mcf.par' % substr
    rsmdfile = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/mc/prefiltered_func_data_mcf_abs.rms' % substr
    motionfile = np.loadtxt(mcfile)

    rsmd = np.loadtxt(rsmdfile)
    motionparam = pd.DataFrame(
        motionfile,
        columns=['rot_x', 'rot_y', 'rot_z', 'trans_x', 'trans_y', 'trans_z'])

    otherparam = pd.DataFrame({
        'global_signal': global_mean,
        'white_matter': wm_mean,
        'csf': csf_mean,
        'rmsd': rsmd
    })

    regressors = pd.concat([motionparam, otherparam], axis=1)
    jsonreg = {'regressor': 'not'}
    regcsv = funcdir + '/sub-' + subid + '_task-rest_desc-confounds_timeseries.tsv'
    regjson = funcdir + '/sub-' + subid + '_task-rest_desc-confounds_timeseries.json'

    regressors.to_csv(regcsv, index=None, sep='\t')
    writejson(jsonreg, regjson)
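
prep_for_fmriprep relies on a writejson helper that is not shown in the snippet; it presumably just serializes a dict to a JSON sidecar file. A minimal sketch of such a helper (hypothetical implementation):

import json

def writejson(data, outfile):
    # dump the metadata dict (e.g. RepetitionTime, TaskName) as a BIDS-style JSON sidecar
    with open(outfile, 'w') as f:
        json.dump(data, f, indent=2)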
Example #8
def main(args=None):

    args = arg_parser().parse_args(args)
    FLAIR = args.FLAIR
    MPRAGE = args.T1
    
    prefix=args.prefix + '.'

    if args.mask is None:
        args.temp_mask = os.path.abspath(args.temp_mask)
        args.brain_template = os.path.abspath(args.brain_template)
        args.temp_prob = os.path.abspath(args.temp_prob)
        if not os.path.isfile(args.temp_mask):
            raise Exception("template mask not found")
        if not os.path.isfile(args.brain_template):
            raise Exception("brain template mask not found")
        if not os.path.isfile(args.temp_prob):
            raise Exception("template probability mask not found")
    elif not os.path.isfile(args.mask):
        raise Exception("T1 mask file not found")

    if not os.path.isfile(MPRAGE):
        raise Exception("Input T1 file not found")
    if not os.path.isfile(FLAIR):
        raise Exception("Input FLAIR file not found")

    if args.outfolder is not None:
        abs_out = os.path.abspath(args.outfolder)
        #print(abs_out)
        if not os.path.exists(abs_out):
            # if selecting a new folder, copy the input files there (not sure how to point
            # nipype at a different folder when it runs the ANTs shell scripts)
            os.mkdir(abs_out)
        copyfile(os.path.abspath(MPRAGE),os.path.join(abs_out,os.path.basename(MPRAGE)))
        copyfile(os.path.abspath(FLAIR),os.path.join(abs_out,os.path.basename(FLAIR)))
        if args.mask is not None:
            if os.path.isfile(args.mask):
                copyfile(os.path.abspath(args.mask),os.path.join(abs_out, prefix + 'MPRAGE.mask.nii.gz'))
        os.chdir(args.outfolder)
    elif args.mask is not None:
        copyfile(os.path.abspath(args.mask),os.path.join(os.path.abspath(args.mask), prefix + 'MPRAGE.mask.nii.gz'))

    if args.mask is None:
        # T1 brain extraction
        brainextraction = BrainExtraction()
        brainextraction.inputs.dimension = 3
        brainextraction.inputs.anatomical_image = MPRAGE
        brainextraction.inputs.brain_template = args.brain_template
        brainextraction.inputs.brain_probability_mask = args.temp_prob
        brainextraction.inputs.extraction_registration_mask= args.temp_mask
        brainextraction.inputs.debug=True
        print("brain extraction")
        print(' ')
        print(brainextraction.cmdline)
        print('-'*30)
        brainextraction.run()
        os.rename('highres001_BrainExtractionMask.nii.gz',prefix +'MPRAGE.mask.nii.gz')
        os.rename('highres001_BrainExtractionBrain.nii.gz',prefix +'MPRAGE.brain.nii.gz')
        os.remove('highres001_BrainExtractionPrior0GenericAffine.mat')
        os.rmdir('highres001_')

    #two step registration with ants (step1)

    reg = Registration()
    reg.inputs.fixed_image = FLAIR
    reg.inputs.moving_image = MPRAGE
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.output_warped_image = prefix + 'output_warped_image.nii.gz'
    reg.inputs.dimension = 3
    reg.inputs.transforms = ['Rigid']
    reg.inputs.transform_parameters = [[0.1]]
    reg.inputs.radius_or_number_of_bins = [32]
    reg.inputs.metric = ['MI']
    reg.inputs.sampling_percentage = [0.1]
    reg.inputs.sampling_strategy = ['Regular']
    reg.inputs.shrink_factors = [[4,3,2,1]]
    reg.inputs.smoothing_sigmas = [[3,2,1,0]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.use_histogram_matching = [False]
    reg.inputs.number_of_iterations = [[1000,500,250,100]]
    reg.inputs.winsorize_lower_quantile = 0.025
    reg.inputs.winsorize_upper_quantile = 0.975
    print("first pass registration")
    print(' ')
    print(reg.cmdline)
    print('-'*30)
    reg.run()

    os.rename('output_0GenericAffine.mat',prefix + 'MPRAGE_to_FLAIR.firstpass.mat')

    #apply the MPRAGE-to-FLAIR transform to the MPRAGE mask

    at = ApplyTransforms()
    at.inputs.dimension = 3
    at.inputs.input_image = prefix + 'MPRAGE.mask.nii.gz'
    at.inputs.reference_image = FLAIR
    at.inputs.output_image = prefix + 'FLAIR.mask.nii.gz'
    at.inputs.interpolation = 'MultiLabel'
    at.inputs.default_value = 0
    at.inputs.transforms = [ prefix + 'MPRAGE_to_FLAIR.firstpass.mat']
    at.inputs.invert_transform_flags = [False]
    print("apply stranform to T1 maks")
    print(' ')
    print(at.cmdline)
    print('-'*30)    
    at.run()

    # bias correct FLAIR and MPRAGE

    n4m = N4BiasFieldCorrection()
    n4m.inputs.dimension = 3
    n4m.inputs.input_image = MPRAGE
    n4m.inputs.mask_image = prefix + 'MPRAGE.mask.nii.gz'
    n4m.inputs.bspline_fitting_distance = 300
    n4m.inputs.shrink_factor = 3
    n4m.inputs.n_iterations = [50,50,30,20]
    n4m.inputs.output_image = prefix + 'MPRAGE.N4.nii.gz'
    print("bias correcting T1")
    print(' ')
    print(n4m.cmdline)
    print('-'*30)
    n4m.run()

    n4f = copy.deepcopy(n4m)
    n4f.inputs.input_image = FLAIR
    n4f.inputs.mask_image = prefix + 'FLAIR.mask.nii.gz'
    n4f.inputs.output_image = prefix + 'FLAIR.N4.nii.gz'
    print("bias correcting FLAIR")
    print(' ')
    print(n4f.cmdline)
    print('-'*30)
    n4f.run()

    # mask bias corrected FLAIR and MPRAGE

    calc = afni.Calc()
    calc.inputs.in_file_a = prefix + 'FLAIR.N4.nii.gz'
    calc.inputs.in_file_b = prefix + 'FLAIR.mask.nii.gz'
    calc.inputs.expr='a*b'
    calc.inputs.out_file = prefix +  'FLAIR.N4.masked.nii.gz'
    calc.inputs.outputtype = 'NIFTI'
    calc.inputs.overwrite = True
    calc.run()

    calc1= copy.deepcopy(calc)
    calc1.inputs.in_file_a = prefix + 'MPRAGE.N4.nii.gz'
    calc1.inputs.in_file_b = prefix + 'MPRAGE.mask.nii.gz'
    calc1.inputs.out_file = prefix +  'MPRAGE.N4.masked.nii.gz'
    calc1.inputs.overwrite = True
    calc1.run()

    #register bias corrected

    reg1 = copy.deepcopy(reg)
    reg1.inputs.output_transform_prefix = "output_"
    reg1.inputs.output_warped_image = prefix + 'output_warped_image.nii.gz'
    reg1.inputs.initial_moving_transform = prefix +'MPRAGE_to_FLAIR.firstpass.mat'
    print("second pass registration")
    print(' ')
    print(reg1.cmdline)
    print('-'*30)
    reg1.run()
    os.rename('output_0GenericAffine.mat',prefix +'MPRAGE_to_FLAIR.secondpass.mat')
    
    
    #generate final mask in FLAIR space

    atf = ApplyTransforms()
    atf.inputs.dimension = 3
    atf.inputs.input_image = prefix + 'MPRAGE.N4.nii.gz'
    atf.inputs.reference_image = FLAIR
    atf.inputs.output_image = prefix + 'MPRAGE.N4.toFLAIR.nii.gz'
    atf.inputs.interpolation = 'BSpline'
    atf.inputs.interpolation_parameters = (3,)
    atf.inputs.default_value = 0
    atf.inputs.transforms = [prefix +  'MPRAGE_to_FLAIR.secondpass.mat']
    atf.inputs.invert_transform_flags = [False]
    print("final apply transform")
    print(' ')
    print(atf.cmdline)
    print('-'*30)
    atf.run()


    #cleanup

    os.remove(prefix + 'output_warped_image.nii.gz')

    if args.outfolder is not None:
        os.remove(os.path.join(abs_out,os.path.basename(MPRAGE)))
        os.remove(os.path.join(abs_out,os.path.basename(FLAIR)))
        
    if args.mask is None:
        os.remove(prefix + 'MPRAGE.brain.nii.gz')
        
    if not args.storetemp:
        os.remove(prefix + 'MPRAGE.mask.nii.gz')
        os.remove(prefix + 'MPRAGE_to_FLAIR.firstpass.mat')
        os.remove(prefix + 'FLAIR.N4.masked.nii.gz')
        os.remove(prefix + 'MPRAGE.N4.masked.nii.gz')
        os.remove(prefix + 'MPRAGE.N4.nii.gz')


    return
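
main() expects an arg_parser() factory that is not part of the snippet. A hedged sketch of an argparse setup consistent with the attributes used above (argument names and defaults are assumptions):

import argparse

def arg_parser():
    p = argparse.ArgumentParser(description='FLAIR/T1 preprocessing with ANTs')
    p.add_argument('--flair', dest='FLAIR', required=True, help='input FLAIR image')
    p.add_argument('--t1', dest='T1', required=True, help='input T1/MPRAGE image')
    p.add_argument('--prefix', default='subject', help='output filename prefix')
    p.add_argument('--mask', default=None, help='optional precomputed T1 brain mask')
    p.add_argument('--temp-mask', dest='temp_mask', help='template registration mask')
    p.add_argument('--brain-template', dest='brain_template', help='brain extraction template')
    p.add_argument('--temp-prob', dest='temp_prob', help='template brain probability mask')
    p.add_argument('--outfolder', default=None, help='optional output folder')
    p.add_argument('--storetemp', action='store_true', help='keep intermediate files')
    return p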
Example #9
def preprocess(data_dir, subject, atlas_dir, output_dir):
    with tempfile.TemporaryDirectory() as temp_dir:
        if not os.path.exists(os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')):
            if os.path.exists(os.path.join(data_dir, subject, 'FLAIR.nii.gz')):
                # reorient to MNI standard direction
                reorient = fsl.utils.Reorient2Std()
                reorient.inputs.in_file = os.path.join(data_dir, subject, 'FLAIR.nii.gz')
                reorient.inputs.out_file = os.path.join(temp_dir, 'FLAIR_reorient.nii.gz')
                reorient.run()

                # robust fov to remove neck and lower head automatically
                rf = fsl.utils.RobustFOV()
                rf.inputs.in_file = os.path.join(temp_dir, 'FLAIR_reorient.nii.gz')
                rf.inputs.out_roi = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
                rf.run()

                # skull stripping first run
                print('BET pre-stripping...')
                btr1 = fsl.BET()
                btr1.inputs.in_file = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
                btr1.inputs.robust = True
                btr1.inputs.frac = 0.2
                btr1.inputs.out_file = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
                btr1.run()

                # N4 bias field correction
                print('N4 Bias Field Correction running...')
                input_image = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
                output_image = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
                subprocess.call('N4BiasFieldCorrection --bspline-fitting [ 300 ] -d 3 '
                                '--input-image %s --convergence [ 50x50x30x20 ] --output %s --shrink-factor 3'
                                % (input_image, output_image), shell=True)

                # registration of FLAIR to MNI152 FLAIR template
                print('ANTs registration...')
                reg = Registration()
                reg.inputs.fixed_image = atlas_dir + '/flair_test.nii.gz'
                reg.inputs.moving_image = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
                reg.inputs.output_transform_prefix = os.path.join(output_dir, subject, 'FLAIR_r_transform.mat')
                reg.inputs.winsorize_upper_quantile = 0.995
                reg.inputs.winsorize_lower_quantile = 0.005
                reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
                reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1, 3.0, 0.0)]
                reg.inputs.number_of_iterations = [[1000, 500, 250, 100], [1000, 500, 250, 100], [100, 70, 50, 20]]
                reg.inputs.dimension = 3
                reg.inputs.initial_moving_transform_com = 0
                reg.inputs.write_composite_transform = True
                reg.inputs.collapse_output_transforms = False
                reg.inputs.initialize_transforms_per_stage = False
                reg.inputs.metric = ['Mattes', 'Mattes', ['Mattes', 'CC']]
                reg.inputs.metric_weight = [1, 1, [.5, .5]]  # Default (value ignored currently by ANTs)
                reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
                reg.inputs.sampling_strategy = ['Random', 'Random', None]
                reg.inputs.sampling_percentage = [0.25, 0.25, [0.05, 0.10]]
                reg.inputs.convergence_threshold = [1e-6, 1.e-6, 1.e-6]
                reg.inputs.convergence_window_size = [10] * 3
                reg.inputs.smoothing_sigmas = [[3, 2, 1, 0], [3, 2, 1, 0], [3, 2, 1, 0]]
                reg.inputs.sigma_units = ['vox'] * 3
                reg.inputs.shrink_factors = [[8, 4, 2, 1], [8, 4, 2, 1], [8, 4, 2, 1]]
                reg.inputs.use_estimate_learning_rate_once = [True, True, True]
                reg.inputs.use_histogram_matching = [True, True, True]  # This is the default
                reg.inputs.output_warped_image = os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')
                reg.inputs.verbose = True
                reg.run()

                # second pass of BET skull stripping
                print('BET skull stripping...')
                btr2 = fsl.BET()
                btr2.inputs.in_file = os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')
                btr2.inputs.robust = True
                btr2.inputs.frac = 0.1
                btr2.inputs.mask = True
                btr2.inputs.out_file = os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')
                btr2.run()

                # copy mask file to output folder
                shutil.copy2(os.path.join(output_dir, subject, 'ANTS_FLAIR_r_mask.nii.gz'),
                             os.path.join(temp_dir, 'ANTS_FLAIR_r_mask.nii.gz'))

                # z score normalization
                FLAIR_path = os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')
                FLAIR_final = nib.load(FLAIR_path)
                FLAIR_mask_path = os.path.join(temp_dir, 'ANTS_FLAIR_r_mask.nii.gz')
                mask = nib.load(FLAIR_mask_path)
                FLAIR_norm = zscore_normalize(FLAIR_final, mask)
                nib.save(FLAIR_norm, FLAIR_path)

                print('.........................')
                print('patient %s registration done' % subject)
            else:
                pass
        else:
            pass
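
preprocess() calls a zscore_normalize helper that is not shown. A minimal sketch of one plausible implementation, normalizing intensities to zero mean and unit variance inside the brain mask (hypothetical, not necessarily the original author's version):

import nibabel as nib

def zscore_normalize(img, mask):
    # img and mask are nibabel images; returns a new image z-scored within the mask
    data = img.get_fdata()
    m = mask.get_fdata() > 0
    mu, sigma = data[m].mean(), data[m].std()
    return nib.Nifti1Image((data - mu) / sigma, img.affine, img.header)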
Example #10
 def register(self, i):
     """
     Aligns a 2D histology image to a 2D blockface image using ANTs 
     registration tools.
     """
     #Create naming convention for aligned files.
     slice_num = ''
     if i < 10:
         slice_num = '000' + str(i)
     elif i < 100:
         slice_num = '00' + str(i)
     elif i < 1000:
         slice_num = '0' + str(i)
     elif i < 10000:
         slice_num = str(i)
     #Define registration parameters for the ANTs Registration command through Nipype.
     reg = Registration()
     #reg.inputs.verbose = True
     reg.inputs.fixed_image = self.BF_vol.slices[i].path
     reg.inputs.moving_image = self.Hist_vol.slices[i].path
     reg.inputs.output_warped_image = 'Hist_to_BF_{}.nii.gz'.format(
         slice_num)
     reg.inputs.output_transform_prefix = "composite_transform_{}.h5".format(
         slice_num)
     if self.reg_method == 'nonlinear':
         reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN']
         reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.1, ),
                                            (0.1, )]
         reg.inputs.number_of_iterations = ([[1500, 500, 250]] * 4)
         reg.inputs.metric = ['Mattes'] * 4
         reg.inputs.metric_weight = [1] * 4
         reg.inputs.radius_or_number_of_bins = [32] * 4
         reg.inputs.sampling_strategy = ['Regular'] * 4
         reg.inputs.sampling_percentage = [0.3] * 4
         reg.inputs.convergence_threshold = [1.e-6] * 4
         reg.inputs.convergence_window_size = [20] * 4
         reg.inputs.smoothing_sigmas = [[0, 0, 0]] * 4
         reg.inputs.sigma_units = ['vox'] * 4
         reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]] * 3
         reg.inputs.use_estimate_learning_rate_once = [True] * 4
         reg.inputs.use_histogram_matching = [False] * 4
     else:
         if self.reg_method != 'linear':
             warnings.warn(
                 "Can't Interpret registration method, Defaulting to linear alignment."
             )
         reg.inputs.transforms = ['Translation', 'Rigid', 'Affine']
         reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.1, )]
         reg.inputs.number_of_iterations = ([[1500, 500, 250]] * 3)
         reg.inputs.metric = ['Mattes'] * 3
         reg.inputs.metric_weight = [1] * 3
         reg.inputs.radius_or_number_of_bins = [32] * 3
         reg.inputs.sampling_strategy = ['Regular'] * 3
         reg.inputs.sampling_percentage = [0.3] * 3
         reg.inputs.convergence_threshold = [1.e-6] * 3
         reg.inputs.convergence_window_size = [20] * 3
         reg.inputs.smoothing_sigmas = [[0, 0, 0]] * 3
         reg.inputs.sigma_units = ['vox'] * 3
         reg.inputs.shrink_factors = [[3, 2, 1]] * 3
         reg.inputs.use_estimate_learning_rate_once = [True] * 3
         reg.inputs.use_histogram_matching = [False] * 3
     reg.inputs.interpolation = 'BSpline'
     reg.inputs.dimension = 2
     reg.inputs.write_composite_transform = True
     reg.inputs.collapse_output_transforms = True
     reg.inputs.initial_moving_transform_com = True
     reg.inputs.float = True
     reg.inputs.ignore_exception = True
     outputs = reg._list_outputs()
     #print(reg.cmdline)
     #Copy output files to output directory.
     print("##################################",
           'Hist_to_BF_{}.nii.gz is RUNNING'.format(slice_num),
           "##################################\n")
     reg.run()
     shutil.move(
         outputs['warped_image'],
         self.out_dir + '/grayscale/Hist_to_BF_{}.nii.gz'.format(slice_num))
     shutil.move(
         outputs['composite_transform'], self.out_dir +
         '/composite_transform/composite_transform_{}.h5'.format(slice_num))
     print("##################################",
           'Hist_to_BF_{}.nii.gz is COMPLETE'.format(slice_num),
           "##################################\n")
Example #11
 def blockface_to_MRI_alignment(self, out_dir):
     """
     3D nonlinear alignment of the blockface NIFTI volume to the MRI reference volume.
     Alignment uses the program ANTs through the Nipype module.
     Outputs a 3D composite transform file that allows for warping of files from blockface space to MRI space.
     NOTE: VERY TIME CONSUMING STEP WHEN MRI HAS BEEN RESAMPLED TO BLOCKFACE OR HISTOLOGY RESOLUTION
     """
     out_dir = os.path.abspath(out_dir) + '/'
     if self.overwrite == False and os.path.isfile(
             out_dir + 'MRI_to_blockface_linear_alignment.nii.gz'):
         print(' - Reference MRI Already Linearly Aligned to Blockface')
         return
     else:
         out_dir = os.path.abspath(out_dir) + '/'
         #Define inputs to nipype's ANTs Registration.
         reg = Registration()
         reg.inputs.fixed_image = self.BF_low_res_name
         reg.inputs.moving_image = self.MRI
         reg.inputs.output_warped_image = out_dir + 'MRI_to_blockface_linear_alignment.nii.gz'
         reg.inputs.output_transform_prefix = out_dir + "composite_transform_MRI_to_blockface_alignment_linear"
         reg.inputs.transforms = ['Translation', 'Rigid']
         reg.inputs.transform_parameters = [(0.1, ), (0.1, )]
         reg.inputs.number_of_iterations = ([[1500, 500, 250]] * 2)
         reg.inputs.interpolation = 'BSpline'
         reg.inputs.dimension = 3
         reg.inputs.write_composite_transform = True
         reg.inputs.collapse_output_transforms = True
         reg.inputs.metric = ['Mattes'] * 2
         reg.inputs.metric_weight = [1] * 2
         reg.inputs.radius_or_number_of_bins = [32] * 2
         reg.inputs.sampling_strategy = ['Regular'] * 2
         reg.inputs.sampling_percentage = [0.3] * 2
         reg.inputs.convergence_threshold = [1.e-6] * 2
         reg.inputs.convergence_window_size = [20] * 2
         reg.inputs.smoothing_sigmas = [[0, 0, 0]] * 2
         reg.inputs.sigma_units = ['vox'] * 2
         reg.inputs.shrink_factors = [[3, 2, 1]] * 2
         reg.inputs.use_estimate_learning_rate_once = [True] * 2
         reg.inputs.use_histogram_matching = [False] * 2
         reg.inputs.initial_moving_transform_com = True
         reg.inputs.num_threads = self.threads
         reg.inputs.verbose = True
         print(reg.cmdline)
         reg.run()
         #Define inputs to nipype's ANTs Registration.
     if self.overwrite == False and os.path.isfile(
             out_dir + 'blockface_to_MRI_alignment.nii.gz'):
         print(' - Blockface images already aligned to reference MRI')
         return
     else:
         reg = Registration()
         reg.inputs.fixed_image = out_dir + 'MRI_to_blockface_linear_alignment.nii.gz'
         reg.inputs.moving_image = self.BF_vol
         reg.inputs.output_warped_image = out_dir + 'blockface_to_MRI_alignment.nii.gz'
         reg.inputs.output_transform_prefix = out_dir + "composite_transform_blockface_to_MRI_alignment"
         reg.inputs.transforms = ['Affine', 'SyN']
         reg.inputs.transform_parameters = [(0.1, ), (0.1, )]
         reg.inputs.number_of_iterations = ([[1500, 500, 250]] * 2)
         reg.inputs.interpolation = 'BSpline'
         reg.inputs.dimension = 3
         reg.inputs.write_composite_transform = True
         reg.inputs.collapse_output_transforms = True
         reg.inputs.metric = ['Mattes'] * 2
         reg.inputs.metric_weight = [1] * 2
         reg.inputs.radius_or_number_of_bins = [32] * 2
         reg.inputs.sampling_strategy = ['Regular'] * 2
         reg.inputs.sampling_percentage = [0.3] * 2
         reg.inputs.convergence_threshold = [1.e-6] * 2
         reg.inputs.convergence_window_size = [20] * 2
         reg.inputs.smoothing_sigmas = [[0, 0, 0]] * 2
         reg.inputs.sigma_units = ['vox'] * 2
         reg.inputs.shrink_factors = [[3, 2, 1]] * 2
         reg.inputs.use_estimate_learning_rate_once = [True] * 2
         reg.inputs.use_histogram_matching = [False] * 2
         reg.inputs.initial_moving_transform_com = True
         reg.inputs.num_threads = self.threads
         reg.inputs.verbose = True
         print(reg.cmdline)
         reg.run()
         return
Example #12
def registerImage(itk_moving,
                  itk_fixed,
                  store_to=None,
                  type="affine",
                  metric="MI",
                  speed="fast",
                  itk_InitialMovingAffTrf=None,
                  itk_InitialFixedAffTrf=None,
                  n_cores=8,
                  verbose=False):
    """
    Perform a registration using ANTs
    :param itk_moving: itk volume moving
    :param itk_fixed: itk volume fixed
    :param store_to: path to directory to store output, deletes tmp directory if defined
    :param type: string, "affine", "rigid", "deformable"
    :param metric: string "CC","MI"
    :param speed: string, "accurate","better","normal","fast","debug"
    :param itk_InitialMovingAffTrf: itk Transform, moving
    :param itk_InitialFixedAffTrf: itk Transform, fixed
    """

    # prepare environment / path
    main_dir = os.path.abspath(os.path.dirname(__file__))
    path_dir = os.path.join(main_dir, "bin")
    lib_dir = os.path.join(main_dir, "lib")
    tmp_dir = os.path.join(main_dir, "ants_tmp")
    cwd_old = os.getcwd()

    has_ANTs = bool(find_executable("antsRegistration"))
    has_PATH = "PATH" in os.environ
    if has_PATH:
        PATH_old = os.environ["PATH"]
    if not has_ANTs:
        os.environ["PATH"] = (os.environ["PATH"] +
                              os.pathsep if has_PATH else "") + path_dir
        if not find_executable("antsRegistration"):
            raise Exception("No executable file \"antsRegistration\" in PATH.")

    has_LD_LIBRARY = "LD_LIBRARY_PATH" in os.environ
    if has_LD_LIBRARY:
        LD_LIBRARY_old = os.environ["LD_LIBRARY_PATH"]
    if not has_ANTs:
        os.environ["LD_LIBRARY_PATH"] = (os.environ["LD_LIBRARY_PATH"] +
                                         os.pathsep
                                         if has_LD_LIBRARY else "") + lib_dir

    has_NUMBERCORES = "ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS" in os.environ
    if has_NUMBERCORES:
        NUMBERCORES_old = os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"]
    os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "{}".format(n_cores)

    # clean tmp directory
    shutil.rmtree(tmp_dir, ignore_errors=True)
    os.mkdir(tmp_dir)

    # write volumes
    moving_path = os.path.join(tmp_dir, "moving.nii.gz")
    fixed_path = os.path.join(tmp_dir, "fixed.nii.gz")
    sitk.WriteImage(itk_moving, moving_path)
    sitk.WriteImage(itk_fixed, fixed_path)

    # write initial transforms
    if itk_InitialFixedAffTrf:
        fixedtrf_path = os.path.join(tmp_dir, "initialfixedtrf.mat")
        sitk.WriteTransform(itk_InitialFixedAffTrf, fixedtrf_path)
    if itk_InitialMovingAffTrf:
        movingtrf_path = os.path.join(tmp_dir, "initialmovingtrf.mat")
        sitk.WriteTransform(itk_InitialMovingAffTrf, movingtrf_path)

    # switch to tmp dir
    os.chdir(tmp_dir)

    # setup registration parameters
    reg = Registration()

    reg.inputs.fixed_image = fixed_path
    reg.inputs.moving_image = moving_path
    reg.inputs.num_threads = n_cores

    if itk_InitialFixedAffTrf:
        reg.inputs.initial_fixed_transform = fixedtrf_path
    if itk_InitialMovingAffTrf:
        reg.inputs.initial_moving_transform = movingtrf_path

    # shared settings

    # extra raw arguments passed straight to antsRegistration;
    # '--float 0' requests double precision instead of single precision
    reg.inputs.args = '--float 0'

    # warped moving image
    reg.inputs.output_warped_image = 'moving_warped.nii.gz'

    # dimensionality
    reg.inputs.dimension = 3
    # output prefix
    reg.inputs.output_transform_prefix = "output_"
    # interpolation
    reg.inputs.interpolation = "Linear"
    # winsorize-image-intensities
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    # histogram matching
    reg.inputs.use_histogram_matching = False
    # write-composite-transform
    reg.inputs.write_composite_transform = False

    # convergence
    if speed == "accurate":
        reg.inputs.number_of_iterations = ([[1000, 100, 50, 20]])
        reg.inputs.convergence_threshold = [1.e-6]
        reg.inputs.convergence_window_size = [10]

        reg.inputs.shrink_factors = [[8, 4, 3, 1]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.smoothing_sigmas = [[4, 3, 2, 1]]
    elif speed == "better":
        reg.inputs.number_of_iterations = ([[200, 100, 50, 20]])
        reg.inputs.convergence_threshold = [1.e-6]
        reg.inputs.convergence_window_size = [10]

        reg.inputs.shrink_factors = [[8, 4, 2, 1]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.smoothing_sigmas = [[4, 3, 2, 1]]
    elif speed == "normal":
        reg.inputs.number_of_iterations = ([[100, 50, 10]])
        reg.inputs.convergence_threshold = [1.e-6]
        reg.inputs.convergence_window_size = [10]

        reg.inputs.shrink_factors = [[8, 4, 2]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.smoothing_sigmas = [[3, 2, 1]]
    elif speed == "fast":
        reg.inputs.number_of_iterations = ([[100, 50]])
        reg.inputs.convergence_threshold = [1.e-6]
        reg.inputs.convergence_window_size = [10]

        reg.inputs.shrink_factors = [[4, 3]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.smoothing_sigmas = [[3, 2]]
    elif speed == "debug":
        reg.inputs.number_of_iterations = ([[10]])
        reg.inputs.convergence_threshold = [1.e-6]
        reg.inputs.convergence_window_size = [10]

        reg.inputs.shrink_factors = [[4]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.smoothing_sigmas = [[3]]
    else:
        raise Exception(
            "Parameter speed must be from the list: accurate, better, normal, fast, debug"
        )

    # metric
    if metric == "MI":
        reg.inputs.metric = ["MI"]
        reg.inputs.metric_weight = [1.0]
        reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.sampling_strategy = ['Regular']
        reg.inputs.sampling_percentage = [0.25]
    elif metric == "CC":
        reg.inputs.metric = ["MI"]
        reg.inputs.metric_weight = [1.0]
        reg.inputs.radius_or_number_of_bins = [4]
        reg.inputs.sampling_strategy = ['None']
        reg.inputs.sampling_percentage = [0.1]
    else:
        raise Exception("Parameter metric must be from the list: MI,CC")

    # type-specific settings
    if type == "affine":
        reg.inputs.transforms = ['Affine']
        reg.inputs.transform_parameters = [(0.1, )]
    elif type == "rigid":
        reg.inputs.transforms = ['Rigid']
        reg.inputs.transform_parameters = [(0.1, )]
    elif type == "deformable":
        reg.inputs.transforms = ['SyN']
        reg.inputs.transform_parameters = [(0.25, )]
    else:
        raise Exception(
            "Parameter type must be from the list: affine, rigid, deformable")

    if verbose:
        print("Using antsRegistration from: {}".format(
            find_executable("antsRegistration")))
        print("Executing: {}".format(reg.cmdline))
    # perform ants call (retrieve by reg.cmdline)
    reg.run()

    # reset environment
    if not has_ANTs:
        if has_PATH:
            os.environ["PATH"] = PATH_old
        else:
            del os.environ["PATH"]

        if has_LD_LIBRARY:
            os.environ["LD_LIBRARY_PATH"] = LD_LIBRARY_old
        else:
            del os.environ["LD_LIBRARY_PATH"]

    if has_NUMBERCORES:
        os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = NUMBERCORES_old
    else:
        del os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"]

    if type == 'affine':
        #output_trf_path = ["output_0Affine.mat"]
        output_trf_path = reg._list_outputs().get('forward_transforms',
                                                  ["output_0Affine.mat"])
    elif type == 'rigid':
        #output_trf_path = ["output_0Rigid.mat"]
        output_trf_path = reg._list_outputs().get('forward_transforms',
                                                  ["output_0Rigid.mat"])
    elif type == 'deformable':
        #output_trf_path = ["output_0Warp.nii.gz","output_0InverseWarp.nii.gz"]
        output_trf_path = reg._list_outputs().get(
            'forward_transforms',
            ["output_0Warp.nii.gz"]) + reg._list_outputs().get(
                'reverse_transforms', ["output_0InverseWarp.nii.gz"])

    output_paths_trf = [os.path.join(tmp_dir, p) for p in output_trf_path]
    warped_path = os.path.join(tmp_dir, 'moving_warped.nii.gz')

    # switch back to old cwd
    os.chdir(cwd_old)

    if store_to:
        # copy output files
        moved_output_paths_trf = []
        for tf in output_paths_trf:
            moved = os.path.join(store_to, os.path.basename(tf))
            moved_output_paths_trf.append(moved)
            shutil.copy(tf, moved)

        moved_warped_path = os.path.join(store_to,
                                         os.path.basename(warped_path))
        shutil.copy(warped_path, moved_warped_path)

        # clear tmp directory
        shutil.rmtree(tmp_dir)

        if verbose:
            print("Store transform(s) to: {}".format(
                ", ".join(moved_output_paths_trf)))
            print("Store warped volume to: {}".format(
                ", ".join(moved_warped_path)))

        return {
            "transforms_out": moved_output_paths_trf,
            "warpedMovingVolume": moved_warped_path,
        }
    else:
        if verbose:
            print("Store transform(s) to: {}".format(
                ", ".join(output_paths_trf)))
            print("Store warped volume to: {}".format(", ".join(warped_path)))

        return {
            "transforms_out": output_paths_trf,
            "warpedMovingVolume": warped_path,
        }
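
A usage sketch for registerImage, loading the volumes with SimpleITK as the function expects; file names are placeholders and the output directory is assumed to exist.

import SimpleITK as sitk

itk_fixed = sitk.ReadImage('fixed_ct.nii.gz')     # placeholder
itk_moving = sitk.ReadImage('moving_mr.nii.gz')   # placeholder

result = registerImage(itk_moving, itk_fixed,
                       store_to='/tmp/reg_out',   # transforms and warped volume are copied here
                       type='rigid', metric='MI', speed='fast',
                       n_cores=4, verbose=True)
print(result['transforms_out'])
print(result['warpedMovingVolume'])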
Example #13
def ANTs(fixedImg,
         movingImg,
         fixedImgLandmarks,
         movingImgLandmarks,
         lowerFence,
         upperFence,
         r=5000):
    img2 = nib.Nifti1Image(fixedImgLandmarks, np.eye(4))
    nib.save(img2, 'fixed.nii')
    img3 = nib.Nifti1Image(movingImgLandmarks, np.eye(4))
    nib.save(img3, 'moving.nii')
    reg = Registration()
    reg.inputs.fixed_image = 'fixed.nii'
    reg.inputs.moving_image = 'moving.nii'
    reg.inputs.output_warped_image = 'registered.nii.gz'
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Translation', 'Rigid', 'Affine']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.1, )]
    reg.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 3)
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.metric = ['MeanSquares'] * 3
    reg.inputs.metric_weight = [1] * 3
    reg.inputs.radius_or_number_of_bins = [32] * 3
    reg.inputs.sampling_strategy = ['Regular'] * 3
    reg.inputs.sampling_percentage = [0.3] * 3
    reg.inputs.convergence_threshold = [1.e-8] * 3
    reg.inputs.convergence_window_size = [20] * 3
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]] * 2
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 3
    reg.inputs.initial_moving_transform_com = True
    reg.run()

    real_registered = os.path.join('registered.nii.gz')
    img = nib.load(real_registered)
    real_registered_img = img.get_data()

    print('clustering')
    registeredClusters = cLib.clusterThresh(real_registered_img, lowerFence,
                                            upperFence)
    fixedClusters = cLib.clusterThresh(fixedImg, lowerFence, upperFence)
    movingClusters = cLib.clusterThresh(movingImg, lowerFence, upperFence)

    print('registering clusters')
    #l2 centroid match, capped at r
    A = [elem.getCentroid() for elem in fixedClusters]
    B = [elem.getCentroid() for elem in movingClusters]

    tree = KDTree(B)
    for baseIdx, a in enumerate(A):
        dist, idx = tree.query(a, k=1, distance_upper_bound=r)
        if dist == float('Inf'):
            fixedClusters[baseIdx].timeRegistration = Cluster([[-1, -1, -1]])
        else:
            fixedClusters[baseIdx].timeRegistration = movingClusters[idx]

    return fixedClusters
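
The centroid-matching step above relies on scipy's KDTree query with distance_upper_bound, which returns an infinite distance (and an out-of-range index) when no neighbour lies within the radius r. A small self-contained sketch of that behaviour, with made-up points:

import numpy as np
from scipy.spatial import KDTree

A = np.array([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]])   # query centroids
B = np.array([[1.0, 0.0, 0.0], [2.0, 2.0, 2.0]])     # candidate centroids
tree = KDTree(B)

for a in A:
    dist, idx = tree.query(a, k=1, distance_upper_bound=5.0)
    if dist == float('inf'):
        print(a, '-> no match within the radius')     # idx equals len(B) in this case
    else:
        print(a, '-> matched to', B[idx], 'at distance', dist)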
Example #14
reg.inputs.collapse_output_transforms=True
reg.inputs.fixed_image=template_path
reg.inputs.initial_moving_transform_com=True
reg.inputs.num_threads=2
reg.inputs.output_inverse_warped_image=True
reg.inputs.sigma_units=['vox']*3
reg.inputs.transforms=['Rigid', 'Affine', 'SyN']
reg.inputs.winsorize_lower_quantile=0.005
reg.inputs.winsorize_upper_quantile=0.995
reg.inputs.convergence_threshold=[1e-06]
reg.inputs.convergence_window_size=[10]
reg.inputs.metric=['MI', 'MI', 'CC']
reg.inputs.metric_weight=[1.0]*3
reg.inputs.number_of_iterations=[[1000, 500, 250, 100],
                                 [1000, 500, 250, 100],
                                 [100, 70, 50, 20]]
reg.inputs.radius_or_number_of_bins=[32, 32, 4]
reg.inputs.sampling_percentage=[0.25, 0.25, 1]
reg.inputs.sampling_strategy=['Regular',
                              'Regular',
                              'None']
reg.inputs.shrink_factors=[[8, 4, 2, 1]]*3
reg.inputs.smoothing_sigmas=[[3, 2, 1, 0]]*3
reg.inputs.transform_parameters=[(0.1,),
                                 (0.1,),
                                 (0.1, 3.0, 0.0)]
reg.inputs.use_histogram_matching=True
reg.inputs.write_composite_transform=True

%timeit reg.run()
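
Example #14 (apparently captured from an IPython/Jupyter session, given the %timeit magic) is also truncated: it assumes reg, template_path and the moving image were defined earlier. A minimal sketch of the assumed preamble, with placeholder paths:

from nipype.interfaces.ants import Registration

template_path = 'mni_template_brain.nii.gz'     # placeholder
reg = Registration()
reg.inputs.moving_image = 'sub-01_T1w.nii.gz'   # placeholder
reg.inputs.output_transform_prefix = 'sub-01_to_template_'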
Example #15

reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN']
reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.3, 3.0, 0.0)]
reg.inputs.number_of_iterations = [[10000, 0, 0], [10000, 0, 0], [10000, 0, 0], [10, 0, 0]]
reg.inputs.dimension = 3
reg.inputs.write_composite_transform = True
reg.inputs.collapse_output_transforms = True
reg.inputs.metric = ['Mattes']*4
reg.inputs.metric_weight = [1]*4 # Default (value ignored currently by ANTs)
reg.inputs.radius_or_number_of_bins = [32]*4
reg.inputs.sampling_strategy = ['Regular']*3 + [None]
reg.inputs.sampling_percentage = [0.1]*3 + [None]
reg.inputs.convergence_threshold = [1.e-8]*4
reg.inputs.convergence_window_size = [20]*4
reg.inputs.smoothing_sigmas = [[4,2,1]]*3 + [[2,1,0]]
reg.inputs.sigma_units = ['vox']*4
reg.inputs.shrink_factors = [[6,4,2]]*3 + [[4,2,1]]
reg.inputs.use_estimate_learning_rate_once = [True, True, True, True]
reg.inputs.use_histogram_matching = [False]*3 + [True] # This is the default
reg.inputs.initial_moving_transform_com = True
reg.inputs.output_warped_image = True
reg.cmdline


"""
3. Run the registration
"""

reg.run()
Example #16
def embedded_antsreg_2d(source_image,
                        target_image,
                        run_rigid=False,
                        rigid_iterations=1000,
                        run_affine=False,
                        affine_iterations=1000,
                        run_syn=True,
                        coarse_iterations=40,
                        medium_iterations=50,
                        fine_iterations=40,
                        cost_function='MutualInformation',
                        interpolation='NearestNeighbor',
                        convergence=1e-6,
                        ignore_affine=False,
                        ignore_header=False,
                        save_data=False,
                        overwrite=False,
                        output_dir=None,
                        file_name=None):
    """ Embedded ANTS Registration 2D

    Runs the rigid and/or Symmetric Normalization (SyN) algorithm of ANTs and
    formats the output deformations into voxel coordinate mappings as used in
    CBSTools registration and transformation routines.

    Parameters
    ----------
    source_image: niimg
        Image to register
    target_image: niimg
        Reference image to match
    run_rigid: bool
        Whether or not to run a rigid registration first (default is False)
    rigid_iterations: float
        Number of iterations in the rigid step (default is 1000)
    run_affine: bool
        Whether or not to run an affine registration first (default is False)
    affine_iterations: float
        Number of iterations in the affine step (default is 1000)
    run_syn: bool
        Whether or not to run a SyN registration (default is True)
    coarse_iterations: float
        Number of iterations at the coarse level (default is 40)
    medium_iterations: float
        Number of iterations at the medium level (default is 50)
    fine_iterations: float
        Number of iterations at the fine level (default is 40)
    cost_function: {'CrossCorrelation', 'MutualInformation'}
        Cost function for the registration (default is 'MutualInformation')
    interpolation: {'NearestNeighbor', 'Linear'}
        Interpolation for the registration result (default is 'NearestNeighbor')
    convergence: float
        Threshold for convergence, can make the algorithm very slow
        (default is 1e-6)
    ignore_affine: bool
        Ignore the affine matrix information extracted from the image header
        (default is False)
    ignore_header: bool
        Ignore the orientation information and affine matrix information
        extracted from the image header (default is False)
    save_data: bool
        Save output data to file (default is False)
    overwrite: bool
        Overwrite existing results (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * transformed_source (niimg): Deformed source image (_ants-def)
        * mapping (niimg): Coordinate mapping from source to target (_ants-map)
        * inverse (niimg): Inverse coordinate mapping from target to source
          (_ants-invmap)

    Notes
    ----------
    Port of the CBSTools Java module by Pierre-Louis Bazin. The main algorithm
    is part of the ANTs software by Brian Avants and colleagues [1]_. The
    interfacing with ANTs is performed through Nipype [2]_. Parameters have been
    set to values commonly found in neuroimaging scripts online, but not
    necessarily optimal.

    References
    ----------
    .. [1] Avants et al (2008), Symmetric diffeomorphic
       image registration with cross-correlation: evaluating automated labeling
       of elderly and neurodegenerative brain, Med Image Anal. 12(1):26-41
    .. [2] Gorgolewski et al (2011) Nipype: a flexible, lightweight and
       extensible neuroimaging data processing framework in python.
       Front Neuroinform 5. doi:10.3389/fninf.2011.00013
    """

    print('\nEmbedded ANTs Registration')

    # for external tools: nipype
    try:
        from nipype.interfaces.ants import Registration
        from nipype.interfaces.ants import ApplyTransforms
    except ImportError:
        print(
            'Error: Nipype and/or ANTS could not be imported; they are required'
            + ' in order to run this module.\n(aborting)')
        return None

    # make sure that saving related parameters are correct
    output_dir = _output_dir_4saving(
        output_dir, source_image)  # needed for intermediate results
    if save_data:
        transformed_source_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-def'))

        mapping_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-map'))

        inverse_mapping_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-invmap'))
        if overwrite is False \
            and os.path.isfile(transformed_source_file) \
            and os.path.isfile(mapping_file) \
            and os.path.isfile(inverse_mapping_file) :

            print("skip computation (use existing results)")
            output = {
                'transformed_source': load_volume(transformed_source_file),
                'mapping': load_volume(mapping_file),
                'inverse': load_volume(inverse_mapping_file)
            }
            return output

    # load and get dimensions and resolution from input images
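    # note: X and Y are assumed here to be module-level axis-index constants
    # (X = 0, Y = 1) defined elsewhere in this package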
    source = load_volume(source_image)
    src_affine = source.affine
    src_header = source.header
    nsx = source.header.get_data_shape()[X]
    nsy = source.header.get_data_shape()[Y]
    nsz = 1
    rsx = source.header.get_zooms()[X]
    rsy = source.header.get_zooms()[Y]
    rsz = 1

    target = load_volume(target_image)
    trg_affine = target.affine
    trg_header = target.header
    ntx = target.header.get_data_shape()[X]
    nty = target.header.get_data_shape()[Y]
    ntz = 1
    rtx = target.header.get_zooms()[X]
    rty = target.header.get_zooms()[Y]
    rtz = 1

    # in case the affine transformations are not to be trusted: make them equal
    if ignore_affine or ignore_header:
        mx = np.argmax(np.abs(src_affine[0][0:3]))
        my = np.argmax(np.abs(src_affine[1][0:3]))
        mz = np.argmax(np.abs(src_affine[2][0:3]))
        new_affine = np.zeros((4, 4))
        if ignore_header:
            new_affine[0][0] = rsx
            new_affine[1][1] = rsy
            new_affine[2][2] = rsz
            new_affine[0][3] = -rsx * nsx / 2.0
            new_affine[1][3] = -rsy * nsy / 2.0
            new_affine[2][3] = -rsz * nsz / 2.0
        else:
            new_affine[0][mx] = rsx * np.sign(src_affine[0][mx])
            new_affine[1][my] = rsy * np.sign(src_affine[1][my])
            new_affine[2][mz] = rsz * np.sign(src_affine[2][mz])
            if (np.sign(src_affine[0][mx]) < 0):
                new_affine[0][3] = rsx * nsx / 2.0
            else:
                new_affine[0][3] = -rsx * nsx / 2.0

            if (np.sign(src_affine[1][my]) < 0):
                new_affine[1][3] = rsy * nsy / 2.0
            else:
                new_affine[1][3] = -rsy * nsy / 2.0

            if (np.sign(src_affine[2][mz]) < 0):
                new_affine[2][3] = rsz * nsz / 2.0
            else:
                new_affine[2][3] = -rsz * nsz / 2.0
        #if (np.sign(src_affine[0][mx])<0): new_affine[mx][3] = rsx*nsx
        #if (np.sign(src_affine[1][my])<0): new_affine[my][3] = rsy*nsy
        #if (np.sign(src_affine[2][mz])<0): new_affine[mz][3] = rsz*nsz
        #new_affine[0][3] = nsx/2.0
        #new_affine[1][3] = nsy/2.0
        #new_affine[2][3] = nsz/2.0
        new_affine[3][3] = 1.0

        src_img = nb.Nifti1Image(source.get_data(), new_affine, source.header)
        src_img.update_header()
        src_img_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='tmp_srcimg'))
        save_volume(src_img_file, src_img)
        source = load_volume(src_img_file)
        src_affine = source.affine
        src_header = source.header

        # create generic affine aligned with the orientation for the target
        mx = np.argmax(np.abs(trg_affine[0][0:3]))
        my = np.argmax(np.abs(trg_affine[1][0:3]))
        mz = np.argmax(np.abs(trg_affine[2][0:3]))
        new_affine = np.zeros((4, 4))
        if ignore_header:
            new_affine[0][0] = rtx
            new_affine[1][1] = rty
            new_affine[2][2] = rtz
            new_affine[0][3] = -rtx * ntx / 2.0
            new_affine[1][3] = -rty * nty / 2.0
            new_affine[2][3] = -rtz * ntz / 2.0
        else:
            new_affine[0][mx] = rtx * np.sign(trg_affine[0][mx])
            new_affine[1][my] = rty * np.sign(trg_affine[1][my])
            new_affine[2][mz] = rtz * np.sign(trg_affine[2][mz])
            if (np.sign(trg_affine[0][mx]) < 0):
                new_affine[0][3] = rtx * ntx / 2.0
            else:
                new_affine[0][3] = -rtx * ntx / 2.0

            if (np.sign(trg_affine[1][my]) < 0):
                new_affine[1][3] = rty * nty / 2.0
            else:
                new_affine[1][3] = -rty * nty / 2.0

            if (np.sign(trg_affine[2][mz]) < 0):
                new_affine[2][3] = rtz * ntz / 2.0
            else:
                new_affine[2][3] = -rtz * ntz / 2.0
        #if (np.sign(trg_affine[0][mx])<0): new_affine[mx][3] = rtx*ntx
        #if (np.sign(trg_affine[1][my])<0): new_affine[my][3] = rty*nty
        #if (np.sign(trg_affine[2][mz])<0): new_affine[mz][3] = rtz*ntz
        #new_affine[0][3] = ntx/2.0
        #new_affine[1][3] = nty/2.0
        #new_affine[2][3] = ntz/2.0
        new_affine[3][3] = 1.0

        trg_img = nb.Nifti1Image(target.get_data(), new_affine, target.header)
        trg_img.update_header()
        trg_img_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='tmp_trgimg'))
        save_volume(trg_img_file, trg_img)
        target = load_volume(trg_img_file)
        trg_affine = target.affine
        trg_header = target.header

    # build coordinate mapping matrices and save them to disk
    src_coord = np.zeros((nsx, nsy, 2))
    trg_coord = np.zeros((ntx, nty, 2))
    for x in range(nsx):
        for y in range(nsy):
            src_coord[x, y, X] = x
            src_coord[x, y, Y] = y
    src_map = nb.Nifti1Image(src_coord, source.affine, source.header)
    src_map_file = os.path.join(
        output_dir,
        _fname_4saving(file_name=file_name,
                       rootfile=source_image,
                       suffix='tmp_srccoord'))
    save_volume(src_map_file, src_map)
    for x in range(ntx):
        for y in range(nty):
            trg_coord[x, y, X] = x
            trg_coord[x, y, Y] = y
    trg_map = nb.Nifti1Image(trg_coord, target.affine, target.header)
    trg_map_file = os.path.join(
        output_dir,
        _fname_4saving(file_name=file_name,
                       rootfile=source_image,
                       suffix='tmp_trgcoord'))
    save_volume(trg_map_file, trg_map)

    # run the main ANTS software
    reg = Registration()
    reg.inputs.dimension = 2

    # derive an output transform prefix from the source file name to avoid name clashes
    prefix = _fname_4saving(file_name=file_name,
                            rootfile=source_image,
                            suffix='tmp_syn')
    prefix = os.path.basename(prefix)
    prefix = prefix.split(".")[0]
    reg.inputs.output_transform_prefix = prefix
    reg.inputs.fixed_image = [target.get_filename()]
    reg.inputs.moving_image = [source.get_filename()]

    print("registering " + source.get_filename() + "\n to " +
          target.get_filename())

    if run_rigid is True and run_affine is True and run_syn is True:
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [rigid_iterations, rigid_iterations, rigid_iterations],
            [affine_iterations, affine_iterations, affine_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[3, 2, 1]] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] * 3
        reg.inputs.convergence_window_size = [10] + [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is True and run_affine is False and run_syn is True:
        reg.inputs.transforms = ['Rigid', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [rigid_iterations, rigid_iterations, rigid_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is True and run_syn is True:
        reg.inputs.transforms = ['Affine', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [affine_iterations, affine_iterations, affine_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [64, 64]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is True and run_affine is True and run_syn is False:
        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.transform_parameters = [(0.1, ), (0.1, )]
        reg.inputs.number_of_iterations = [[
            rigid_iterations, rigid_iterations, rigid_iterations
        ], [affine_iterations, affine_iterations, affine_iterations]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [10]
        reg.inputs.use_histogram_matching = [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is True and run_affine is False and run_syn is False:
        reg.inputs.transforms = ['Rigid']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[
            rigid_iterations, rigid_iterations, rigid_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is True and run_syn is False:
        reg.inputs.transforms = ['Affine']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[
            affine_iterations, affine_iterations, affine_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is False and run_syn is True:
        reg.inputs.transforms = ['SyN']
        reg.inputs.transform_parameters = [(0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [[
            coarse_iterations, coarse_iterations, medium_iterations,
            fine_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is False and run_syn is False:
        reg.inputs.transforms = ['Rigid']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[0]]
        reg.inputs.metric = ['CC']
        reg.inputs.metric_weight = [1.0]
        reg.inputs.radius_or_number_of_bins = [5]
        reg.inputs.shrink_factors = [[1]]
        reg.inputs.smoothing_sigmas = [[1]]

    print(reg.cmdline)
    result = reg.run()

    # Transforms the moving image
    at = ApplyTransforms()
    at.inputs.dimension = 2
    at.inputs.input_image = source.get_filename()
    at.inputs.reference_image = target.get_filename()
    at.inputs.interpolation = interpolation
    at.inputs.transforms = result.outputs.forward_transforms
    at.inputs.invert_transform_flags = result.outputs.forward_invert_flags
    print(at.cmdline)
    transformed = at.run()

    # Create coordinate mappings
    src_at = ApplyTransforms()
    src_at.inputs.dimension = 2
    src_at.inputs.input_image_type = 3
    src_at.inputs.input_image = src_map.get_filename()
    src_at.inputs.reference_image = target.get_filename()
    src_at.inputs.interpolation = 'Linear'
    src_at.inputs.transforms = result.outputs.forward_transforms
    src_at.inputs.invert_transform_flags = result.outputs.forward_invert_flags
    mapping = src_at.run()

    trg_at = ApplyTransforms()
    trg_at.inputs.dimension = 2
    trg_at.inputs.input_image_type = 3
    trg_at.inputs.input_image = trg_map.get_filename()
    trg_at.inputs.reference_image = source.get_filename()
    trg_at.inputs.interpolation = 'Linear'
    trg_at.inputs.transforms = result.outputs.reverse_transforms
    trg_at.inputs.invert_transform_flags = result.outputs.reverse_invert_flags
    inverse = trg_at.run()

    # pad coordinate mapping outside the image? hopefully not needed...

    # collect outputs and potentially save
    transformed_img = nb.Nifti1Image(
        nb.load(transformed.outputs.output_image).get_data(), target.affine,
        target.header)
    mapping_img = nb.Nifti1Image(
        nb.load(mapping.outputs.output_image).get_data(), target.affine,
        target.header)
    inverse_img = nb.Nifti1Image(
        nb.load(inverse.outputs.output_image).get_data(), source.affine,
        source.header)

    outputs = {
        'transformed_source': transformed_img,
        'mapping': mapping_img,
        'inverse': inverse_img
    }

    # clean-up intermediate files
    os.remove(src_map_file)
    os.remove(trg_map_file)
    if ignore_affine or ignore_header:
        os.remove(src_img_file)
        os.remove(trg_img_file)

    for name in result.outputs.forward_transforms:
        if os.path.exists(name): os.remove(name)
    for name in result.outputs.reverse_transforms:
        if os.path.exists(name): os.remove(name)
    os.remove(transformed.outputs.output_image)
    os.remove(mapping.outputs.output_image)
    os.remove(inverse.outputs.output_image)

    if save_data:
        save_volume(transformed_source_file, transformed_img)
        save_volume(mapping_file, mapping_img)
        save_volume(inverse_mapping_file, inverse_img)

    return outputs
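
A minimal usage sketch for the function above (the file names and output directory are
hypothetical; the package-level helpers used inside the function, such as load_volume and
save_volume, are assumed to be importable from the same module):

# hypothetical example: rigid + SyN registration of a 2D slice onto a template slice
results = embedded_antsreg_2d(source_image='subject_slice.nii.gz',
                              target_image='template_slice.nii.gz',
                              run_rigid=True,
                              run_affine=False,
                              run_syn=True,
                              cost_function='MutualInformation',
                              interpolation='NearestNeighbor',
                              save_data=True,
                              output_dir='registration_outputs')
# results['transformed_source'] is the deformed source image;
# results['mapping'] and results['inverse'] are the forward and inverse
# coordinate mappings described in the docstring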
Example #17
0
def registerVolumes(fixedImgFn,
                    movinImgFn,
                    regImgOutFn,
                    transformPrefix,
                    initialize=None,
                    regtype='nonlinear'):
    # Registration set up: for both Affine and SyN transforms
    reg = Registration()
    reg.inputs.fixed_image = fixedImgFn
    reg.inputs.moving_image = movinImgFn
    reg.inputs.output_transform_prefix = transformPrefix  # filename prefix for the transform files written by this registration
    reg.inputs.interpolation = 'NearestNeighbor'
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = False  # write each stage's transform as a separate file rather than a single composite transform
    reg.inputs.collapse_output_transforms = False
    reg.inputs.initialize_transforms_per_stage = False
    reg.inputs.num_threads = 100
    reg.inputs.output_warped_image = regImgOutFn

    # Registration set up: Specify certain parameters for the Affine registration step
    reg.inputs.transforms = ['Affine']
    reg.inputs.transform_parameters = [(2.0, )]
    reg.inputs.number_of_iterations = [[1500, 200]]
    reg.inputs.metric = ['CC']
    reg.inputs.metric_weight = [1]
    reg.inputs.radius_or_number_of_bins = [5]
    reg.inputs.convergence_threshold = [1.e-8]
    reg.inputs.convergence_window_size = [20]
    reg.inputs.smoothing_sigmas = [[1, 0]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.shrink_factors = [[2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True]
    reg.inputs.use_histogram_matching = [True]  # this is the default, but specify it anyway

    # Registration set up: SyN transforms only -- NEEDS TO BE CHECKED
    if regtype == 'nonlinear':
        reg.inputs.transforms.append('SyN')
        reg.inputs.transform_parameters.append((0.25, 3.0, 0.0))
        reg.inputs.number_of_iterations.append([100, 50, 30])
        reg.inputs.metric.append('CC')
        reg.inputs.metric_weight.append(1)
        reg.inputs.radius_or_number_of_bins.append(5)
        reg.inputs.convergence_threshold.append(1.e-9)
        reg.inputs.convergence_window_size.append(20)
        reg.inputs.smoothing_sigmas.append([2, 1, 0])
        reg.inputs.sigma_units.append('vox')
        reg.inputs.shrink_factors.append([3, 2, 1])
        reg.inputs.use_estimate_learning_rate_once.append(True)
        reg.inputs.use_histogram_matching.append(True)  # this is the default value, but specify it anyway

    # If the registration is initialized, set a few more parameters
    if initialize is not None:
        reg.inputs.initial_moving_transform = initialize
        reg.inputs.invert_initial_moving_transform = False

    # Keep the user updated with the status of the registration
    print("Starting", regtype, "registration for", regImgOutFn)
    # Run the registration
    reg.run()
    # Keep the user updated with the status of the registration
    print("Finished", regtype, "registration for", regImgOutFn)