def _test_be(moving_image_id, reg):
    """Run brain-extraction registration for one image and save the result.

    Loads and pre-processes the moving image (without BET), fills in the
    supplied ANTs ``reg`` interface (fixed image, mask, output paths),
    runs it, then masks the registered volume with the template mask.

    Args:
        moving_image_id: id forwarded to ``image_registration.img_data``.
        reg: pre-configured ``ants.Registration`` interface.
    """
    img = image_registration.img_data(moving_image_id, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
    img = image_registration.pre_process(img, False)

    resampled_file = img.pre_processed_filepath
    # Strip BOTH extensions (.nii.gz) so every derived path shares the same
    # stem; previously the output file kept a stray ".nii" in its name
    # because only one splitext was applied.
    name = splitext(splitext(basename(resampled_file))[0])[0] + "_bet"
    img.pre_processed_filepath = util.TEMP_FOLDER_PATH + "/res/" + name + '.nii.gz'

    reg.inputs.fixed_image = resampled_file
    reg.inputs.fixed_image_mask = img.label_inv_filepath
    reg.inputs.output_transform_prefix = util.TEMP_FOLDER_PATH + name
    reg.inputs.output_warped_image = util.TEMP_FOLDER_PATH + name + '_betReg.nii'
    # ANTs writes <prefix>InverseComposite.h5 when write_composite_transform
    # is enabled on the interface.
    transform = util.TEMP_FOLDER_PATH + name + 'InverseComposite.h5'

    reg.run()

    img.init_transform = transform

    reg_volume = util.transform_volume(resampled_file, transform)

    # Mask the registered volume with the template brain mask.
    mult = ants.MultiplyImages()
    mult.inputs.dimension = 3
    mult.inputs.first_input = reg_volume
    mult.inputs.second_input = image_registration.TEMPLATE_MASK
    mult.inputs.output_product_image = img.pre_processed_filepath
    mult.run()

    util.generate_image(img.pre_processed_filepath, image_registration.TEMPLATE_VOLUME)
    def generate_image(self):
        '''
        Render one image with the CPPN, using the best genome
        found in the population so far.

        returns: list (list float)
        '''
        return util.generate_image(self.pop.statistics.best_genome())
def move_vol(moving, transform, label_img=False):
    """Apply *transform* to the volume at path *moving* and return the result.

    Args:
        moving: path to the input volume.
        transform: path to the transform applied by util.transform_volume.
        label_img: if True, resample with nearest-neighbour interpolation
            (keeps discrete label values intact); otherwise run the full
            pre-processing pipeline on the volume first.

    Returns:
        Path to the transformed volume (output of util.transform_volume).
    """
    if label_img:
        # resample volume to 1 mm slices
        target_affine_3x3 = np.eye(3) * 1
        img_3d_affine = resample_img(moving, target_affine=target_affine_3x3,
                                     interpolation='nearest')
        resampled_file = util.TEMP_FOLDER_PATH + util.get_basename(moving) + '_resample.nii.gz'
        # pylint: disable= no-member
        img_3d_affine.to_filename(resampled_file)

    else:
        img = img_data(-1, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
        img.set_img_filepath(moving)
        resampled_file = pre_process(img, False).pre_processed_filepath

    # NOTE(review): resampled_file is produced in both branches but never
    # used below — transform_volume operates on the original *moving* path.
    # Confirm whether the transform should be applied to resampled_file.
    result = util.transform_volume(moving, transform, label_img)
    util.generate_image(result, util.TEMPLATE_VOLUME)
    return result
    def generate_images_for_all_genomes(self):
        '''
        Render one CPPN image per genome, across every species
        in the population.

        returns: list (list (list float))
        '''
        return [util.generate_image(genome)
                for specie in self.pop.species
                for genome in specie.members]
def pre_process(img, do_bet=True):
    # pylint: disable= too-many-statements, too-many-locals
    """Pre-process the data: N4 bias-field correction, intensity
    normalisation to [0, 100], resampling to 1 mm slices and — when
    ``do_bet`` is True — brain extraction.

    The extraction strategy is selected by the module-level ``BE_METHOD``
    flag, as implemented below:
      0 -- ANTs Rigid+Affine registration of the template onto the image,
           then masking with the template mask,
      1 -- FSL BET only,
      2 -- FSL BET followed by ANTs registration of the masked template.

    Returns ``img`` with ``pre_processed_filepath`` set (and, for the
    registration-based methods, ``init_transform``).
    """

    path = img.temp_data_path

    # Derived file names chain the steps: *_n4 -> *_norm -> *_resample -> *_be.
    input_file = img.img_filepath
    n4_file = path + util.get_basename(input_file) + '_n4.nii.gz'
    norm_file = path + util.get_basename(n4_file) + '_norm.nii.gz'
    resampled_file = path + util.get_basename(norm_file) + '_resample.nii.gz'
    name = util.get_basename(resampled_file) + "_be"
    img.pre_processed_filepath = path + name + '.nii.gz'

    # Reuse cached results from a previous run when the extracted volume
    # (and, except for BET-only, its Composite transform) already exists.
    if os.path.exists(img.pre_processed_filepath) and\
       (os.path.exists(path + name + 'Composite.h5') or BE_METHOD == 1):
        if BE_METHOD == 0:
            img.init_transform = path + name + 'Composite.h5'
        util.generate_image(img.pre_processed_filepath, util.TEMPLATE_VOLUME)
        return img

    # N4 bias-field correction.
    n4bias = ants.N4BiasFieldCorrection()
    n4bias.inputs.dimension = 3
    n4bias.inputs.input_image = input_file
    n4bias.inputs.output_image = n4_file
    n4bias.run()

    # normalization [0,100], same as template
    normalize_img = nib.load(n4_file)
    temp_data = normalize_img.get_data()
    temp_img = nib.Nifti1Image(temp_data/np.amax(temp_data)*100,
                               normalize_img.affine, normalize_img.header)
    temp_img.to_filename(norm_file)

    # resample volume to 1 mm slices
    target_affine_3x3 = np.eye(3) * 1
    img_3d_affine = resample_img(norm_file, target_affine=target_affine_3x3)
    nib.save(img_3d_affine, resampled_file)

    if not do_bet:
        img.pre_processed_filepath = resampled_file
        return img

    if BE_METHOD == 0:
        # Template-registration-based extraction: the patient volume is the
        # ANTs fixed image, the template the moving image; the resulting
        # InverseComposite transform is applied to the patient volume below.
        img.init_transform = path + name + '_InitRegTo' + str(img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = resampled_file
        reg.inputs.moving_image = util.TEMPLATE_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = 1
        reg.inputs.initial_moving_transform_com = True

        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.number_of_iterations = ([[10000, 10000, 10000, 10000],
                                            [10000, 10000, 10000, 10000]])

        reg.inputs.convergence_threshold = [1.e-6]*2
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25,), (0.25,)]
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_beReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        print("starting be registration")
        reg.run()
        print("Finished be registration")

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        # Mask the registered volume with the template brain mask.
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    elif BE_METHOD == 1:
        # BET-only extraction.
        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command="fsl5.0-bet")
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.25
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        # bet.inputs.reduce_bias = True
        bet.inputs.mask = True

        bet.inputs.out_file = img.pre_processed_filepath

        bet.run()
        util.generate_image(img.pre_processed_filepath, resampled_file)
    elif BE_METHOD == 2:
        # BET first, then ANTs registration of the masked template onto the
        # BET output; the patient volume is finally masked in template space.
        name = util.get_basename(resampled_file) + "_bet"

        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command="fsl5.0-bet")
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.1
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        bet.inputs.reduce_bias = True
        bet.inputs.mask = True
        bet.inputs.out_file = path + name + '.nii.gz'
        print("starting bet registration")
        start_time = datetime.datetime.now()
        # Skip BET when its output already exists from an earlier run.
        if not os.path.exists(bet.inputs.out_file):
            bet.run()
        print("Finished bet registration 0: ")
        print(datetime.datetime.now() - start_time)

        name = name + "_be"
        img.pre_processed_filepath = path + name + '.nii.gz'
        img.init_transform = path + name + '_InitRegTo' + str(img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = bet.inputs.out_file
        reg.inputs.moving_image = util.TEMPLATE_MASKED_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = 8
        reg.inputs.initial_moving_transform_com = True

        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2
        reg.inputs.sampling_percentage = [0.5] * 2
        reg.inputs.number_of_iterations = ([[10000, 10000, 5000, 5000],
                                            [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25,), (0.25,)]
        reg.inputs.convergence_threshold = [1.e-6]*2
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_TemplateReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        print("starting be registration")
        start_time = datetime.datetime.now()
        # Skip registration when its warped output already exists.
        if not os.path.exists(reg.inputs.output_warped_image):
            reg.run()
        print("Finished be registration: ")
        print(datetime.datetime.now() - start_time)

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        # Mask the registered volume with the template brain mask.
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)

    print("---BET", img.pre_processed_filepath)
    return img
def registration(moving_img, fixed, reg_type):
    # pylint: disable= too-many-statements
    """Image2Image registration.

    Configures an ANTs registration with the transform stack selected by
    ``reg_type`` (RIGID, AFFINE or SYN). Note the deliberately swapped
    roles: the patient volume (``moving_img.pre_processed_filepath``) is
    the ANTs *fixed* image and ``fixed`` is the ANTs *moving* image; the
    resulting InverseComposite transform is then applied to the patient
    volume.

    NOTE(review): a second ``registration`` function defined later in this
    module shadows this one at import time -- confirm which definition is
    intended to survive.

    Returns ``moving_img`` with ``processed_filepath`` and ``transform``
    set.
    """
    reg = ants.Registration()

    path = moving_img.temp_data_path
    name = util.get_basename(moving_img.pre_processed_filepath) + '_' + reg_type
    moving_img.processed_filepath = path + name + '_RegTo' + str(moving_img.fixed_image) + '.nii.gz'
    moving_img.transform = path + name + '_RegTo' + str(moving_img.fixed_image) + '.h5'

    # With an existing initial transform, move the mask into place instead
    # of relying on center-of-mass initialisation.
    init_moving_transform = moving_img.init_transform
    if init_moving_transform is not None and os.path.exists(init_moving_transform):
        print("Found initial transform")
        # reg.inputs.initial_moving_transform = init_moving_transform
        reg.inputs.initial_moving_transform_com = False
        mask = util.transform_volume(moving_img.label_inv_filepath,
                                     moving_img.init_transform, label_img=True)
    else:
        reg.inputs.initial_moving_transform_com = True
        mask = moving_img.label_inv_filepath
    reg.inputs.collapse_output_transforms = True
    reg.inputs.fixed_image = moving_img.pre_processed_filepath
    reg.inputs.fixed_image_mask = mask
    reg.inputs.moving_image = fixed
    reg.inputs.num_threads = 8
    if reg_type == RIGID:
        reg.inputs.transforms = ['Rigid']
        reg.inputs.metric = ['MI']
        reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.convergence_window_size = [5]
        reg.inputs.number_of_iterations = ([[10000, 10000, 10000, 10000, 10000]])
        reg.inputs.shrink_factors = [[5, 4, 3, 2, 1]]
        reg.inputs.smoothing_sigmas = [[4, 3, 2, 1, 0]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.transform_parameters = [(0.25,)]
        reg.inputs.use_histogram_matching = [True]
        reg.inputs.metric_weight = [1.0]
    elif reg_type == AFFINE:
        reg.inputs.transforms = ['Rigid', 'Affine']
        # Second stage uses a combined MI+CC metric (nipype nested-list form).
        reg.inputs.metric = ['MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] + [[None, None]]
        # Coarser multi-resolution schedule when starting from the
        # center-of-mass initialisation.
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[10000, 10000, 1000, 1000, 1000],
                                                [10000, 10000, 1000, 1000, 1000]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0]]
        reg.inputs.convergence_threshold = [1.e-6] + [-0.01]
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.transform_parameters = [(0.25,),
                                           (0.25,)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 2
        reg.inputs.use_histogram_matching = [False, True]
    elif reg_type == SYN:
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.metric = ['MI', 'MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] * 2 + [[None, None]]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[10000, 10000, 1000, 1000, 1000],
                                                [10000, 10000, 1000, 1000, 1000],
                                                [100, 75, 75, 75]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1], [5, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0], [4, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [100, 90, 75]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [4, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0], [1, 0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2 + [-0.01]
        reg.inputs.sigma_units = ['vox']*3
        reg.inputs.transform_parameters = [(0.25,),
                                           (0.25,),
                                           (0.2, 3.0, 0.0)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    else:
        raise Exception("Wrong registration format " + reg_type)
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.write_composite_transform = True
    reg.inputs.output_transform_prefix = path + name
    transform = path + name + 'InverseComposite.h5'

    # Skip the (expensive) registration when both outputs already exist.
    if os.path.exists(moving_img.processed_filepath) and\
       os.path.exists(moving_img.transform):
        # generate_image(reg.inputs.output_warped_image, fixed)
        return moving_img
    print("starting registration")
    start_time = datetime.datetime.now()
    reg.run()
    print("Finished registration: ")
    print(datetime.datetime.now() - start_time)

    util.transform_volume(moving_img.pre_processed_filepath, transform,
                          outputpath=moving_img.processed_filepath)
    shutil.copy(transform, moving_img.transform)
    util.generate_image(moving_img.processed_filepath, fixed)

    return moving_img
 def fitness_func(generator, genome):
     """Fitness of *genome*: average of the first output component of
     ``d.forward`` over five images generated from the genome.

     NOTE(review): ``d`` is not defined in this fragment (presumably a
     discriminator network) and ``generator`` is unused here — confirm
     against the original caller. The comprehension variable ``x`` also
     shadows the outer array ``x`` (harmless in Python 3, but confusing).
     """
     x = numpy.array([ util.generate_image(genome).flatten() for x in range(5)])
     y =  [ v[0] for v in d.forward(x) ]
     return sum(y) / float( len(y) )
def registration(moving_img, fixed, reg_type):
    # pylint: disable= too-many-statements, too-many-branches
    """Image2Image registration.

    Configures an ANTs registration with the transform stack selected by
    ``reg_type`` (RIGID, AFFINE, COMPOSITEAFFINE, SIMILARITY or SYN).
    Note the deliberately swapped roles: the patient volume
    (``moving_img.pre_processed_filepath``) is the ANTs *fixed* image and
    ``fixed`` is the ANTs *moving* image; the resulting InverseComposite
    transform is then applied to the patient volume.

    This definition shadows the earlier ``registration`` function above.

    Returns ``moving_img`` with ``processed_filepath`` and ``transform``
    set.
    """
    reg = ants.Registration()

    path = moving_img.temp_data_path
    name = util.get_basename(
        moving_img.pre_processed_filepath) + '_' + reg_type
    moving_img.processed_filepath = path + name + '_RegTo' + str(
        moving_img.fixed_image) + '.nii.gz'
    moving_img.transform = path + name + '_RegTo' + str(
        moving_img.fixed_image) + '.h5'

    # With an existing initial transform, move the mask into place instead
    # of relying on center-of-mass initialisation.
    init_moving_transform = moving_img.init_transform
    if init_moving_transform is not None and os.path.exists(
            init_moving_transform):
        util.LOGGER.info("Found initial transform")
        # reg.inputs.initial_moving_transform = init_moving_transform
        reg.inputs.initial_moving_transform_com = False
        mask = util.transform_volume(moving_img.label_inv_filepath,
                                     moving_img.init_transform,
                                     label_img=True)
    else:
        reg.inputs.initial_moving_transform_com = True
        mask = moving_img.label_inv_filepath
    reg.inputs.collapse_output_transforms = True
    reg.inputs.fixed_image = moving_img.pre_processed_filepath
    reg.inputs.fixed_image_mask = mask
    reg.inputs.moving_image = fixed
    reg.inputs.num_threads = NUM_THREADS_ANTS
    if reg_type == RIGID:
        # Three rigid stages at decreasing coarseness.
        reg.inputs.transforms = ['Rigid', 'Rigid', 'Rigid']
        reg.inputs.metric = ['MI', 'MI', 'MI']
        reg.inputs.metric_weight = [1] * 2 + [1]
        reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [None]
        reg.inputs.sampling_percentage = [0.5] * 2 + [None]
        # Coarser schedule when starting from center-of-mass initialisation.
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [75, 50, 50]])
            reg.inputs.shrink_factors = [[12, 9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[9, 8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [75, 50]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 3
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.25, )]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    elif reg_type == AFFINE or reg_type == COMPOSITEAFFINE or reg_type == SIMILARITY:
        # Rigid stage followed by two stages of the requested linear model.
        if reg_type == AFFINE:
            reg.inputs.transforms = ['Rigid', 'Affine', 'Affine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity', 'Similarity']
        else:
            reg.inputs.transforms = [
                'Rigid', 'CompositeAffine', 'CompositeAffine'
            ]
        reg.inputs.metric = ['MI', 'MI', 'MI']
        reg.inputs.metric_weight = [1] * 2 + [1]
        reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [None]
        reg.inputs.sampling_percentage = [0.5] * 2 + [None]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [75, 50, 50]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [75, 50]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 3
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.25, )]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    elif reg_type == SYN:
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        # Final SyN stage uses a combined MI+CC metric (nested-list form).
        reg.inputs.metric = ['MI', 'MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] * 2 + [[None, None]]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [100, 75, 75, 75]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [5, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [4, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [100, 90, 75]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [4, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [1, 0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2 + [-0.01]
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.2, 3.0, 0.0)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    else:
        raise Exception("Wrong registration format " + reg_type)
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.write_composite_transform = True
    reg.inputs.output_transform_prefix = path + name
    transform = path + name + 'InverseComposite.h5'

    # Skip the (expensive) registration when both outputs already exist.
    if os.path.exists(moving_img.processed_filepath) and\
       os.path.exists(moving_img.transform):
        # generate_image(reg.inputs.output_warped_image, fixed)
        return moving_img
    util.LOGGER.info("starting registration")
    start_time = datetime.datetime.now()
    util.LOGGER.info(reg.cmdline)
    reg.run()
    util.LOGGER.info("Finished registration: ")
    util.LOGGER.info(datetime.datetime.now() - start_time)

    util.transform_volume(moving_img.pre_processed_filepath,
                          transform,
                          outputpath=moving_img.processed_filepath)
    shutil.copy(transform, moving_img.transform)
    util.generate_image(moving_img.processed_filepath, fixed)

    return moving_img