Exemplo n.º 1
0
def _test_be(moving_image_id, reg):
    """Brain-extraction test for a single moving image.

    Pre-processes the image (without BET), runs the supplied ``reg``
    registration interface against it, applies the resulting inverse
    transform and multiplies the warped volume with the template brain
    mask, finally rendering a QA image.
    """
    img = image_registration.img_data(moving_image_id, util.DATA_FOLDER,
                                      util.TEMP_FOLDER_PATH)
    img = image_registration.pre_process(img, False)

    source_path = img.pre_processed_filepath
    # Strip both extensions (.nii.gz) for the transform/output prefix.
    name = splitext(splitext(basename(source_path))[0])[0] + "_bet"
    img.pre_processed_filepath = (util.TEMP_FOLDER_PATH + "/res/"
                                  + splitext(basename(source_path))[0]
                                  + '_bet.nii.gz')

    reg.inputs.fixed_image = source_path
    reg.inputs.fixed_image_mask = img.label_inv_filepath
    reg.inputs.output_transform_prefix = util.TEMP_FOLDER_PATH + name
    reg.inputs.output_warped_image = util.TEMP_FOLDER_PATH + name + '_betReg.nii'
    transform = util.TEMP_FOLDER_PATH + name + 'InverseComposite.h5'

    reg.run()

    img.init_transform = transform

    warped = util.transform_volume(source_path, transform)

    # Mask the registered volume with the template brain mask.
    masker = ants.MultiplyImages()
    masker.inputs.dimension = 3
    masker.inputs.first_input = warped
    masker.inputs.second_input = image_registration.TEMPLATE_MASK
    masker.inputs.output_product_image = img.pre_processed_filepath
    masker.run()

    util.generate_image(img.pre_processed_filepath,
                        image_registration.TEMPLATE_VOLUME)
def _test_be(moving_image_id, reg):
    """Brain-extraction test: register one image and mask it with the template.

    NOTE(review): duplicate definition -- this later copy shadows the
    identical ``_test_be`` defined above; confirm which one is intended.
    """
    img = image_registration.img_data(moving_image_id, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
    img = image_registration.pre_process(img, False)

    resampled_file = img.pre_processed_filepath
    # ``name`` strips both extensions (.nii.gz); the output path below
    # strips only one -- presumably intentional, but worth confirming.
    name = splitext(splitext(basename(resampled_file))[0])[0] + "_bet"
    img.pre_processed_filepath = util.TEMP_FOLDER_PATH + "/res/" +\
        splitext(basename(resampled_file))[0] +\
        '_bet.nii.gz'

    # The pre-processed image is the fixed side; the inverse label mask
    # restricts the metric evaluation region.
    reg.inputs.fixed_image = resampled_file
    reg.inputs.fixed_image_mask = img.label_inv_filepath
    reg.inputs.output_transform_prefix = util.TEMP_FOLDER_PATH + name
    reg.inputs.output_warped_image = util.TEMP_FOLDER_PATH + name + '_betReg.nii'
    transform = util.TEMP_FOLDER_PATH + name + 'InverseComposite.h5'

    reg.run()

    img.init_transform = transform

    # Apply the inverse composite transform to the resampled volume.
    reg_volume = util.transform_volume(resampled_file, transform)

    # Mask the registered volume with the template brain mask.
    mult = ants.MultiplyImages()
    mult.inputs.dimension = 3
    mult.inputs.first_input = reg_volume
    mult.inputs.second_input = image_registration.TEMPLATE_MASK
    mult.inputs.output_product_image = img.pre_processed_filepath
    mult.run()

    util.generate_image(img.pre_processed_filepath, image_registration.TEMPLATE_VOLUME)
def post_calculations(moving_dataset_image_ids, result=None):
    """Transform registered images into template space and collect them.

    For every image id, the registered volume is moved with the
    pre-image transforms, masked with the template brain mask and
    appended under the key ``"img"``; each associated segmentation is
    transformed the same way and appended under its own label key.
    Returns the (possibly pre-seeded) ``result`` dict.
    """
    if result is None:
        result = {}

    for image_id in moving_dataset_image_ids:
        img = img_data(image_id, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
        img.load_db_transforms()

        img_pre = img_data(img.fixed_image, util.DATA_FOLDER,
                           util.TEMP_FOLDER_PATH)
        img_pre.load_db_transforms()

        reg_vol = util.transform_volume(img.reg_img_filepath,
                                        img_pre.get_transforms())
        vol = (util.TEMP_FOLDER_PATH
               + util.get_basename(basename(reg_vol)) + '_BE.nii.gz')

        # Mask the transformed volume with the template brain mask.
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_vol
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = vol
        mult.run()

        result.setdefault("img", []).append(vol)

        # Move the label/segmentation images with the same transforms.
        for (segmentation, label) in util.find_reg_label_images(image_id):
            warped_seg = util.transform_volume(segmentation,
                                               img_pre.get_transforms(),
                                               label_img=True)
            result.setdefault(label, []).append(warped_seg)
    return result
def move_vol(moving, transform, label_img=False):
    """Move data with transform.

    Resamples ``moving`` to 1 mm slices (nearest-neighbour for label
    images, otherwise via the standard pre-processing pipeline), applies
    ``transform`` to the resampled file and renders a QA image against
    the template volume. Returns the path of the transformed volume.
    """
    if label_img:
        # resample volume to 1 mm slices
        target_affine_3x3 = np.eye(3) * 1
        # Nearest-neighbour interpolation keeps label values intact.
        img_3d_affine = resample_img(moving, target_affine=target_affine_3x3,
                                     interpolation='nearest')
        resampled_file = util.TEMP_FOLDER_PATH + util.get_basename(moving) + '_resample.nii.gz'
        # pylint: disable= no-member
        img_3d_affine.to_filename(resampled_file)

    else:
        img = img_data(-1, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
        img.set_img_filepath(moving)
        resampled_file = pre_process(img, False).pre_processed_filepath

    # BUG FIX: transform the *resampled* file. Previously the original
    # ``moving`` file was transformed, which silently discarded the
    # resampling / pre-processing done above (the later variant of this
    # function in this module confirms ``resampled_file`` is intended).
    result = util.transform_volume(resampled_file, transform, label_img)
    util.generate_image(result, util.TEMPLATE_VOLUME)
    return result
Exemplo n.º 5
0
def move_vol(moving, transform, label_img=False, slice_size=1, ref_img=None):
    """Apply ``transform`` to ``moving`` and render a QA image.

    Label images are resampled with nearest-neighbour interpolation to
    ``slice_size`` mm isotropic voxels; other images go through the
    standard pre-processing pipeline first. Returns the path of the
    transformed volume.
    """
    if not label_img:
        data = img_data(-1, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
        data.set_img_filepath(moving)
        resampled_file = pre_process(data, False).pre_processed_filepath
    else:
        # Nearest-neighbour interpolation keeps label values intact
        # while resampling to slice_size-mm slices.
        affine = np.eye(3) * slice_size
        resampled = resample_img(moving,
                                 target_affine=affine,
                                 interpolation='nearest')
        resampled_file = (util.TEMP_FOLDER_PATH
                          + util.get_basename(moving) + '_resample.nii.gz')
        # pylint: disable= no-member
        resampled.to_filename(resampled_file)
        del resampled

    warped = util.transform_volume(resampled_file,
                                   transform,
                                   label_img,
                                   ref_img=ref_img)
    util.generate_image(warped, util.TEMPLATE_VOLUME)
    return warped
Exemplo n.º 6
0
def pre_process(img, do_bet=True, slice_size=1, reg_type=None, be_method=None):
    # pylint: disable= too-many-statements, too-many-locals, too-many-branches
    """ Pre process the data.

    Pipeline: N4 bias-field correction, intensity normalisation to
    [0, 100] (same range as the template), resampling to ``slice_size``
    mm slices, then optional brain extraction selected by ``be_method``:

    * 0 -- ANTs registration of the template onto the image, followed by
      multiplication with the template brain mask.
    * 1 -- FSL BET alone.
    * 2 -- optional BET (when BET_FRAC > 0) followed by ANTs
      registration and mask multiplication.

    Returns ``img`` with ``pre_processed_filepath`` (and, for the ANTs
    paths, ``init_transform``) updated.
    """
    path = img.temp_data_path

    input_file = img.img_filepath
    n4_file = path + util.get_basename(input_file) + '_n4.nii.gz'
    norm_file = path + util.get_basename(n4_file) + '_norm.nii.gz'
    resampled_file = path + util.get_basename(norm_file) + '_resample.nii.gz'
    name = util.get_basename(resampled_file) + "_be"
    img.pre_processed_filepath = path + name + '.nii.gz'

    # Bias-field correction.
    n4bias = ants.N4BiasFieldCorrection()
    n4bias.inputs.dimension = 3
    n4bias.inputs.num_threads = NUM_THREADS_ANTS
    n4bias.inputs.input_image = input_file
    n4bias.inputs.output_image = n4_file
    n4bias.run()

    # normalization [0,100], same as template
    normalize_img = nib.load(n4_file)
    temp_data = normalize_img.get_data()
    temp_img = nib.Nifti1Image(temp_data / np.amax(temp_data) * 100,
                               normalize_img.affine, normalize_img.header)
    temp_img.to_filename(norm_file)
    del temp_img

    # resample volume to slice_size mm slices
    target_affine_3x3 = np.eye(3) * slice_size
    img_3d_affine = resample_img(norm_file, target_affine=target_affine_3x3)
    nib.save(img_3d_affine, resampled_file)

    if not do_bet:
        img.pre_processed_filepath = resampled_file
        return img

    if be_method == 0:
        img.init_transform = path + name + '_InitRegTo' + str(
            img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = resampled_file
        reg.inputs.moving_image = util.TEMPLATE_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = NUM_THREADS_ANTS
        reg.inputs.initial_moving_transform_com = True

        if reg_type == RIGID:
            reg.inputs.transforms = ['Rigid', 'Rigid']
        elif reg_type == COMPOSITEAFFINE:
            reg.inputs.transforms = ['Rigid', 'CompositeAffine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity']
        else:
            reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.number_of_iterations = ([[
            15000, 12000, 10000, 10000, 10000, 5000, 5000
        ], [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[19, 16, 12, 9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[10, 10, 10, 8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2
        reg.inputs.transform_parameters = [(0.25, ), (0.25, )]
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_beReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        util.LOGGER.info("starting be registration")
        reg.run()
        util.LOGGER.info("Finished be registration")

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        # Mask the registered volume with the template brain mask.
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    elif be_method == 1:
        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command=BET_COMMAND)
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.25
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        # bet.inputs.reduce_bias = True
        bet.inputs.mask = True

        bet.inputs.out_file = img.pre_processed_filepath

        bet.run()
        util.generate_image(img.pre_processed_filepath, resampled_file)
    elif be_method == 2:
        if BET_FRAC > 0:
            name = util.get_basename(resampled_file) + "_bet"
            # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
            bet = fsl.BET(command=BET_COMMAND)
            bet.inputs.in_file = resampled_file
            # pylint: disable= pointless-string-statement
            """ fractional intensity threshold (0->1); default=0.5;
            smaller values give larger brain outline estimates"""
            bet.inputs.frac = BET_FRAC
            """ vertical gradient in fractional intensity threshold (-1->1);
            default=0; positive values give larger brain outline at bottom,
            smaller at top """
            bet.inputs.vertical_gradient = 0
            """  This attempts to reduce image bias, and residual neck voxels.
            This can be useful when running SIENA or SIENAX, for example.
            Various stages involving FAST segmentation-based bias field removal
            and standard-space masking are combined to produce a result which
            can often give better results than just running bet2."""
            bet.inputs.reduce_bias = True
            bet.inputs.mask = True
            bet.inputs.out_file = path + name + '.nii.gz'
            util.LOGGER.info("starting bet registration")
            start_time = datetime.datetime.now()
            util.LOGGER.info(bet.cmdline)
            # Skip BET if the output already exists (resume support).
            if not os.path.exists(bet.inputs.out_file):
                bet.run()
            util.LOGGER.info("Finished bet registration 0: ")
            util.LOGGER.info(datetime.datetime.now() - start_time)
            name += "_be"
            moving_image = util.TEMPLATE_MASKED_VOLUME
            fixed_image = bet.inputs.out_file
        else:
            name = util.get_basename(resampled_file) + "_be"
            moving_image = util.TEMPLATE_VOLUME
            fixed_image = resampled_file
        img.init_transform = path + name + '_InitRegTo' + str(
            img.fixed_image) + '.h5'
        img.pre_processed_filepath = path + name + '.nii.gz'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = fixed_image
        reg.inputs.moving_image = moving_image
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = NUM_THREADS_ANTS
        reg.inputs.initial_moving_transform_com = True

        if reg_type == RIGID:
            reg.inputs.transforms = ['Rigid', 'Rigid']
        elif reg_type == COMPOSITEAFFINE:
            reg.inputs.transforms = ['Rigid', 'CompositeAffine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity']
        else:
            # BUG FIX: previously only ``reg_type == AFFINE`` set the
            # transforms here, so any other value (e.g. None, the
            # default) left reg.inputs.transforms unset. Default to
            # Affine, consistent with the be_method == 0 branch above.
            reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2
        reg.inputs.sampling_percentage = [0.5] * 2
        reg.inputs.number_of_iterations = ([[10000, 10000, 5000, 5000],
                                            [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25, ), (0.25, )]
        reg.inputs.convergence_threshold = [1.e-6] * 2
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_TemplateReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        util.LOGGER.info("starting be registration")
        util.LOGGER.info(reg.cmdline)
        start_time = datetime.datetime.now()
        # Skip registration if the output already exists (resume support).
        if not os.path.exists(reg.inputs.output_warped_image):
            reg.run()
        util.LOGGER.info("Finished be registration: ")
        util.LOGGER.info(datetime.datetime.now() - start_time)

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        brain_mask = util.TEMPLATE_MASK
        #brain_mask = img.reg_brainmask_filepath
        if not brain_mask:
            brain_mask = util.TEMPLATE_MASK
        util.LOGGER.info("Using brain mask " + brain_mask)

        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = brain_mask
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    else:
        util.LOGGER.error(" INVALID BE METHOD!!!!")

    util.LOGGER.info("---BET " + img.pre_processed_filepath)
    return img
Exemplo n.º 7
0
def registration(moving_img, fixed, reg_type):
    # pylint: disable= too-many-statements, too-many-branches
    """Image2Image registration.

    Registers ``fixed`` (used as the ANTs *moving* image) onto
    ``moving_img.pre_processed_filepath`` (used as the ANTs *fixed*
    image) -- i.e. the roles are deliberately swapped so that the
    ``InverseComposite`` transform maps the pre-processed image into the
    target space. ``reg_type`` selects the transform chain (RIGID,
    AFFINE, COMPOSITEAFFINE, SIMILARITY or SYN); any other value raises.

    Side effects: sets ``moving_img.processed_filepath`` and
    ``moving_img.transform``, writes the transformed volume and a QA
    image. Returns ``moving_img``.
    """
    reg = ants.Registration()

    path = moving_img.temp_data_path
    name = util.get_basename(
        moving_img.pre_processed_filepath) + '_' + reg_type
    moving_img.processed_filepath = path + name + '_RegTo' + str(
        moving_img.fixed_image) + '.nii.gz'
    moving_img.transform = path + name + '_RegTo' + str(
        moving_img.fixed_image) + '.h5'

    # If an initial transform exists, move the mask into its space and
    # skip the center-of-mass initialisation; otherwise start from COM.
    init_moving_transform = moving_img.init_transform
    if init_moving_transform is not None and os.path.exists(
            init_moving_transform):
        util.LOGGER.info("Found initial transform")
        # reg.inputs.initial_moving_transform = init_moving_transform
        reg.inputs.initial_moving_transform_com = False
        mask = util.transform_volume(moving_img.label_inv_filepath,
                                     moving_img.init_transform,
                                     label_img=True)
    else:
        reg.inputs.initial_moving_transform_com = True
        mask = moving_img.label_inv_filepath
    reg.inputs.collapse_output_transforms = True
    reg.inputs.fixed_image = moving_img.pre_processed_filepath
    reg.inputs.fixed_image_mask = mask
    reg.inputs.moving_image = fixed
    reg.inputs.num_threads = NUM_THREADS_ANTS
    if reg_type == RIGID:
        reg.inputs.transforms = ['Rigid', 'Rigid', 'Rigid']
        reg.inputs.metric = ['MI', 'MI', 'MI']
        reg.inputs.metric_weight = [1] * 2 + [1]
        reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [None]
        reg.inputs.sampling_percentage = [0.5] * 2 + [None]
        # Longer multi-resolution schedule when starting from a plain
        # center-of-mass initialisation; shorter when an initial
        # transform already provided a rough alignment.
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [75, 50, 50]])
            reg.inputs.shrink_factors = [[12, 9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[9, 8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [75, 50]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 3
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.25, )]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    elif reg_type == AFFINE or reg_type == COMPOSITEAFFINE or reg_type == SIMILARITY:
        if reg_type == AFFINE:
            reg.inputs.transforms = ['Rigid', 'Affine', 'Affine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity', 'Similarity']
        else:
            reg.inputs.transforms = [
                'Rigid', 'CompositeAffine', 'CompositeAffine'
            ]
        reg.inputs.metric = ['MI', 'MI', 'MI']
        reg.inputs.metric_weight = [1] * 2 + [1]
        reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [None]
        reg.inputs.sampling_percentage = [0.5] * 2 + [None]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [75, 50, 50]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [75, 50]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 3
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.25, )]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    elif reg_type == SYN:
        # Deformable stage uses a combined MI + CC metric.
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.metric = ['MI', 'MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] * 2 + [[None, None]]
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[
                10000, 10000, 1000, 1000, 1000
            ], [10000, 10000, 1000, 1000, 1000], [100, 75, 75, 75]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1],
                                         [5, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0],
                                           [4, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [100, 90, 75]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [4, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0],
                                           [1, 0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2 + [-0.01]
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.transform_parameters = [(0.25, ), (0.25, ), (0.2, 3.0, 0.0)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    else:
        raise Exception("Wrong registration format " + reg_type)
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.write_composite_transform = True
    reg.inputs.output_transform_prefix = path + name
    transform = path + name + 'InverseComposite.h5'

    # Resume support: reuse previously computed outputs if present.
    if os.path.exists(moving_img.processed_filepath) and\
       os.path.exists(moving_img.transform):
        # generate_image(reg.inputs.output_warped_image, fixed)
        return moving_img
    util.LOGGER.info("starting registration")
    start_time = datetime.datetime.now()
    util.LOGGER.info(reg.cmdline)
    reg.run()
    util.LOGGER.info("Finished registration: ")
    util.LOGGER.info(datetime.datetime.now() - start_time)

    # Apply the inverse composite transform and store a copy of it.
    util.transform_volume(moving_img.pre_processed_filepath,
                          transform,
                          outputpath=moving_img.processed_filepath)
    shutil.copy(transform, moving_img.transform)
    util.generate_image(moving_img.processed_filepath, fixed)

    return moving_img
def pre_process(img, do_bet=True):
    # pylint: disable= too-many-statements, too-many-locals
    """ Pre process the data.

    NOTE(review): this is an older variant of ``pre_process`` (another
    definition with extra parameters appears earlier in this file);
    confirm which one the callers should use. This variant reads the
    module-level ``BE_METHOD`` global instead of taking a parameter.

    Pipeline: N4 bias-field correction, normalisation to [0, 100]
    (matching the template), resampling to 1 mm slices, then brain
    extraction per ``BE_METHOD`` (0: ANTs template registration +
    template-mask multiplication, 1: FSL BET only, 2: BET followed by
    ANTs registration and mask multiplication).
    """

    path = img.temp_data_path

    input_file = img.img_filepath
    n4_file = path + util.get_basename(input_file) + '_n4.nii.gz'
    norm_file = path + util.get_basename(n4_file) + '_norm.nii.gz'
    resampled_file = path + util.get_basename(norm_file) + '_resample.nii.gz'
    name = util.get_basename(resampled_file) + "_be"
    img.pre_processed_filepath = path + name + '.nii.gz'

    # Resume support: skip everything if outputs already exist.
    if os.path.exists(img.pre_processed_filepath) and\
       (os.path.exists(path + name + 'Composite.h5') or BE_METHOD == 1):
        if BE_METHOD == 0:
            img.init_transform = path + name + 'Composite.h5'
        util.generate_image(img.pre_processed_filepath, util.TEMPLATE_VOLUME)
        return img

    # Bias-field correction.
    n4bias = ants.N4BiasFieldCorrection()
    n4bias.inputs.dimension = 3
    n4bias.inputs.input_image = input_file
    n4bias.inputs.output_image = n4_file
    n4bias.run()

    # normalization [0,100], same as template
    normalize_img = nib.load(n4_file)
    temp_data = normalize_img.get_data()
    temp_img = nib.Nifti1Image(temp_data/np.amax(temp_data)*100,
                               normalize_img.affine, normalize_img.header)
    temp_img.to_filename(norm_file)

    # resample volume to 1 mm slices
    target_affine_3x3 = np.eye(3) * 1
    img_3d_affine = resample_img(norm_file, target_affine=target_affine_3x3)
    nib.save(img_3d_affine, resampled_file)

    if not do_bet:
        img.pre_processed_filepath = resampled_file
        return img

    if BE_METHOD == 0:
        img.init_transform = path + name + '_InitRegTo' + str(img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = resampled_file
        reg.inputs.moving_image = util.TEMPLATE_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = 1
        reg.inputs.initial_moving_transform_com = True

        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.number_of_iterations = ([[10000, 10000, 10000, 10000],
                                            [10000, 10000, 10000, 10000]])

        reg.inputs.convergence_threshold = [1.e-6]*2
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25,), (0.25,)]
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_beReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        print("starting be registration")
        reg.run()
        print("Finished be registration")

        # Apply the inverse transform and keep a copy of it.
        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        # Mask the registered volume with the template brain mask.
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    elif BE_METHOD == 1:
        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command="fsl5.0-bet")
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.25
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        # bet.inputs.reduce_bias = True
        bet.inputs.mask = True

        bet.inputs.out_file = img.pre_processed_filepath

        bet.run()
        util.generate_image(img.pre_processed_filepath, resampled_file)
    elif BE_METHOD == 2:
        name = util.get_basename(resampled_file) + "_bet"

        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command="fsl5.0-bet")
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.1
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        bet.inputs.reduce_bias = True
        bet.inputs.mask = True
        bet.inputs.out_file = path + name + '.nii.gz'
        print("starting bet registration")
        start_time = datetime.datetime.now()
        # Resume support: skip BET if its output already exists.
        if not os.path.exists(bet.inputs.out_file):
            bet.run()
        print("Finished bet registration 0: ")
        print(datetime.datetime.now() - start_time)

        name = name + "_be"
        img.pre_processed_filepath = path + name + '.nii.gz'
        img.init_transform = path + name + '_InitRegTo' + str(img.fixed_image) + '.h5'

        # Register the masked template onto the BET output.
        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = bet.inputs.out_file
        reg.inputs.moving_image = util.TEMPLATE_MASKED_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = 8
        reg.inputs.initial_moving_transform_com = True

        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2
        reg.inputs.sampling_percentage = [0.5] * 2
        reg.inputs.number_of_iterations = ([[10000, 10000, 5000, 5000],
                                            [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25,), (0.25,)]
        reg.inputs.convergence_threshold = [1.e-6]*2
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_TemplateReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        print("starting be registration")
        start_time = datetime.datetime.now()
        # Resume support: skip registration if its output already exists.
        if not os.path.exists(reg.inputs.output_warped_image):
            reg.run()
        print("Finished be registration: ")
        print(datetime.datetime.now() - start_time)

        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        # Mask the registered volume with the template brain mask.
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)

    print("---BET", img.pre_processed_filepath)
    return img
def registration(moving_img, fixed, reg_type):
    # pylint: disable= too-many-statements
    """Image2Image registration.

    Configures and runs an ANTs registration between ``fixed`` and the
    pre-processed moving image, then resamples the moving image with the
    resulting inverse transform.

    NOTE: the ANTs fixed/moving roles are deliberately swapped here --
    ``moving_img.pre_processed_filepath`` is passed as ``fixed_image`` and
    ``fixed`` as ``moving_image``; the ``InverseComposite.h5`` transform is
    then applied to the pre-processed moving image (presumably so the result
    lands in the space of ``fixed`` -- confirm against callers).

    Args:
        moving_img: image-data object providing ``pre_processed_filepath``,
            ``temp_data_path``, ``fixed_image``, ``init_transform`` and
            ``label_inv_filepath``; its ``processed_filepath`` and
            ``transform`` attributes are set by this function.
        fixed: path to the target volume.
        reg_type: one of ``RIGID``, ``AFFINE`` or ``SYN``.

    Returns:
        ``moving_img`` with ``processed_filepath`` and ``transform`` set.

    Raises:
        ValueError: if ``reg_type`` is not a recognized registration type.
    """
    reg = ants.Registration()

    # Output naming: results are tagged with the registration type and the
    # id of the fixed image they were registered to.
    path = moving_img.temp_data_path
    name = util.get_basename(moving_img.pre_processed_filepath) + '_' + reg_type
    moving_img.processed_filepath = path + name + '_RegTo' + str(moving_img.fixed_image) + '.nii.gz'
    moving_img.transform = path + name + '_RegTo' + str(moving_img.fixed_image) + '.h5'

    # If an initial transform from a previous (e.g. brain-extraction) step
    # exists, bring the mask into that space instead of initializing by
    # center of mass.  NOTE: transforming the mask writes a file as a side
    # effect, even when the early-exit below later skips the registration.
    init_moving_transform = moving_img.init_transform
    if init_moving_transform is not None and os.path.exists(init_moving_transform):
        print("Found initial transform")
        # reg.inputs.initial_moving_transform = init_moving_transform
        reg.inputs.initial_moving_transform_com = False
        mask = util.transform_volume(moving_img.label_inv_filepath,
                                     moving_img.init_transform, label_img=True)
    else:
        reg.inputs.initial_moving_transform_com = True
        mask = moving_img.label_inv_filepath
    reg.inputs.collapse_output_transforms = True
    # Deliberate inversion: see docstring.  The inverse composite transform
    # produced by this setup is what gets applied further down.
    reg.inputs.fixed_image = moving_img.pre_processed_filepath
    reg.inputs.fixed_image_mask = mask
    reg.inputs.moving_image = fixed
    reg.inputs.num_threads = 8
    if reg_type == RIGID:
        # Single rigid stage, mutual-information metric.
        reg.inputs.transforms = ['Rigid']
        reg.inputs.metric = ['MI']
        reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.convergence_window_size = [5]
        reg.inputs.number_of_iterations = ([[10000, 10000, 10000, 10000, 10000]])
        reg.inputs.shrink_factors = [[5, 4, 3, 2, 1]]
        reg.inputs.smoothing_sigmas = [[4, 3, 2, 1, 0]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.transform_parameters = [(0.25,)]
        reg.inputs.use_histogram_matching = [True]
        reg.inputs.metric_weight = [1.0]
    elif reg_type == AFFINE:
        # Rigid stage followed by affine; the affine stage combines MI and
        # cross-correlation metrics with equal weight.
        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] + [[None, None]]
        # A center-of-mass initialization starts farther from alignment, so
        # use a deeper/longer pyramid than when an initial transform exists.
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[10000, 10000, 1000, 1000, 1000],
                                                [10000, 10000, 1000, 1000, 1000]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0]]
        # Negative threshold on the last stage effectively disables early
        # convergence for it.
        reg.inputs.convergence_threshold = [1.e-6] + [-0.01]
        reg.inputs.sigma_units = ['vox']*2
        reg.inputs.transform_parameters = [(0.25,),
                                           (0.25,)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 2
        reg.inputs.use_histogram_matching = [False, True]
    elif reg_type == SYN:
        # Rigid + affine + deformable SyN; the SyN stage combines MI and CC.
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.metric = ['MI', 'MI', ['MI', 'CC']]
        reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
        reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
        reg.inputs.convergence_window_size = [5, 5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
        reg.inputs.sampling_percentage = [0.5] * 2 + [[None, None]]
        # Same pyramid-depth trade-off as the affine branch.
        if reg.inputs.initial_moving_transform_com:
            reg.inputs.number_of_iterations = ([[10000, 10000, 1000, 1000, 1000],
                                                [10000, 10000, 1000, 1000, 1000],
                                                [100, 75, 75, 75]])
            reg.inputs.shrink_factors = [[9, 5, 3, 2, 1], [5, 4, 3, 2, 1], [5, 3, 2, 1]]
            reg.inputs.smoothing_sigmas = [[8, 4, 2, 1, 0], [4, 3, 2, 1, 0], [4, 2, 1, 0]]
        else:
            reg.inputs.number_of_iterations = ([[5000, 5000, 1000, 500],
                                                [5000, 5000, 1000, 500],
                                                [100, 90, 75]])
            reg.inputs.shrink_factors = [[7, 5, 2, 1], [4, 3, 2, 1], [4, 2, 1]]
            reg.inputs.smoothing_sigmas = [[6, 4, 1, 0], [3, 2, 1, 0], [1, 0.5, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2 + [-0.01]
        reg.inputs.sigma_units = ['vox']*3
        reg.inputs.transform_parameters = [(0.25,),
                                           (0.25,),
                                           (0.2, 3.0, 0.0)]
        reg.inputs.use_estimate_learning_rate_once = [True] * 3
        reg.inputs.use_histogram_matching = [False, False, True]
    else:
        # ValueError (not bare Exception) for an invalid argument; str()
        # keeps the message intact even for non-string reg_type values.
        raise ValueError("Wrong registration format " + str(reg_type))
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.write_composite_transform = True
    reg.inputs.output_transform_prefix = path + name
    transform = path + name + 'InverseComposite.h5'

    # Skip the (expensive) registration if both outputs already exist.
    if os.path.exists(moving_img.processed_filepath) and\
       os.path.exists(moving_img.transform):
        # generate_image(reg.inputs.output_warped_image, fixed)
        return moving_img
    print("starting registration")
    start_time = datetime.datetime.now()
    reg.run()
    print("Finished registration: ")
    print(datetime.datetime.now() - start_time)

    # Apply the inverse composite transform to the pre-processed moving
    # image and keep a copy of the transform next to the result.
    util.transform_volume(moving_img.pre_processed_filepath, transform,
                          outputpath=moving_img.processed_filepath)
    shutil.copy(transform, moving_img.transform)
    util.generate_image(moving_img.processed_filepath, fixed)

    return moving_img