def init_smoother(self, opt):
    """Build the smoothers used by this object.

    The output displacement is defined on the transformation map [-1, 1].
    ``self.flow_smoother`` follows the user-supplied options ``opt``;
    ``self.mask_smoother`` always uses a fixed gaussian (std 0.1).
    """
    self.flow_smoother = SF.SmootherFactory(
        self.img_sz, self.double_spacing).create_smoother(opt)

    # The mask is smoothed with a fixed gaussian regardless of `opt`,
    # so work on a deep copy to avoid mutating the caller's options.
    mask_opt = copy.deepcopy(opt)
    mask_opt["smoother"]["type"] = "gaussian"
    mask_opt["smoother"]["gaussian_std"] = 0.1
    self.mask_smoother = SF.SmootherFactory(
        self.img_sz, self.double_spacing).create_smoother(mask_opt)
def createImage(self, ex_len=64):
    """Create a smoothed source/target square-image pair.

    Populates ``self.ISource``, ``self.ITarget``, ``self.sz`` and
    ``self.spacing`` for a 2D example of edge length ``ex_len``.
    """
    dim = 2
    # size of the desired images: (sz)^dim
    szEx = np.tile(ex_len, dim)
    # create a default image size with two sample squares
    I0, I1, self.spacing = eg.CreateSquares(dim).create_image_pair(
        szEx, self.params)
    self.sz = np.array(I0.shape)

    # create the source and target image as pyTorch variables
    self.ISource = AdaptVal(torch.from_numpy(I0.copy()))
    self.ITarget = AdaptVal(torch.from_numpy(I1))

    # register the smoothing settings (and their defaults) in the
    # parameter dictionary, then smooth both images a little bit
    self.params[('image_smoothing', {}, 'image smoothing settings')]
    self.params['image_smoothing'][(
        'smooth_images', True,
        '[True|False]; smoothes the images before registration')]
    self.params['image_smoothing'][(
        'smoother', {}, 'settings for the image smoothing')]
    self.params['image_smoothing']['smoother'][(
        'gaussian_std', 0.05, 'how much smoothing is done')]
    self.params['image_smoothing']['smoother'][(
        'type', 'gaussian', "['gaussianSpatial'|'gaussian'|'diffusion']")]

    smoothing_cfg = self.params['image_smoothing']
    smoother = SF.SmootherFactory(
        self.sz[2::], self.spacing).create_smoother(smoothing_cfg)
    self.ISource = smoother.smooth(self.ISource)
    self.ITarget = smoother.smooth(self.ITarget)
def add_texture_on_img(im_orig, texture_gaussian_smoothness=0.1,
                       texture_magnitude=0.3):
    """Overlay smoothed random texture on an image, per intensity level.

    For each integer intensity level present in ``im_orig``, a fresh
    smoothed noise field is drawn and added to all pixels at or above
    that level (higher levels overwrite lower ones).

    :param im_orig: input image array; NOTE(review): the reshape below
        assumes ``sz[0] * sz[1] == 1`` (single batch, single channel) --
        confirm against callers.
    :param texture_gaussian_smoothness: gaussian std used to smooth the noise
    :param texture_magnitude: peak amplitude of the added texture
    :return: textured image as a ``torch.Tensor``
    """
    sz = im_orig.shape

    # The smoother configuration, spacing, and smoother itself do not
    # depend on the level -- build them once instead of once per level.
    r_params = pars.ParameterDict()
    r_params['smoother']['type'] = 'gaussian'
    r_params['smoother']['gaussian_std'] = texture_gaussian_smoothness
    spacing = 1.0 / (np.array(sz[2:]).astype('float32') - 1)
    s_r = sf.SmootherFactory(sz[2::], spacing).create_smoother(r_params)

    # do this separately for each integer intensity level
    levels = np.unique(np.floor(im_orig).astype('int'))
    im = np.zeros_like(im_orig)
    for current_level in levels:
        # fresh zero-centered noise for this level, lifted to full shape
        rand_noise = np.random.random(sz[2:]).astype('float32') - 0.5
        rand_noise = rand_noise.reshape(sz)
        rand_noise_smoothed = s_r.smooth(AdaptVal(
            torch.from_numpy(rand_noise))).detach().cpu().numpy()
        # normalize to [.., 1] then scale to the requested magnitude
        rand_noise_smoothed /= rand_noise_smoothed.max()
        rand_noise_smoothed *= texture_magnitude
        c_indx = (im_orig >= current_level - 0.5)
        im[c_indx] = im_orig[c_indx] + rand_noise_smoothed[c_indx]
    return torch.Tensor(im)
def get_single_gaussian_smoother(gaussian_std, sz, spacing):
    """Return a gaussian smoother with std ``gaussian_std`` for the
    given grid size ``sz`` and ``spacing``."""
    cfg = pars.ParameterDict()
    cfg['smoother']['type'] = 'gaussian'
    cfg['smoother']['gaussian_std'] = gaussian_std
    return sf.SmootherFactory(sz, spacing).create_smoother(cfg)
# Prepare the (previously loaded) images for registration.
# I0, I1, spacing, ds, and params come from earlier in the script.
sz = np.array(I0.shape)
# images are expected in [batch, channel, spatial...] layout, hence dim + 2
assert (len(sz) == ds.dim + 2)
print('Spacing = ' + str(spacing))
# create the source and target image as pyTorch variables
ISource = AdaptVal(torch.from_numpy(I0.copy()))
ITarget = AdaptVal(torch.from_numpy(I1))
# if desired we smooth them a little bit
if ds.smooth_images:
    # smooth both a little bit
    params['image_smoothing'] = ds.par_algconf['image_smoothing']
    cparams = params['image_smoothing']
    # the smoother operates on the spatial dimensions only (sz[2:])
    s = SF.SmootherFactory(sz[2::], spacing).create_smoother(cparams)
    ISource = s.smooth(ISource)
    ITarget = s.smooth(ITarget)

###############################
# Setting up the optimizer
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# We instantiate the multi-scale optimizer, which requires knowledge about image size and spacing,
# as well as if a map will be used for computation (for this example we do not use one).

# this custom registration algorithm does not use a map, so set it to False
use_map = False
# If a map would be used we could compute at a lower resolution internally.
map_low_res_factor = None
def do_registration(I0_name, I1_name, visualize, visualize_step,
                    use_multi_scale, normalize_spacing,
                    normalize_intensities, squeeze_image, par_algconf):
    """Run an image registration configured by ``par_algconf``.

    Loads the two images, optionally smooths them, then optimizes a
    registration model (single- or multi-scale).

    :param I0_name: filename of the source image
    :param I1_name: filename of the target image
    :param visualize: whether to visualize during optimization
    :param visualize_step: visualize every this-many iterations
    :param use_multi_scale: if True, use the configured multi-scale schedule
    :param normalize_spacing: passed through to ``read_images``
    :param normalize_intensities: passed through to ``read_images``
    :param squeeze_image: passed through to ``read_images``
    :param par_algconf: algorithm configuration dictionary
    :return: (warped_image, optimized_map, optimized_reg_parameters,
        optimized_energy, params, md_I0)
    """
    from mermaid.data_wrapper import AdaptVal
    import mermaid.smoother_factory as SF
    import mermaid.multiscale_optimizer as MO
    from mermaid.config_parser import nr_of_threads

    params = pars.ParameterDict()

    # pull the relevant sub-configurations out of the algorithm config
    par_image_smoothing = par_algconf['algconf']['image_smoothing']
    par_model = par_algconf['algconf']['model']
    par_optimizer = par_algconf['algconf']['optimizer']

    use_map = par_model['deformation']['use_map']
    map_low_res_factor = par_model['deformation']['map_low_res_factor']
    model_name = par_model['deformation']['name']

    # model names are suffixed by whether they operate on a map or an image
    if use_map:
        model_name = model_name + '_map'
    else:
        model_name = model_name + '_image'

    # general parameters
    params['model']['registration_model'] = par_algconf['algconf']['model']['registration_model']

    torch.set_num_threads(nr_of_threads)
    print('Number of pytorch threads set to: ' + str(torch.get_num_threads()))

    I0, I1, spacing, md_I0, md_I1 = read_images(
        I0_name, I1_name, normalize_spacing, normalize_intensities,
        squeeze_image)
    sz = I0.shape

    # create the source and target image as pyTorch variables
    ISource = AdaptVal(torch.from_numpy(I0.copy()))
    ITarget = AdaptVal(torch.from_numpy(I1))

    smooth_images = par_image_smoothing['smooth_images']
    if smooth_images:
        # smooth both a little bit
        params['image_smoothing'] = par_algconf['algconf']['image_smoothing']
        cparams = params['image_smoothing']
        # smoother works on the spatial dimensions only (sz[2:])
        s = SF.SmootherFactory(sz[2::], spacing).create_smoother(cparams)
        # NOTE(review): other code in this file calls `s.smooth(...)`;
        # here `smooth_scalar_field` is used -- confirm both exist on the
        # smoother API and are intended.
        ISource = s.smooth_scalar_field(ISource)
        ITarget = s.smooth_scalar_field(ITarget)

    if not use_multi_scale:
        # create multi-scale settings for single-scale solution
        multi_scale_scale_factors = [1.0]
        multi_scale_iterations_per_scale = [par_optimizer['single_scale']['nr_of_iterations']]
    else:
        multi_scale_scale_factors = par_optimizer['multi_scale']['scale_factors']
        multi_scale_iterations_per_scale = par_optimizer['multi_scale']['scale_iterations']

    mo = MO.MultiScaleRegistrationOptimizer(
        sz, spacing, use_map, map_low_res_factor, params)

    optimizer_name = par_optimizer['name']
    mo.set_optimizer_by_name(optimizer_name)
    mo.set_visualization(visualize)
    mo.set_visualize_step(visualize_step)
    mo.set_model(model_name)
    mo.set_source_image(ISource)
    mo.set_target_image(ITarget)
    mo.set_scale_factors(multi_scale_scale_factors)
    mo.set_number_of_iterations_per_scale(multi_scale_iterations_per_scale)

    # and now do the optimization
    mo.optimize()

    optimized_energy = mo.get_energy()
    warped_image = mo.get_warped_image()
    optimized_map = mo.get_map()
    optimized_reg_parameters = mo.get_model_parameters()

    return warped_image, optimized_map, optimized_reg_parameters, optimized_energy, params, md_I0