def apply_filter(self, original_image, *args):
    r = original_image[:, :, 0]
    b = original_image[:, :, 2]
    r_boost_lower = self.channel_adjust(r, [
        0, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9, 0.95, 1.0])
    b_more = np.clip(b + 0.03, 0, 1.0)
    merged = np.stack([r_boost_lower, original_image[:, :, 1], b_more], axis=2)
    # Unsharp mask: boost the merged image and subtract a lightly blurred copy.
    # The blur uses the custom FFT-based Gaussian filter.
    blurred = gaussian_filter(merged, 0.001)
    final = np.clip(merged * 1.3 - blurred * 0.3, 0, 1.0)
    b = final[:, :, 2]
    b_adjusted = self.channel_adjust(b, [
        0, 0.047, 0.198, 0.251, 0.318, 0.392, 0.42, 0.439, 0.475, 0.561,
        0.58, 0.627, 0.671, 0.733, 0.847, 0.925, 1])
    final[:, :, 2] = b_adjusted
    return final
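# The filters above call a `channel_adjust` helper that is not shown here. A minimal
# sketch of what it presumably does, assuming it remaps channel values through
# piecewise-linear interpolation over evenly spaced control points (this sketch is an
# assumption, not the original implementation):
import numpy as np

def channel_adjust(channel, values):
    # Flatten, interpolate against evenly spaced breakpoints in [0, 1], restore shape.
    orig_shape = channel.shape
    flat = channel.flatten()
    adjusted = np.interp(flat, np.linspace(0, 1, len(values)), values)
    return adjusted.reshape(orig_shape)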
def ssim(x: torch.Tensor, y: torch.Tensor, kernel_size: int = 11, kernel_sigma: float = 1.5,
         data_range: Union[int, float] = 1., reduction: str = 'mean', full: bool = False,
         k1: float = 0.01, k2: float = 0.03) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    r"""Interface of Structural Similarity (SSIM) index.

    Args:
        x: Batch of images. Required to be 2D (H, W), 3D (C, H, W), 4D (N, C, H, W)
            or 5D (N, C, H, W, 2), channels first.
        y: Batch of images. Required to be 2D (H, W), 3D (C, H, W), 4D (N, C, H, W)
            or 5D (N, C, H, W, 2), channels first.
        kernel_size: The side-length of the sliding window used in comparison. Must be an odd value.
        kernel_sigma: Sigma of normal distribution.
        data_range: Value range of input images (usually 1.0 or 255).
        reduction: Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied.
        full: Return cs map or not.
        k1: Algorithm parameter, K1 (small constant, see [1]).
        k2: Algorithm parameter, K2 (small constant, see [1]).
            Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.

    Returns:
        Value of Structural Similarity (SSIM) index. In case of 5D input tensors,
        complex value is returned as a tensor of size 2.

    References:
        .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004).
           Image quality assessment: From error visibility to structural similarity.
           IEEE Transactions on Image Processing, 13, 600-612.
           https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf,
           :DOI:`10.1109/TIP.2003.819861`
           https://github.com/photosynthesis-team/piq/blob/master/piq/ssim.py
    """
    if isinstance(x, torch.ByteTensor) or isinstance(y, torch.ByteTensor):
        x = x.type(torch.float32)
        y = y.type(torch.float32)

    kernel = gaussian_filter(kernel_size, kernel_sigma).repeat(x.size(1), 1, 1, 1).to(y)
    _compute_ssim_per_channel = _ssim_per_channel_complex if x.dim() == 5 else _ssim_per_channel
    ssim_map, cs_map = _compute_ssim_per_channel(x=x, y=y, kernel=kernel, data_range=data_range, k1=k1, k2=k2)
    ssim_val = ssim_map.mean(1)
    cs = cs_map.mean(1)

    if reduction != 'none':
        reduction_operation = {'mean': torch.mean, 'sum': torch.sum}
        ssim_val = reduction_operation[reduction](ssim_val, dim=0)
        cs = reduction_operation[reduction](cs, dim=0)

    if full:
        return ssim_val, cs
    return ssim_val
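# A minimal usage sketch for the piq-style `ssim` above, assuming its helpers
# (`gaussian_filter`, `_ssim_per_channel`) are available in the same module:
import torch

x = torch.rand(4, 3, 256, 256)            # batch of predicted images in [0, 1]
y = torch.rand(4, 3, 256, 256)            # batch of reference images in [0, 1]
score = ssim(x, y, data_range=1.)         # scalar tensor ('mean' reduction)
per_image = ssim(x, y, reduction='none')  # one SSIM value per image in the batch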
def apply_filter(self, original_image, *args):
    # Convert to grayscale and use the same gray plane for all three channels.
    gray = rgb2gray(original_image)
    merged = np.stack([gray, gray, gray], axis=2)
    # Apply the Gaussian filter in the frequency domain to average the colour values.
    blurred = gaussian_filter(merged, 0.1)
    final = np.clip(merged + blurred * 0.3, 0, 1.0)
    return final
def apply_filter(self, original_image, *args):
    # Note: the image is cropped to rows 100 and below before filtering.
    r = original_image[100:, :, 0]
    b = original_image[100:, :, 2]
    r_boost_lower = self.channel_adjust(r, [
        0, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9, 0.95, 1.0])
    # b_more = np.clip(b + 0.2, 0, 1.0)
    merged = np.stack([r_boost_lower, original_image[100:, :, 1], b], axis=2)
    # Apply the Gaussian filter in the frequency domain to average the colour values.
    blurred = gaussian_filter(merged, 0.1)
    final = np.clip(merged + blurred * 0.3, 0, 1.0)
    return final
def apply_filter(self, original_image, *args):
    r = original_image[:, :, 0]
    b = original_image[:, :, 2]
    r_boost_lower = self.channel_adjust(r, [
        0, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9, 0.95, 1.0])
    b_more = np.clip(b + 0.2, 0, 1.0)
    merged = np.stack([r_boost_lower, original_image[:, :, 1], b_more], axis=2)
    # Note: this has been changed to use the custom-defined Gaussian filter based on the FFT.
    blurred = gaussian_filter(merged, 0.1)
    final = np.clip(merged + blurred * 0.3, 0, 1.0)
    return final
def generate_gaussian_octave(input, s, sigma):
    """
    The initial image is incrementally convolved with Gaussians to produce images
    separated by a constant factor k in scale space, i.e. k = 2**(1/s), where s is
    the number of images we want in each DoG octave.

    :param input: input image in a specific octave
    :param s: number of images in each DoG octave (5 was chosen in the paper)
    :param sigma: prior smoothing for each octave (1.6 was chosen in the paper)
    :return: list of s + 3 progressively blurred images forming one Gaussian octave
    """
    octave = [input]
    k = 2 ** (1 / s)
    g_kernel = gaussian_filter(k * sigma)
    for i in range(s + 2):
        next_layer = cv2.filter2D(octave[-1], -1, g_kernel)
        octave.append(next_layer)
    return octave
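# The SIFT snippets call a one-argument `gaussian_filter(sigma)` that is not shown
# here. A minimal sketch, assuming it builds a normalized 2D Gaussian kernel whose
# side length is derived from sigma (the exact size rule in the original code may
# differ), plus the usual difference-of-Gaussians construction from the octave:
import numpy as np

def gaussian_kernel_2d(sigma, radius=None):
    # Hypothetical helper: (2*radius + 1) x (2*radius + 1) normalized Gaussian kernel.
    if radius is None:
        radius = int(2 * np.ceil(sigma) + 1)
    ax = np.arange(-radius, radius + 1)
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))
    return kernel / kernel.sum()

def generate_dog_octave(gaussian_octave):
    # DoG layers are differences of adjacent Gaussian-blurred images.
    return [b - a for a, b in zip(gaussian_octave, gaussian_octave[1:])]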
def calcOrientation(img, kp):
    auxList = []
    sigma = sigma_c * kp.scale
    radius = int(2 * np.ceil(sigma) + 1)
    hist = np.zeros(bins, dtype=np.float32)
    kernel = gaussian_filter(sigma)

    for i in range(-radius, radius + 1):
        y = kp.y + i
        if isOut(img, 1, y):
            continue
        for j in range(-radius, radius + 1):
            x = kp.x + j
            if isOut(img, x, 1):
                continue
            mag, theta = get_grad(img, x, y)
            weight = kernel[i + radius, j + radius] * mag
            binn = quantize_orientation(theta, bins) - 1
            hist[binn] += weight

    maxBin = np.argmax(hist)
    maxBinVal = np.max(hist)
    kp.setDir(maxBin * 10)

    # Check whether other bins reach at least 85% of the maximum (the SIFT paper
    # suggests 80%); each such bin spawns an extra keypoint with its own orientation.
    for binno, k in enumerate(hist):
        if binno == maxBin:
            continue
        if k > .85 * maxBinVal:
            nkp = handleKeypoints.KeyPoint(kp.x, kp.y, kp.scale, binno * 10)
            auxList.append(nkp)
    return auxList
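# `get_grad` and `quantize_orientation` are assumed helpers. A minimal sketch of what
# they typically do in SIFT orientation assignment (names, degree convention and the
# 1-based bin index are assumptions, not the original implementation):
import numpy as np

def get_grad(img, x, y):
    # Finite-difference gradient magnitude and orientation (degrees) at (x, y).
    dx = img[y, x + 1] - img[y, x - 1]
    dy = img[y + 1, x] - img[y - 1, x]
    magnitude = np.sqrt(dx ** 2 + dy ** 2)
    theta = np.degrees(np.arctan2(dy, dx)) % 360
    return magnitude, theta

def quantize_orientation(theta, bins):
    # Map an orientation in [0, 360) degrees to a 1-based histogram bin index.
    bin_width = 360 / bins
    return int(np.floor(theta / bin_width)) + 1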
# Build the SNR table.
snrframe = pd.DataFrame(index=['hard', 'soft'])

# Denoising and filter-parameter optimisation.
Yh = np.zeros(Y.shape)
Ys = np.zeros(Y.shape)
Yg = np.zeros(Y.shape)
parameters = np.linspace(1, 5, 600)

# def filt(y, tau):
#     return utils.gaussian_filter(y, tau * sigma)

for i in range(Y.shape[0]):
    mug = sc.opt(parameters, utils.gaussian_filter, Yb[i], Y[i])
    Yg[i] = utils.gaussian_filter(Yb[i], mug * sigma)

snrframe.loc[:, 'gaussian'] = pd.Series(data=[utils.snr(Y, Yg), utils.snr(Y, Yg)], index=snrframe.index)
snrframe.loc[:, 'noisy'] = pd.Series(data=[utils.snr(Y, Yb), utils.snr(Y, Yb)], index=snrframe.index)

for w in pywt.wavelist('db'):
    print(w)

    def filt_hard(y, tau):
        return utils.wave_hard_filter(y, sigma, tau, w)

    def filt_soft(y, tau):
        return utils.wave_soft_filter(y, sigma, tau, w)

    for i in range(Y.shape[0]):
        tauhard = sc.opt(parameters, filt_hard, Yb[i], Y[i])
        tausoft = sc.opt(parameters, filt_soft, Yb[i], Y[i])
        Yh[i] = utils.wave_hard_filter(Yb[i], sigma, tauhard, w)
        Ys[i] = utils.wave_soft_filter(Yb[i], sigma, tausoft, w)

    snrframe.loc[:, w] = pd.Series(data=[utils.snr(Y, Yh), utils.snr(Y, Ys)], index=snrframe.index)
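# `sc.opt` is an assumed helper that searches the candidate parameters for the one
# giving the best reconstruction of the clean signal. A minimal sketch under that
# assumption (the signature and the SNR criterion are guesses based on how it is
# called above, reusing `utils.snr` from this script):
def opt(parameters, filt, y_noisy, y_clean):
    best_param, best_snr = parameters[0], float('-inf')
    for tau in parameters:
        candidate_snr = utils.snr(y_clean, filt(y_noisy, tau))
        if candidate_snr > best_snr:
            best_param, best_snr = tau, candidate_snr
    return best_param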
def anhir_method(source, target, echo=True):
    ##### Step 0 - Parameters, Smoothing and Initial Resampling #####

    b_time_total = time.time()

    params = dict()
    params["echo"] = echo
    params["initial_alignment_size"] = 2048
    params["centroid_rotation_size"] = 512
    params["nonrigid_registration_size"] = 2048
    params["gaussian_divider"] = 1.24
    params['nr_method'] = "dm"  # pd or dm

    nr_params = dict()
    nr_params['echo'] = echo
    nr_params['global_min_size'] = 64
    nr_params['global_max_size'] = 512
    nr_params['local_min_size'] = 64
    nr_params['local_max_size'] = 768
    nr_params['global_iterations'] = 100
    nr_params['inner_iterations'] = 15
    nr_params['outer_iterations'] = 5
    nr_params['L_smooth'] = 1e7
    nr_params['L_sigma'] = 1
    nr_params['R_sigma'] = 1
    nr_params['M_sigma'] = 2
    nr_params['x_box'] = 19
    nr_params['y_box'] = 19
    nr_params['spacing'] = (1.0, 1.0)
    nr_params['update_mode'] = "composition"
    nr_params['gradient_mode'] = "symmetric"
    nr_params['diffusion_sigma'] = (2.0, 2.0)
    nr_params['fluid_sigma'] = (0.5, 0.5)
    nr_params['mind_sigma'] = (1.0, 1.0)
    nr_params['mind_radius'] = (2, 2)
    nr_params['early_stop'] = 10

    return_dict = dict()

    initial_resample_ratio = utils.calculate_resample_size(
        source, target,
        max(params["initial_alignment_size"], params["nonrigid_registration_size"]))
    source = utils.gaussian_filter(
        source, initial_resample_ratio / params["gaussian_divider"])
    target = utils.gaussian_filter(
        target, initial_resample_ratio / params["gaussian_divider"])

    if echo:
        print()
        print("Registration start.")
        print()
        print("Source shape: ", source.shape)
        print("Target shape: ", target.shape)

    ##### Step 1 - Preprocessing #####

    if echo:
        print()
        print("Preprocessing start.")
        print()

    b_time_r = time.time()
    p_source, p_target = utils.resample_both(source, target, initial_resample_ratio)
    e_time_r = time.time()
    tt_source = p_source.copy()

    if echo:
        print("Initially resampled source shape: ", p_source.shape)
        print("Initially resampled target shape: ", p_target.shape)
        print("Time for initial resampling: ", e_time_r - b_time_r, " seconds.")

    b_time_p = time.time()
    p_source, p_target, t_source, t_target, source_shift, target_shift = pp.preprocess(
        p_source, p_target, echo)
    e_time_p = time.time()
    return_dict["preprocessing_time"] = e_time_p - b_time_p

    if echo:
        print("Source shift: ", source_shift)
        print("Target shift: ", target_shift)
        print("Preprocessed source shape: ", p_source.shape)
        print("Preprocessed target shape: ", p_target.shape)
        print("Time for preprocessing: ", e_time_p - b_time_p, " seconds.")
        print()
        print("Preprocessing end.")
        print()

    ##### Step 2 - Initial Alignment #####

    b_ia_time = time.time()

    if echo:
        print("Initial alignment start.")
        print()

    ia_resample_ratio = params["nonrigid_registration_size"] / params["initial_alignment_size"]
    to_cv_source, to_cv_target = utils.resample_both(p_source, p_target, ia_resample_ratio)

    cv_failed = False
    ct_failed = False
    ia_failed = False
    i_u_x, i_u_y, initial_transform, cv_failed = ia.cv_initial_alignment(
        to_cv_source, to_cv_target, echo)

    if cv_failed:
        if echo:
            print("CV failed.")
            print()
            print("CT start.")
            print()
        ia_resample_ratio = params["nonrigid_registration_size"] / params["centroid_rotation_size"]
        to_ct_source, to_ct_target = utils.resample_both(
            p_source, p_target, ia_resample_ratio)
        i_u_x, i_u_y, initial_transform, ct_failed = ia.ct_initial_alignment(
            to_ct_source, to_ct_target, echo)
        if ct_failed:
            if echo:
                print()
                print("CT failed.")
                print("Initial alignment failed.")
            ia_failed = True

    if ia_failed:
        i_u_x, i_u_y = np.zeros(p_source.shape), np.zeros(p_target.shape)
    else:
        y_size, x_size = np.shape(p_source)
        i_u_x, i_u_y = utils.resample_displacement_field(
            i_u_x, i_u_y, x_size, y_size)

    e_ia_time = time.time()
    return_dict["cv_failed"] = cv_failed
    return_dict["ct_failed"] = ct_failed
    return_dict["ia_failed"] = ia_failed
    return_dict["initial_alignment_time"] = e_ia_time - b_ia_time

    if echo:
        print()
        print("Elapsed time for initial alignment: ", e_ia_time - b_ia_time, " seconds.")
        print("Initial alignment end.")
        print()

    ia_source = utils.warp_image(p_source, i_u_x, i_u_y)
    u_x_g, u_y_g = nr.partial_data_registration_global(ia_source, p_target, nr_params)
    u_x_g, u_y_g = utils.compose_vector_fields(i_u_x, i_u_y, u_x_g, u_y_g)
    ng_source = utils.warp_image(p_source, u_x_g, u_y_g)
    success = fd.detect_mind_failure(ia_source, p_target, ng_source, echo)
    if not success:
        u_x_g, u_y_g = i_u_x, i_u_y
        ng_source = ia_source
    return_dict["ng_failed"] = not success

    ##### Step 3 - Nonrigid Registration #####

    b_nr_time = time.time()

    if echo:
        print("Nonrigid registration start.")
        print()

    if params['nr_method'] == "dm":
        u_x_nr, u_y_nr = nr.dm(ng_source, p_target, nr_params)
        u_x_nr, u_y_nr = utils.compose_vector_fields(u_x_g, u_y_g, u_x_nr, u_y_nr)
        nr_source = utils.warp_image(p_source, u_x_nr, u_y_nr)
    elif params['nr_method'] == "pd":
        u_x_nr, u_y_nr = nr.partial_data_registration_local(
            ng_source, p_target, nr_params)
        u_x_nr, u_y_nr = utils.compose_vector_fields(u_x_g, u_y_g, u_x_nr, u_y_nr)
        nr_source = utils.warp_image(p_source, u_x_nr, u_y_nr)

    e_nr_time = time.time()
    return_dict["nonrigid_registration_time"] = e_nr_time - b_nr_time

    if echo:
        print()
        print("Elapsed time for nonrigid registration: ", e_nr_time - b_nr_time, " seconds.")
        print("Nonrigid registration end.")
        print()

    ##### Step 4 - Warping function creation #####

    def warp_original_landmarks(source_landmarks):
        source_landmarks = source_landmarks / initial_resample_ratio
        source_landmarks = utils.pad_landmarks(source_landmarks, target_shift[0], target_shift[2])
        source_landmarks = utils.transform_landmarks(source_landmarks, u_x_nr, u_y_nr)
        source_l_x, source_r_x, source_l_y, source_r_y = source_shift
        target_l_x, target_r_x, target_l_y, target_r_y = target_shift
        source_landmarks[:, 0] = source_landmarks[:, 0] - source_l_x
        source_landmarks[:, 1] = source_landmarks[:, 1] - source_l_y
        out_y_size, out_x_size = np.shape(source)
        in_y_size, in_x_size = np.shape(tt_source)
        source_landmarks[:, 0] = source_landmarks[:, 0] * out_x_size / in_x_size
        source_landmarks[:, 1] = source_landmarks[:, 1] * out_y_size / in_y_size
        return source_landmarks

    def warp_resampled_landmarks(source_landmarks, target_landmarks):
        source_landmarks = source_landmarks / initial_resample_ratio
        target_landmarks = target_landmarks / initial_resample_ratio
        source_landmarks = utils.pad_landmarks(source_landmarks, target_shift[0], target_shift[2])
        target_landmarks = utils.pad_landmarks(target_landmarks, source_shift[0], source_shift[2])
        transformed_source_landmarks = utils.transform_landmarks(
            source_landmarks, u_x_nr, u_y_nr)
        return source_landmarks, transformed_source_landmarks, target_landmarks

    e_time_total = time.time()
    return_dict["total_time"] = e_time_total - b_time_total

    if echo:
        print("Total registration time: ", e_time_total - b_time_total, " seconds.")
        print("End of registration.")
        print()

    return p_source, p_target, ia_source, ng_source, nr_source, i_u_x, i_u_y, u_x_nr, u_y_nr, \
        warp_resampled_landmarks, warp_original_landmarks, return_dict
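# A minimal usage sketch for `anhir_method`, assuming `source` and `target` are 2D
# grayscale numpy arrays and that the module's `utils`, `pp`, `ia`, `nr` and `fd`
# dependencies are importable. `load_grayscale` and `source_landmarks` are
# hypothetical placeholders, not part of the original code:
source = load_grayscale("source_slide.png")  # hypothetical loader -> 2D float array
target = load_grayscale("target_slide.png")

(p_source, p_target, ia_source, ng_source, nr_source,
 i_u_x, i_u_y, u_x_nr, u_y_nr,
 warp_resampled_landmarks, warp_original_landmarks, return_dict) = anhir_method(
    source, target, echo=False)

print("Total time [s]:", return_dict["total_time"])
warped = warp_original_landmarks(source_landmarks)  # landmarks in original image coordinates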
binWidth = int(360 / bins)
windowSize = 16
sigma = windowSize / 6
radius = int(windowSize / 2)
toRadVal = 180.0 / np.pi
hist = [[np.zeros(bins, dtype=np.float32) for _ in range(4)] for _ in range(4)]

for kp in kpList:
    i = 0
    cx, cy, s = kp.x, kp.y, kp.scale
    kpDir = kp.dir
    theta = kpDir * np.pi / 180.0
    kernel = gaussian_filter(windowSize / 6)

    t, l = max(0, cy - windowSize // 2), max(0, cx - windowSize // 2)
    b, r = min(img.shape[0], cy + windowSize // 2 + 1), min(img.shape[1], cx + windowSize // 2 + 1)
    patch = img[t:b, l:r]
    dx, dy = get_patch_grads(patch)

    # Trim the Gaussian weighting kernel so it matches the clipped patch when the
    # window runs past the image border: rows first, then columns.
    if dx.shape[0] < windowSize + 1:
        if t == 0:
            kernel = kernel[kernel.shape[0] - dx.shape[0]:]
        else:
            kernel = kernel[:dx.shape[0]]
    if dx.shape[1] < windowSize + 1:
        if l == 0:
            kernel = kernel[:, kernel.shape[1] - dx.shape[1]:]
        else:
            kernel = kernel[:, :dx.shape[1]]
def apply_filter(self, original_image, amount, *args):
    # Blur the image with the Gaussian filter; the filter parameter is the
    # reciprocal of `amount`.
    blurred = gaussian_filter(original_image, 1 / amount)
    return blurred