def bijiao(img_1, img_2):
    """Print PSNR, MSE and SSIM between two images, each trimmed to 2 decimals."""
    scores = (
        metrics.peak_signal_noise_ratio(img_1, img_2),
        metrics.mean_squared_error(img_1, img_2),
        metrics.structural_similarity(img_1, img_2, multichannel=True),
    )
    # Round via the '%.2f' format so output matches e.g. 28.45 / 30.0.
    for score in scores:
        print(float('%.2f' % score))
    print('\n')
def evaluate(target, output):
    """Compute, print and return (PSNR, SSIM) for an output vs. its target.

    Both images are assumed to be in the 0-255 range (data_range=255).
    """
    psnr = peak_signal_noise_ratio(target, output, data_range=255)
    ssim = structural_similarity(
        target,
        output,
        data_range=255,
        gaussian_weights=True,
        use_sample_covariance=False,
        multichannel=True,
    )
    print(f'psnr: {psnr}')
    print(f'ssim: {ssim}')
    return psnr, ssim
def test_denoise_nl_means_3d(fast_mode, dtype):
    """NL-means denoising of a noisy 3D volume must improve PSNR,
    both with the true sigma and with sigma=0."""
    clean = np.zeros((12, 12, 8), dtype=dtype)
    clean[5:-5, 5:-5, 2:-2] = 1.
    sigma = 0.3
    noisy = (clean + sigma * np.random.randn(*clean.shape)).astype(dtype)
    psnr_noisy = peak_signal_noise_ratio(clean, noisy)
    for s in [sigma, 0]:
        denoised = restoration.denoise_nl_means(noisy, 3, 4, h=0.75 * sigma,
                                                fast_mode=fast_mode,
                                                multichannel=False, sigma=s)
        # make sure noise is reduced
        assert_(peak_signal_noise_ratio(clean, denoised) > psnr_noisy)
def test_wavelet_denoising(img, multichannel, convert2ycbcr):
    """Wavelet denoising should raise PSNR; fewer levels help less, and a
    larger sigma removes more signal energy."""
    rng = np.random.RandomState(1234)
    sigma = 0.1
    noisy = np.clip(img + sigma * rng.randn(*img.shape), 0, 1)

    # Verify that SNR is improved when true sigma is used
    denoised = restoration.denoise_wavelet(noisy, sigma=sigma,
                                           multichannel=multichannel,
                                           convert2ycbcr=convert2ycbcr,
                                           rescale_sigma=True)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    assert_(psnr_denoised > psnr_noisy)

    # Verify that SNR is improved with internally estimated sigma
    denoised = restoration.denoise_wavelet(noisy,
                                           multichannel=multichannel,
                                           convert2ycbcr=convert2ycbcr,
                                           rescale_sigma=True)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    assert_(psnr_denoised > psnr_noisy)

    # SNR is improved less with 1 wavelet level than with the default.
    denoised_1 = restoration.denoise_wavelet(noisy,
                                             multichannel=multichannel,
                                             wavelet_levels=1,
                                             convert2ycbcr=convert2ycbcr,
                                             rescale_sigma=True)
    psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)
    assert_(psnr_denoised > psnr_denoised_1)
    assert_(psnr_denoised_1 > psnr_noisy)

    # Test changing noise_std (higher threshold, so less energy in signal)
    res1 = restoration.denoise_wavelet(noisy, sigma=2 * sigma,
                                       multichannel=multichannel,
                                       rescale_sigma=True)
    res2 = restoration.denoise_wavelet(noisy, sigma=sigma,
                                       multichannel=multichannel,
                                       rescale_sigma=True)
    assert_(np.sum(res1 ** 2) <= np.sum(res2 ** 2))
def test_wavelet_denoising_channel_axis(channel_axis, convert2ycbcr):
    """Wavelet denoising must improve PSNR regardless of where the channel
    axis is placed."""
    rng = np.random.RandomState(1234)
    sigma = 0.1
    img = astro_odd
    noisy = np.clip(img + sigma * rng.randn(*img.shape), 0, 1)

    # Move channels from the last axis to the requested position.
    img = np.moveaxis(img, -1, channel_axis)
    noisy = np.moveaxis(noisy, -1, channel_axis)

    # Verify that SNR is improved when true sigma is used
    denoised = restoration.denoise_wavelet(noisy, sigma=sigma,
                                           channel_axis=channel_axis,
                                           convert2ycbcr=convert2ycbcr,
                                           rescale_sigma=True)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    assert_(psnr_denoised > psnr_noisy)
def forward(self, img1, img2):
    """Return the PSNR score (float) between two (H, W, C) uint8 ndarrays."""
    # Work on copies so the caller's arrays are never modified.
    a = img1.copy()
    b = img2.copy()
    return peak_signal_noise_ratio(a, b, data_range=self.data_range)
def test_psnr1(self):
    """Cross-check the project's psnr against skimage's implementation
    on the noisy coins image; skip when scikit-image is unavailable."""
    if has_skimage:
        # NOTE(review): data_range comes from self.dc1 while the images
        # compared are id_coins/id_coins_noisy — confirm dc1.max() is the
        # intended range here.
        res1 = psnr(self.id_coins, self.id_coins_noisy,
                    data_range=self.dc1.max())
        res2 = peak_signal_noise_ratio(self.id_coins.as_array(),
                                       self.id_coins_noisy.as_array())
        print('Check PSNR for CAMERA image gaussian noise')
        np.testing.assert_almost_equal(res1, res2, decimal=3)
    else:
        # BUG FIX: skip message said "scikit0-image".
        self.skipTest("scikit-image not present ... skipping")
def test_PSNR_float():
    """PSNR must agree across uint8, float64 and mixed-precision inputs."""
    p_uint8 = peak_signal_noise_ratio(cam, cam_noisy)
    p_float64 = peak_signal_noise_ratio(cam / 255., cam_noisy / 255.,
                                        data_range=1)
    assert_almost_equal(p_uint8, p_float64, decimal=5)

    # mixed precision inputs
    p_mixed = peak_signal_noise_ratio(cam / 255., np.float32(cam_noisy / 255.),
                                      data_range=1)
    assert_almost_equal(p_mixed, p_float64, decimal=5)

    # mismatched dtype results in a warning if data_range is unspecified
    with expected_warnings(['Inputs have mismatched dtype']):
        p_mixed = peak_signal_noise_ratio(cam / 255.,
                                          np.float32(cam_noisy / 255.))
    assert_almost_equal(p_mixed, p_float64, decimal=5)
def batch_psnr(img, imclean, data_range):
    """Mean PSNR over a batch of (N, C, H, W) torch tensors."""
    noisy = img.data.cpu().numpy().astype(np.float32)
    clean = imclean.data.cpu().numpy().astype(np.float32)
    scores = [
        peak_signal_noise_ratio(clean[i], noisy[i], data_range=data_range)
        for i in range(noisy.shape[0])
    ]
    return sum(scores) / noisy.shape[0]
def PSNR(op, t):
    """Mean PSNR between two batches, converting each sample with to_img."""
    batch_size = op.shape[0]
    total = sum(
        peak_signal_noise_ratio(to_img(op[i]).cpu().detach().numpy(),
                                to_img(t[i]).cpu().detach().numpy())
        for i in range(batch_size)
    )
    return total / batch_size
def compression(image_path, save_folder, sample_percentages):
    """Compressed-sensing reconstruction of an image at several sample rates.

    For each percentage: randomly sample pixels (same indices for every
    channel), reconstruct each channel with OWL-QN, save the mask and the
    recovered image, and finally report SSIM / MSE / PSNR per rate.
    """
    # One metric slot per sample percentage.
    ssim_results = np.zeros(len(sample_percentages))
    mse_results = np.zeros(len(sample_percentages))
    psnr_results = np.zeros(len(sample_percentages))

    # Read in image and calculate dimensions.
    original_image, ny, nx, n_channels = read_image(image_path, as_gray=False)
    final_result = np.zeros(original_image.shape, dtype='uint8')
    masks = np.zeros(original_image.shape, dtype='uint8')

    for i, sample_percentage in enumerate(sample_percentages):
        print(f'Samples = {100 * sample_percentage}%')
        start = time()

        # Get random sample indices so they're the same for all channels
        ri = generate_random_samples(nx * ny, sample_percentage)

        for j in range(n_channels):
            # Sample the channel; keep the samples (b) and the masked image.
            b, masks[:, :, j] = create_mask(original_image[:, :, j], ri)
            # Reconstruct this channel using OWL-QN.
            final_result[:, :, j] = owl_qn_cs(original_image[:, :, j],
                                              nx, ny, ri, b)

        # Same data range for SSIM and PSNR (computed once).
        data_range = final_result.max() - final_result.min()
        ssim_results[i] = structural_similarity(original_image, final_result,
                                                data_range=data_range,
                                                multichannel=True)
        mse_results[i] = mean_squared_error(original_image, final_result)
        psnr_results[i] = peak_signal_noise_ratio(original_image, final_result,
                                                  data_range=data_range)

        # Save images.
        imageio.imwrite(
            f'results/{save_folder}mask_{trunc( 100 * sample_percentage )}.png',
            masks)
        imageio.imwrite(
            f'results/{save_folder}recover_{trunc( 100 * sample_percentage )}.png',
            final_result)
        print(f'Elapsed Time: {time() - start:.3f} seconds.\n')

    # Final report, one line per sample rate.
    for i, sample_percentage in enumerate(sample_percentages):
        print(
            f'{trunc( 100 * sample_percentage ): 6.2f}%:\n SSIM: {ssim_results[ i ]}\n MSE: {mse_results[ i ]}\n PSNR: {psnr_results[ i ]}\n'
        )
def evaluate(root_path):
    """Evaluate a saved SRResNet checkpoint on the DIV2K validation set.

    Loads the checkpoint under ``root_path/checkpoints``, runs it over the
    validation images one at a time, and prints the average Y-channel PSNR
    and SSIM.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    # Model checkpoints
    filepath = root_path / "checkpoints/resnet_checkpoint.pth"

    # Load model SRResNet
    srresnet = torch.load(filepath, device).to(device)
    srresnet.eval()
    model = srresnet

    test_dataset = SRImageDataset(dataset_dir=root_path / "sr/DIV2K_valid_HR",
                                  crop_size=0, is_valid=True)
    test_dataloader = DataLoader(dataset=test_dataset, batch_size=1,
                                 num_workers=4)
    print(f"Length of train loader: {len(test_dataloader)}")

    # Keep track of the PSNRs and the SSIMs across batches
    PSNRs = AverageMeter("PSNR")
    SSIMs = AverageMeter("SSIM")

    # BUG FIX: the original iterated over an undefined name `test_loader`
    # (NameError at runtime); iterate the dataloader constructed above.
    for i, (lr, hr, lr_real) in enumerate(test_dataloader):
        # Move to default device
        lr = lr.to(device)
        hr = hr.to(device)

        # Forward prop.
        sr = model(lr)

        # NOTE(review): sr/hr already carry a batch dim from the dataloader;
        # confirm convert_image expects the extra unsqueeze(0) here.
        sr_y = convert_image(sr.unsqueeze(0), source='[-1, 1]',
                             target='y-channel').squeeze(0)
        hr_y = convert_image(hr.unsqueeze(0), source='[-1, 1]',
                             target='y-channel').squeeze(0)
        hr_y = hr_y.detach().cpu().numpy()
        sr_y = sr_y.detach().cpu().numpy()

        # Calculate PSNR and SSIM
        psnr = peak_signal_noise_ratio(hr_y, sr_y, data_range=255.)
        ssim = structural_similarity(hr_y, sr_y, data_range=255.)
        PSNRs.update(psnr, lr.size(0))
        SSIMs.update(ssim, lr.size(0))

    # Print average PSNR and SSIM
    print('PSNR - {psnrs.avg:.3f}'.format(psnrs=PSNRs))
    print('SSIM - {ssims.avg:.3f}'.format(ssims=SSIMs))
    print("\n")
def compute(self, image, image_test):
    """Return (MAE, MSE, PSNR, SSIM) between two images in the 0-255 range."""
    mae = np.average(np.abs(image - image_test))
    mse = mean_squared_error(image, image_test)
    psnr = peak_signal_noise_ratio(image, image_test, data_range=255)
    ssim = structural_similarity(image, image_test, data_range=255)
    return mae, mse, psnr, ssim
def test_wavelet_denoising(img, multichannel, convert2ycbcr):
    """Wavelet denoising (channel_axis API) should raise PSNR; fewer levels
    help less, and a larger sigma removes more signal energy."""
    rng = np.random.default_rng(1234)
    sigma = 0.1
    noisy = np.clip(img + sigma * rng.standard_normal(img.shape), 0, 1)
    channel_axis = -1 if multichannel else None

    # Verify that SNR is improved when true sigma is used
    denoised = restoration.denoise_wavelet(noisy, sigma=sigma,
                                           channel_axis=channel_axis,
                                           convert2ycbcr=convert2ycbcr,
                                           rescale_sigma=True)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    assert psnr_denoised > psnr_noisy

    # Verify that SNR is improved with internally estimated sigma
    denoised = restoration.denoise_wavelet(noisy,
                                           channel_axis=channel_axis,
                                           convert2ycbcr=convert2ycbcr,
                                           rescale_sigma=True)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    assert psnr_denoised > psnr_noisy

    # SNR is improved less with 1 wavelet level than with the default.
    denoised_1 = restoration.denoise_wavelet(noisy,
                                             channel_axis=channel_axis,
                                             wavelet_levels=1,
                                             convert2ycbcr=convert2ycbcr,
                                             rescale_sigma=True)
    psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)
    assert psnr_denoised > psnr_denoised_1
    assert psnr_denoised_1 > psnr_noisy

    # Test changing noise_std (higher threshold, so less energy in signal)
    res1 = restoration.denoise_wavelet(noisy, sigma=2 * sigma,
                                       channel_axis=channel_axis,
                                       rescale_sigma=True)
    res2 = restoration.denoise_wavelet(noisy, sigma=sigma,
                                       channel_axis=channel_axis,
                                       rescale_sigma=True)
    assert np.sum(res1 ** 2) <= np.sum(res2 ** 2)
def test_psnr_matches_skimage_rgb():
    """The torch PSNR implementation must agree with skimage on random RGB."""
    prediction = torch.rand(1, 3, 256, 256)
    target = torch.rand(1, 3, 256, 256)
    pm_measure = psnr(prediction, target, reduction='mean')
    sk_measure = peak_signal_noise_ratio(prediction.squeeze().numpy(),
                                         target.squeeze().numpy(),
                                         data_range=1.0)
    expected = torch.tensor(sk_measure, dtype=pm_measure.dtype)
    assert torch.isclose(pm_measure, expected), \
        f"Must match Sklearn version. Got: {pm_measure} and skimage: {sk_measure}"
def psnr_ssim_upsample(lr, hr):
    """(PSNR, SSIM) of a bicubically upsampled LR image vs. its HR reference.

    `lr` and `hr` are file paths; the LR image is resized to the HR size.
    """
    hr_img = Image.open(hr)
    lr_img = Image.open(lr)
    width, height = hr_img.size
    upsampled = np.asarray(lr_img.resize((width, height), Image.BICUBIC))
    hr_arr = np.asarray(hr_img)
    return (skm.peak_signal_noise_ratio(hr_arr, upsampled),
            skm.structural_similarity(hr_arr, upsampled, multichannel=True))
def test_wavelet_denoising_levels(rescale_sigma):
    """Multi-level wavelet denoising beats a single level; invalid level
    counts warn or raise."""
    rng = np.random.RandomState(1234)
    ndim = 2
    N = 256
    wavelet = 'db1'

    # Generate a very simple test image
    img = 0.2 * np.ones((N, ) * ndim)
    img[(slice(5, 13), ) * ndim] = 0.8
    sigma = 0.1
    noisy = np.clip(img + sigma * rng.randn(*img.shape), 0, 1)

    denoised = restoration.denoise_wavelet(noisy, wavelet=wavelet,
                                           rescale_sigma=rescale_sigma)
    denoised_1 = restoration.denoise_wavelet(noisy, wavelet=wavelet,
                                             wavelet_levels=1,
                                             rescale_sigma=rescale_sigma)
    psnr_noisy = peak_signal_noise_ratio(img, noisy)
    psnr_denoised = peak_signal_noise_ratio(img, denoised)
    psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)

    # multi-level case should outperform single level case
    assert_(psnr_denoised > psnr_denoised_1 > psnr_noisy)

    # invalid number of wavelet levels results in a ValueError or UserWarning
    max_level = pywt.dwt_max_level(np.min(img.shape),
                                   pywt.Wavelet(wavelet).dec_len)
    # exceeding max_level raises a UserWarning in PyWavelets >= 1.0.0
    with expected_warnings(
            ['all coefficients will experience boundary effects']):
        restoration.denoise_wavelet(noisy, wavelet=wavelet,
                                    wavelet_levels=max_level + 1,
                                    rescale_sigma=rescale_sigma)
    with testing.raises(ValueError):
        restoration.denoise_wavelet(noisy, wavelet=wavelet, wavelet_levels=-1,
                                    rescale_sigma=rescale_sigma)
def psrn_on_callBack(x_noise, x_target, autoencoder, randomImage):
    """Denoise one sample with the autoencoder.

    Returns (psnr, denoised, clean, noisy) with all images squeezed.
    """
    # Add a batch dimension for the model, then strip it everywhere below.
    noisy = np.expand_dims(x_noise[randomImage], axis=0)
    denoised = np.squeeze(autoencoder.predict(noisy))
    clean = np.squeeze(x_target[randomImage])
    noisy = np.squeeze(noisy)
    psnr = peak_signal_noise_ratio(denoised, clean, data_range=None)
    return psnr, denoised, clean, noisy
def Compare2ImagesFromLibPSNR(first_image: str, second_image: str) -> float:
    """Compute PSNR between two image files via skimage, print it with the
    elapsed time, and return the PSNR value.

    BUG FIX: the return annotation was `-> None` although the function
    returns the PSNR; corrected to `-> float`.
    """
    image1 = cv2.imread(first_image)
    image2 = cv2.imread(second_image)
    start = time.time()
    error = peak_signal_noise_ratio(image1, image2)
    end = time.time()
    print('The peak signal-to-noise ratio from skimage.metrics.peak_signal_noise_ratio is '
          + str(error) + ' time ' + str(end - start))
    return error
def m_psnr(img1: Tensor, img2: Tensor) -> 'Tensor':
    """Mean PSNR over paired batches, denormalized to the 0-255 range."""
    scores = [
        peak_signal_noise_ratio(denorm_img(a), denorm_img(b), data_range=255)
        for a, b in zip(img1, img2)
    ]
    return tensor(sum(scores) / len(scores))  # mean
def get_psnr(root, res_dir):
    """Print per-image and mean PSNR between ground-truth and result JPEGs."""
    df = get_trip(root)
    scores = []
    for _, gt_name, res_name, f in df.itertuples():
        gt_path = osp.join(root, 'jpgc', gt_name + '.jpg')
        res_path = osp.join(root, res_dir, res_name + '.jpg')
        gt_img, res_img = (imageio.imread(p) for p in (gt_path, res_path))
        scores.append(peak_signal_noise_ratio(gt_img, res_img))
    print(scores)
    print(np.mean(scores))
def test_psnr2(self):
    """Cross-check the project's psnr against skimage's implementation on
    random ImageData; skip when scikit-image is unavailable."""
    if has_skimage:
        res1 = psnr(self.dc1, self.dc2, data_range=self.dc1.max())
        res2 = peak_signal_noise_ratio(self.dc1.as_array(),
                                       self.dc2.as_array())
        print('Check PSNR for random ImageData')
        np.testing.assert_almost_equal(res1, res2, decimal=3)
    else:
        # BUG FIX: skip message said "scikit0-image".
        self.skipTest("scikit-image not present ... skipping")
def psnr(img, img_clean):
    """PSNR between two images; torch tensors are first moved to numpy, then
    both inputs are converted to uint8 before comparison."""
    if isinstance(img, torch.Tensor):
        img = img.data.cpu().numpy()
    if isinstance(img_clean, torch.Tensor):
        img_clean = img_clean.data.cpu().numpy()
    return peak_signal_noise_ratio(img_as_ubyte(img),
                                   img_as_ubyte(img_clean),
                                   data_range=255)
def psnr(im1, im2):
    """PSNR between two uint8 images after rescaling both to [0, 1]."""
    def to_unit_range(im):
        # Map [0, 255] onto [0.0, 1.0] as float64.
        lo, hi = 0, 255
        return (im.astype(np.float64) - lo) / (hi - lo)

    return peak_signal_noise_ratio(to_unit_range(im1), to_unit_range(im2),
                                   data_range=1)
def test_psnr_matches_skimage_greyscale():
    """The torch PSNR implementation must agree with skimage on greyscale."""
    x = torch.rand(1, 1, 256, 256)
    y = torch.rand(1, 1, 256, 256)
    pm_measure = psnr(x, y, reduction='mean')
    sk_measure = peak_signal_noise_ratio(x.squeeze().numpy(),
                                         y.squeeze().numpy(),
                                         data_range=1.0)
    expected = torch.tensor(sk_measure, dtype=pm_measure.dtype)
    assert torch.isclose(pm_measure, expected), \
        f"Must match Sklearn version. Got: {pm_measure} and skimage: {sk_measure}"
def parallel_comparison(args):
    """Return [SSIM, PSNR, MSE, NRMSE] between an original image and its
    compressed counterpart; `args` is a (orig_path, comp_path) pair."""
    orig_path, comp_path = args
    orig_img = io.imread(orig_path)
    comp_img = io.imread(comp_path)
    return [
        metrics.structural_similarity(orig_img, comp_img, multichannel=True),
        metrics.peak_signal_noise_ratio(orig_img, comp_img),
        metrics.mean_squared_error(orig_img, comp_img),
        metrics.normalized_root_mse(orig_img, comp_img),
    ]
def test_wavelet_denoising_deprecated():
    """The deprecated `multichannel` argument still works but emits warnings,
    both as a keyword and positionally."""
    rng = np.random.RandomState(1234)
    sigma = 0.1
    img = astro_odd
    noisy = np.clip(img + sigma * rng.randn(*img.shape), 0, 1)

    with expected_warnings(["`multichannel` is a deprecated argument"]):
        # Verify that SNR is improved when true sigma is used
        denoised = restoration.denoise_wavelet(noisy, sigma=sigma,
                                               multichannel=True,
                                               rescale_sigma=True)
        psnr_noisy = peak_signal_noise_ratio(img, noisy)
        psnr_denoised = peak_signal_noise_ratio(img, denoised)
        assert_(psnr_denoised > psnr_noisy)

    # providing multichannel argument positionally also warns
    with expected_warnings(["Providing the `multichannel` argument"]):
        restoration.denoise_wavelet(noisy, sigma, 'db1', 'soft', None, True,
                                    rescale_sigma=True)
def psnr_fn(y, y_pred):
    """Peak signal-to-noise ratio between labels and predictions.

    Args:
        y (List): list of labels
        y_pred (List): list of predictions

    Returns:
        float: PSNR as computed by scikit-image.
    """
    # Imported lazily so skimage is only required when this metric is used.
    from skimage.metrics import peak_signal_noise_ratio
    return peak_signal_noise_ratio(y, y_pred)
def _finalize_iteration(self):
    """Blend the two network outputs with the mask and record the PSNR of the
    composite against the original image; flag step two as done above 30 dB."""
    left = torch_to_np(self.left_net_outputs[0])
    right = torch_to_np(self.right_net_outputs[0])
    mask = torch_to_np(self.mask_net_outputs[0])
    original_image = self.images[0]
    composite = mask * left + (1 - mask) * right
    self.current_psnr = peak_signal_noise_ratio(original_image, composite)
    # TODO: run only in the second step
    if self.current_psnr > 30:
        self.second_step_done = True
def sk_psnr(image_true_pil, image_test_pil):
    """Peak Signal to Noise Ratio (PSNR) between two PIL images (0-255)."""
    true_np = np.array(image_true_pil)
    test_np = np.array(image_test_pil)
    return sk_metrics.peak_signal_noise_ratio(image_true=true_np,
                                              image_test=test_np,
                                              data_range=255)