def batchPSNRandSSIMGPU(derain_output, clear_label, normalize=True):
    """Compute mean PSNR and SSIM over a batch of image tensors.

    Args:
        derain_output: restored images, NCHW torch tensor (may live on GPU).
        clear_label: ground-truth images, NCHW torch tensor.
        normalize: when True, inputs are assumed ImageNet-normalized and are
            mapped back toward [0, 1] (and to NHWC layout) before scoring.

    Returns:
        (mean_psnr, mean_ssim) as plain Python floats.
    """
    Img = derain_output.data.cpu().numpy().astype(np.float32)
    Iclean = clear_label.data.cpu().numpy().astype(np.float32)
    if normalize:
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        # NCHW -> NHWC so the per-channel std/mean broadcast over the last axis.
        Img = np.transpose(Img, (0, 2, 3, 1))
        Iclean = np.transpose(Iclean, (0, 2, 3, 1))
        Img = Img * std + mean
        Iclean = Iclean * std + mean
    # NOTE(review): in the non-normalized path the arrays stay NCHW, so
    # multichannel=True treats the last (width) axis as channels — confirm
    # that callers only pass normalize=False for data where that is intended.
    PSNR = 0.0
    SSIM = 0.0
    for i in range(Img.shape[0]):
        PSNR += compare_psnr(Iclean[i], Img[i], 1)
        SSIM += compare_ssim(Iclean[i], Img[i], multichannel=True)
    # Bug fix: the non-normalized branch returned torch.Tensor(scalar), which
    # raises TypeError for a float scalar (and would build an uninitialized
    # tensor for an int one). Both branches now consistently return floats.
    return PSNR / Img.shape[0], SSIM / Img.shape[0]
def update_impl(self, *, prediction, target):
    """Accumulate the PSNR of each image pair in the batch into the metric state."""
    batch_size = prediction.shape[0]
    preds = prediction.cpu().numpy()
    refs = target.cpu().numpy()
    total = 0.0
    for pred_img, ref_img in zip(preds, refs):
        total += compare_psnr(im_test=pred_img, im_true=ref_img,
                              data_range=self.data_range)
    self._sum += total
    self._num_examples += batch_size
def main():
    """Blur every BSD68 image with a Gaussian kernel, deblur it, report PSNR.

    Saves the first `opt.nb_img_saved` clear/blurred/restored triples (plus a
    side-by-side grid) under saved_images/, then prints the mean PSNR over
    the whole dataset.
    """
    kernel = Kernels.kernel_2d(opt.gksize, opt.gsigma)
    pad = (opt.gksize - 1) // 2  # border width stripped before scoring
    files_source = glob.glob(os.path.join('data', 'BSD68', '*.png'))
    files_source.sort()
    target_folder = os.path.join(
        'saved_images',
        'baseline_gksize%d_gsigma%d' % (opt.gksize, opt.gsigma))
    if not os.path.exists(target_folder):
        os.makedirs(target_folder)
    losses = []
    for idx, f in enumerate(files_source):
        # Load (first channel only) and blur the image.
        Img_clear = cv2.imread(f)[:, :, 0]
        Img_blurred = cv2.filter2D(np.float32(Img_clear), -1, kernel,
                                   borderType=cv2.BORDER_CONSTANT)
        Img_blurred = normalize(np.float32(Img_blurred))
        Img_clear = normalize(np.float32(Img_clear))
        # Unblur the image.
        # NOTE(review): both image arguments are Img_blurred — verify the
        # first should not be a different (e.g. zero/initial) estimate.
        IOut_clear = deblurring_estimate(Img_blurred, Img_blurred, kernel,
                                         opt.reg_weight)
        # Score the interior only, excluding the constant-border region.
        loss = compare_psnr(IOut_clear[pad:-pad, pad:-pad],
                            Img_clear[pad:-pad, pad:-pad], data_range=1.)
        losses.append(loss)
        if idx < opt.nb_img_saved:
            # Bug fix: f.split('/') breaks on Windows-style paths; use
            # os.path helpers to get the extension-less base name.
            base_name = os.path.splitext(os.path.basename(f))[0]
            # Convert back to 8-bit grayscale.
            I_clear = np.uint8(denormalize(Img_clear))
            I_blurred = np.uint8(denormalize(Img_blurred))
            Out_clear = np.uint8(denormalize(IOut_clear))
            cv2.imwrite(
                os.path.join(target_folder, base_name + '_in_clear.png'),
                I_clear)
            cv2.imwrite(
                os.path.join(target_folder, base_name + '_in_blurred.png'),
                I_blurred)
            cv2.imwrite(
                os.path.join(target_folder, base_name + '_out_clear.png'),
                Out_clear)
            # Also save the three images side by side.
            grid = np.hstack((I_clear, I_blurred, Out_clear))
            cv2.imwrite(os.path.join(target_folder, base_name + '_grid.png'),
                        grid)
    # Just print the mean PSNR as it's not that useful to create a file to
    # store a single number in this case.
    print('Mean PSNR is', np.mean(losses))
def batch_PSNR(img, imclean, data_range):
    """Return the PSNR averaged over all images of the batch."""
    restored = img.data.cpu().numpy().astype(np.float32)
    reference = imclean.data.cpu().numpy().astype(np.float32)
    total = 0
    for reference_i, restored_i in zip(reference, restored):
        total += compare_psnr(reference_i, restored_i, data_range=data_range)
    return total / restored.shape[0]
def batch_PSNR(img, imclean, data_range):
    """Mean PSNR across the batch dimension.

    Args:
        img: restored images, NCHW torch tensor.
        imclean: reference images, NCHW torch tensor.
        data_range: distance between the minimum and maximum possible values.

    Raises:
        ValueError: if the running PSNR sum becomes NaN (e.g. invalid pixel
            values). This replaces the leftover `import pdb; pdb.set_trace()`
            debugger trap, which would hang any non-interactive run.
    """
    Img = img.data.cpu().numpy().astype(np.float32)
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    PSNR = 0
    for i in range(Img.shape[0]):
        PSNR += compare_psnr(Iclean[i, :, :, :], Img[i, :, :, :],
                             data_range=data_range)
        if math.isnan(PSNR):
            raise ValueError('PSNR became NaN at batch index %d' % i)
    return PSNR / Img.shape[0]
def batch_PSNR(img, imclean, data_range):
    """Mean PSNR over a batch after simulating 8-bit quantization.

    The restored images are clipped to [0, 255], cast to uint8 and rescaled
    to [0, 1] before comparison, so the score reflects what a saved 8-bit
    image would achieve.
    """
    Img = img.data.cpu().numpy().astype(np.float32)
    # Quantize to uint8 and back to emulate the precision of a saved image.
    Img = np.uint8(np.clip(Img * 255, 0, 255))
    Img = Img.astype(np.float32) / 255.
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    PSNR = 0
    # Bug fix: the loop previously ran over range(1) (first image only) while
    # the sum was divided by the full batch size, under-reporting PSNR for
    # any batch larger than one. Also removed the debug print of Img.shape.
    for i in range(Img.shape[0]):
        PSNR += compare_psnr(Iclean[i, :, :, :], Img[i, :, :, :],
                             data_range=data_range)
    return PSNR / Img.shape[0]
def forward(self, img, imclean, data_range):
    """Return the batch-averaged PSNR of `img` against `imclean`."""
    out = img.data.detach().cpu().numpy().astype(np.float32)
    ref = imclean.data.detach().cpu().numpy().astype(np.float32)
    batch = out.shape[0]
    total = sum(
        compare_psnr(ref[k], out[k], data_range=data_range)
        for k in range(batch))
    return total / batch
def batchPSNR(img, imclean, data_range):
    """Average PSNR of a batch of restored images against their references."""
    restored = img.data.cpu().numpy().astype(np.float32)
    clean = imclean.data.cpu().numpy().astype(np.float32)
    scores = [compare_psnr(clean_i, restored_i, data_range=data_range)
              for clean_i, restored_i in zip(clean, restored)]
    return sum(scores) / restored.shape[0]
def batch_PSNR(img, imclean, data_range):
    """Mean PSNR over the batch, ignoring images with infinite PSNR.

    Identical image pairs yield an infinite PSNR; those entries are skipped
    so they do not dominate the average.

    Returns:
        The mean of the finite per-image PSNRs, or float('inf') when every
        pair was identical (previously this raised ZeroDivisionError because
        the list of kept scores was empty).
    """
    Img = img.data.cpu().numpy().astype(np.float32)
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    PSNR = []
    for i in range(Img.shape[0]):
        psnr = compare_psnr(Iclean[i, :, :, :], Img[i, :, :, :],
                            data_range=data_range)
        if np.isinf(psnr):
            continue
        PSNR.append(psnr)
    # Bug fix: guard against an empty list (all pairs were identical).
    if not PSNR:
        return float('inf')
    return sum(PSNR) / len(PSNR)
def batch_psnr(img, imclean, data_range):
    """Sum the per-image PSNRs of the batch and return their mean."""
    restored = img.data.cpu().numpy().astype(np.float32)
    reference = imclean.data.cpu().numpy().astype(np.float32)
    batch = restored.shape[0]
    total = sum(
        compare_psnr(reference[k], restored[k], data_range=data_range)
        for k in range(batch))
    return total / batch
def batch_psnr(img, imclean, data_range):
    """Mean PSNR and SSIM over the batch, computed on channel 0 only.

    Returns:
        (mean_psnr, mean_ssim) averaged over the batch dimension.
    """
    restored = img.data.cpu().numpy().astype(np.float32)
    reference = imclean.data.cpu().numpy().astype(np.float32)
    batch = restored.shape[0]
    psnr_total = 0.0
    ssim_total = 0.0
    for k in range(batch):
        ref_plane = reference[k, 0, :, :]
        out_plane = restored[k, 0, :, :]
        psnr_total += compare_psnr(ref_plane, out_plane, data_range=data_range)
        ssim_total += compare_ssim(ref_plane, out_plane, data_range=data_range)
    return psnr_total / batch, ssim_total / batch
def inference(test_data, model, device):
    """Evaluate `model` on a test set across noise levels sigma = 10..100.

    For each grayscale PNG in testing_data/<test_data>/, Gaussian noise at
    each std in 10..100 (step 10) is added, the model denoises it, and the
    PSNR of the denoised image against the clean source is recorded.

    Returns:
        (psnr_results, psnr_results2): two (num_images, num_noise_levels)
        arrays. NOTE(review): psnr_results is never written and stays all
        zeros — kept only for interface compatibility; confirm callers.
    """
    files_source = glob.glob(os.path.join('testing_data', test_data, '*.png'))
    files_source.sort()
    std_values = list(range(10, 101, 10))
    psnr_results = np.zeros((len(files_source), len(std_values)))
    psnr_results2 = np.zeros((len(files_source), len(std_values)))
    model.eval()  # hoisted: no need to re-enter eval mode per noise level
    for img_idx, f in enumerate(files_source):
        Img = cv2.imread(f)
        Img = np.float32(Img[:, :, 0]) / 255.
        # Crop to even (h, w) as required by the UNet down/upsampling.
        if Img.shape[0] % 2 == 1:
            Img = Img[1:, :]
        if Img.shape[1] % 2 == 1:
            Img = Img[:, 1:]
        Img = np.expand_dims(Img, 0)
        Img = np.expand_dims(Img, 1)
        ISource = torch.Tensor(Img).to(device)
        for noise_idx, noise_std in enumerate(std_values):
            # Bug fix: the noise was created with .cuda(), which crashes when
            # `device` is the CPU; empty_like inherits ISource's device.
            noise = torch.empty_like(ISource).normal_(
                mean=0, std=noise_std / 255.)
            # Create the noisy image, clamped to the valid range.
            INoisy = torch.clamp(ISource + noise, 0., 1.).to(device)
            # Feed forward, then clamp the prediction.
            with torch.no_grad():
                IDenoised = torch.clamp(model(INoisy), 0., 1.)
            out_np = IDenoised.data.cpu().numpy().astype(np.float32)
            clean_np = ISource.data.cpu().numpy().astype(np.float32)
            PSNR = 0.
            # Batch size is 1 here, so the sum equals the single PSNR value.
            for i in range(out_np.shape[0]):
                PSNR += compare_psnr(clean_np[i, :, :, :], out_np[i, :, :, :],
                                     data_range=1.)
            psnr_results2[img_idx, noise_idx] = PSNR
    return psnr_results, psnr_results2
def batch_psnr(img, imclean, data_range):
    r"""Computes the PSNR along the batch dimension (not pixel-wise)

    Args:
        img: a `torch.Tensor` containing the restored image
        imclean: a `torch.Tensor` containing the reference image
        data_range: The data range of the input image (distance between
            minimum and maximum possible values).
    """
    img_cpu = img.data.cpu().numpy().astype(np.float32)
    imgclean = imclean.data.cpu().numpy().astype(np.float32)
    psnr = 0
    for i in range(img_cpu.shape[0]):
        # Bug fix: `dynamic_range` is the long-removed pre-0.12 scikit-image
        # keyword; current compare_psnr only accepts `data_range` (which is
        # also what every other PSNR helper in this file uses).
        psnr += compare_psnr(imgclean[i, :, :, :], img_cpu[i, :, :, :],
                             data_range=data_range)
    return psnr / img_cpu.shape[0]
def sequence_psnr(seq, seqclean, data_range=1.0):
    r"""Return the PSNR averaged over all frames of a sequence (not pixel-wise).

    Args:
        seq: array of dims [num_frames, C, H, W], C=1 grayscale or C=3 RGB,
            H and W are even.
        seqclean: reference array with the same layout as `seq`.
        data_range: distance between the minimum and maximum possible values
            of the input data. Defaults to 1.0.
    """
    assert len(seq.shape) == 4
    num_frames = seq.shape[0]
    total = 0
    for frame, clean_frame in zip(seq, seqclean):
        total += compare_psnr(frame, clean_frame, data_range=data_range)
    return total / num_frames
def batch_PSNR(img, iminput, imclean, data_range, pth):
    # Computes the batch-mean PSNR of `img` against `imclean`, while also
    # drawing `img` / `iminput` side by side with matplotlib and saving the
    # figure to `pth`.
    # NOTE(review): the subplot titled "Noisy Image" shows `img` and the one
    # titled "Recon. Image" shows `iminput`; given the argument names these
    # labels look swapped — confirm against the caller.
    Img = img.data.cpu().numpy().astype(np.float32)
    Iminput = iminput.data.cpu().numpy().astype(np.float32)
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    PSNR = 0
    for i in range(Img.shape[0]):
        print(i, Img.shape)
        plt.subplot(1, 2, 1)
        plt.imshow(Img[i, 0, :, :] * 255)
        if i == 0:
            plt.title("Noisy Image")
        plt.subplot(1, 2, 2)
        plt.imshow(Iminput[i, 0, :, :] * 255)
        if i == 0:
            plt.title("Recon. Image")
        PSNR += compare_psnr(Iclean[i, :, :, :], Img[i, :, :, :],
                             data_range=data_range)
    # Save once after the loop — presumably only the last batch element's
    # panels end up in the written figure (structure reconstructed from a
    # whitespace-mangled source; TODO confirm savefig placement).
    plt.savefig(pth)
    return (PSNR / Img.shape[0])
def caculate_psnr_ssim(Img, Iclean):
    """Return (PSNR, SSIM) of `Img` against `Iclean` for 8-bit color images.

    (The misspelled name is kept unchanged for API compatibility.)
    """
    return (compare_psnr(Iclean, Img, 255),
            compare_ssim(Iclean, Img, multichannel=True))
# Apply the second low-pass kernel to each noise level (sigma = 20/10/5/1).
I_20_gray_denoised_lp2 = cv2.filter2D(I_20_gray, -1, kernel_lp2)
I_10_gray_denoised_lp2 = cv2.filter2D(I_10_gray, -1, kernel_lp2)
I_5_gray_denoised_lp2 = cv2.filter2D(I_5_gray, -1, kernel_lp2)
I_1_gray_denoised_lp2 = cv2.filter2D(I_1_gray, -1, kernel_lp2)
# Apply both high-pass kernels to each noise level.
I_20_gray_denoised_hp1 = cv2.filter2D(I_20_gray, -1, kernel_hp1)
I_10_gray_denoised_hp1 = cv2.filter2D(I_10_gray, -1, kernel_hp1)
I_5_gray_denoised_hp1 = cv2.filter2D(I_5_gray, -1, kernel_hp1)
I_1_gray_denoised_hp1 = cv2.filter2D(I_1_gray, -1, kernel_hp1)
I_20_gray_denoised_hp2 = cv2.filter2D(I_20_gray, -1, kernel_hp2)
I_10_gray_denoised_hp2 = cv2.filter2D(I_10_gray, -1, kernel_hp2)
I_5_gray_denoised_hp2 = cv2.filter2D(I_5_gray, -1, kernel_hp2)
I_1_gray_denoised_hp2 = cv2.filter2D(I_1_gray, -1, kernel_hp2)
# Print the PSNR (data range 1.0) of each low-pass output against its own
# noisy input, lp1 then lp2, per noise level. (The lp1 outputs are computed
# above this excerpt.)
print(compare_psnr(I_20_gray_denoised_lp1, I_20_gray, 1.))
print(compare_psnr(I_20_gray_denoised_lp2, I_20_gray, 1.))
print(compare_psnr(I_10_gray_denoised_lp1, I_10_gray, 1.))
print(compare_psnr(I_10_gray_denoised_lp2, I_10_gray, 1.))
print(compare_psnr(I_5_gray_denoised_lp1, I_5_gray, 1.))
print(compare_psnr(I_5_gray_denoised_lp2, I_5_gray, 1.))
print(compare_psnr(I_1_gray_denoised_lp1, I_1_gray, 1.))
print(compare_psnr(I_1_gray_denoised_lp2, I_1_gray, 1.))
# Save the high-pass outputs for visual inspection.
plt.imsave('I_20_gray_denoised_hp1.png', I_20_gray_denoised_hp1)
plt.imsave('I_20_gray_denoised_hp2.png', I_20_gray_denoised_hp2)
plt.imsave('I_10_gray_denoised_hp1.png', I_10_gray_denoised_hp1)
    # (Tail of a parser.add_argument(...) call whose opening lies before this
    # excerpt.)
    help=
    'Directory of reference images (png, tiff), should only contain these images'
)
args = parser.parse_args()
files = get_files_pattern(args.refdir, '*')
psnr = 0.
# Accumulators for the sequence-level PSNR: total squared error (acc) and
# total pixel count (acc2) across all compared images.
acc = np.zeros([1], np.float64)
acc2 = np.zeros([1], np.float64)
for f in files:
    ref = imageio.imread(args.refdir + '/' + f)
    # Prefer a same-named file in imgdir; fall back to the .tiff variant
    # (replaces the last three characters of the name, assumed ".png" tail).
    if os.path.exists(args.imgdir + '/' + f):
        img = imageio.imread(args.imgdir + '/' + f)
    elif os.path.exists(args.imgdir + '/' + f[:-3] + 'tiff'):
        img = tifffile.imread(args.imgdir + '/' + f[:-3] + 'tiff')
    else:
        # NOTE(review): skipped files still count in the len(files) divisor
        # below, so "Average PSNR" is biased low whenever files are missing.
        continue
    ref = np.squeeze(ref)
    img = np.squeeze(img)
    psnr_img = compare_psnr(ref, img, data_range=255)
    print(f, psnr_img)
    psnr += psnr_img
    ref = np.asarray(ref, dtype=np.float64)
    img = np.asarray(img, dtype=np.float64)
    acc += np.sum(np.square(ref - img))
    acc2 += ref.size
print('Average PSNR: ', psnr / len(files))
# The PSNR below is the correct one for video
print('PSNR on the sequence: ', 10 * np.log10(
    (255.**2) / (acc[0] / acc2[0])))
def train(SIGMA, Measure, Phi, pad, block_size, image_name, MODEL_PATH, Img,
          gamma, is_Bayesian=True, is_MAP=False, Epsilon=1e-3):
    # Fits an untrained Decoder to a single compressed-sensing measurement
    # (deep-image-prior style), then Monte-Carlo-averages 100 forward passes
    # and reports the PSNR against the ground-truth image `Img`.
    #
    # Args:
    #   SIGMA: measurement noise std, drives the stopping criterion and the
    #     regularization weights.
    #   Measure: the noisy CS measurement the network output is matched to.
    #   Phi: sensing matrix. NOTE(review): this parameter is never used —
    #     the measurement below is taken with the global `Phi_tensor`
    #     instead; confirm this is intentional.
    #   gamma: pair of regularization weights (weight-decay, log-sigma).
    #   is_Bayesian / is_MAP: select the loss variant.
    #   Epsilon: additive floor on SIGMA in the weight/stop formulas.
    # Returns: the averaged reconstruction as an int32 array scaled to
    #   [0, 255] (HWC if the image has 3 channels).
    os.makedirs(MODEL_PATH, exist_ok=True)
    measure_num = Measure.numel()
    b, c, w, h = Img.size()
    net = Decoder(in_channels=opts.in_channels,
                  middle_channels=opts.middle_channels,
                  out_channels=c,
                  is_Bayesian=is_Bayesian,
                  img_size=[w, h])
    # Fixed random latent input; the network upsamples by a factor of 32.
    Input = torch.randn(b, opts.in_channels, int(w / 32), int(h / 32)).cuda()
    net.cuda()
    now = datetime.now()
    optimizer = Adam(net.parameters(), lr=1e-4)
    criterion = nn.MSELoss(reduction='sum')
    criterion.cuda()
    for i in range(opts.iters):
        net.train()
        net.zero_grad()
        Img_rec = net(Input)
        # NOTE(review): uses the global Phi_tensor, not the Phi argument.
        net_output = get_cs_mearsurement(Img_rec, Phi_tensor, pad, block_size)
        residual = criterion(net_output, Measure)
        if is_Bayesian:
            # Bayesian variant: weight-norm penalty minus log-sigma reward,
            # both scaled by the (noise + Epsilon)^2 level.
            log_sigma = net.log_sigma_sum()
            para_square = net.para_square()
            loss = residual + gamma[0] * (
                (SIGMA + Epsilon)**2) * para_square - gamma[1] * (
                    (SIGMA + Epsilon)**2) * log_sigma
        else:
            if is_MAP:
                # MAP variant: plain L2 weight decay over all parameters.
                weight_sum = 0
                for para in net.parameters():
                    weight_sum += gamma[0] * (
                        (SIGMA + Epsilon)**2) * torch.sum(abs(para)**2)
                loss = residual + weight_sum
            else:
                loss = residual
        optimizer.zero_grad()
        loss.backward(retain_graph=True)
        optimizer.step()
        if (i + 1) % 1000 == 0:
            Img_rec_single = net(Input)
            now = datetime.now()
            # NOTE(review): stdout is permanently redirected to the log file
            # here (and again below) — side effect outlives this function.
            sys.stdout = Logger(MODEL_PATH + 'results.txt')
            print(image_name, "loss in ", i + 1, ":", loss.item(),
                  "residual:", residual.item(), now.strftime("%H:%M:%S"))
            # Early stop once the data term falls below the expected noise
            # energy (checked only every 1000 iterations).
            if residual < measure_num * (SIGMA + Epsilon)**2:
                break
    # Monte-Carlo average of 100 stochastic forward passes.
    Img_rec_aver = np.zeros(Img.size(), dtype=np.float32)
    aver_num = 100
    with torch.no_grad():
        for j in range(aver_num):
            # `del` relies on Img_rec existing from the training loop above
            # (i.e. opts.iters >= 1).
            del Img_rec
            Img_rec = net(Input)
            optimizer.zero_grad()
            Img_rec_cpu = Img_rec.cpu().detach().numpy()
            Img_rec_aver += Img_rec_cpu
    Img_rec_aver = Img_rec_aver / aver_num
    Img_rec_aver = np.squeeze(Img_rec_aver)
    Img_np = Img.cpu().numpy()
    Img_np = np.squeeze(Img_np)
    # PSNR of the averaged reconstruction (data range assumed 1.0).
    psnr_mc = compare_psnr(Img_np, Img_rec_aver, 1.)
    # Rescale to 8-bit-like int32 and to HWC for cv2.imwrite.
    Img_rec_aver = np.int32(Img_rec_aver * 255)
    if Img_rec_aver.ndim == 3:
        Img_rec_aver = Img_rec_aver.transpose(1, 2, 0)
    now = datetime.now()
    sys.stdout = Logger(MODEL_PATH + 'results.txt')
    print("gamma: ", gamma, image_name, "psnr: ", psnr_mc,
          now.strftime("%H:%M:%S"))
    cv2.imwrite(MODEL_PATH + image_name + '_rec.png', Img_rec_aver)
    # Persist everything needed to resume or inspect this run.
    torch.save(
        {
            'net': net,
            'net_input': Input,
            'iters': i + 1,
            'net_state_dict': net.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': loss,
        }, MODEL_PATH + image_name + '.pth')
    return Img_rec_aver
# Body of a per-image evaluation loop: the loop header and the definitions of
# Img, img, Img_Name, Phi_tensor, psnr, ssim, i, etc. lie outside this
# excerpt (structure reconstructed from whitespace-mangled source).
Img = np.expand_dims(Img, axis=0)
Img_tensor = torch.FloatTensor(Img).cuda()
# Compressed-sensing measurement of the image, corrupted by Gaussian noise.
Measure = get_cs_mearsurement(Img_tensor, Phi_tensor, pad, block_size)
Measure += torch.FloatTensor(Measure.size()).normal_(
    mean=0, std=SIGMA).cuda()
# Image name without directory or extension, used for logs and output files.
img_prex = Img_Name[Img_Name.rfind("/") + 1:Img_Name.rfind(".")]
Img_rec = train(SIGMA, Measure, Phi_tensor, pad, block_size, img_prex,
                MODEL_PATH, Img_tensor, gamma, is_Bayesian, is_MAP, Epsilon)
# train() returns an int32 image in [0, 255]; rescale to [0, 1] for scoring.
psnr[i] = compare_psnr(Img_rec / 255., img, 1.)
ssim[i] = compare_ssim(Img_rec / 255., img, data_range=1.)
# NOTE(review): stdout is redirected to the log file as a side effect.
sys.stdout = Logger(MODEL_PATH + 'psnr.txt')
print("gamma:", gamma, 'epsilon:', Epsilon, "cs_ratio:", CS_ratio, "simga:",
      SIGMA, img_prex, "psnr/ssim:", psnr[i], "/", ssim[i])
# Manual loop counter increment.
i = i + 1
# After the loop: report dataset-average PSNR/SSIM.
psnr_aver = np.mean(psnr)
ssim_aver = np.mean(ssim)
sys.stdout = Logger(MODEL_PATH + 'psnr.txt')
print("gamma:", gamma, 'epsilon:', Epsilon, "cs_ratio:", CS_ratio, "simga:",
      SIGMA, "average psnr/ssim:", psnr_aver, "/", ssim_aver, '\n')