# ZSSR-style single-image test: saves a bicubic upscaling as a baseline and the
# model output, then optionally reports PSNR/SSIM against a ground-truth image.
# compute_psnr/compute_ssim and the global `device` are defined elsewhere in the repo.
import os
import PIL.Image
import torch
from torchvision import transforms


def test(name_img, model, img, sr_factor, gt=False, img_gt=None):
    out_file = r'../output'
    if not os.path.exists(out_file):
        os.makedirs(out_file)
    model.eval()

    # Bicubic baseline at the target resolution.
    img_bicubic = img.resize(
        (int(img.size[0] * sr_factor), int(img.size[1] * sr_factor)),
        resample=PIL.Image.BICUBIC)
    img_bicubic.save(os.path.join(out_file, name_img + '_bicubic.png'))

    inp = transforms.ToTensor()(img_bicubic)  # renamed from `input` to avoid shadowing the builtin
    inp = torch.unsqueeze(inp, 0)
    inp = inp.to(device)
    with torch.no_grad():
        out = model(inp)
    out = out.data.cpu()
    out = out.clamp(min=0, max=1)
    out = torch.squeeze(out, 0)
    out = transforms.ToPILImage()(out)
    out.save(os.path.join(out_file, name_img + '_zssr.png'))

    if gt:
        ssim_bicubic = compute_ssim(img_gt, img_bicubic)
        psnr_bicubic = compute_psnr(img_gt, img_bicubic)
        ssim_zssr = compute_ssim(img_gt, out)
        psnr_zssr = compute_psnr(img_gt, out)
        print("psnr_bicubic:\t{:.2f}".format(psnr_bicubic))
        print("ssim_bicubic:\t{:.4f}".format(ssim_bicubic))
        print("psnr_zssr:\t{:.2f}".format(psnr_zssr))
        print("ssim_zssr:\t{:.4f}".format(ssim_zssr))
        # Use a context manager so the log file is always closed.
        with open(os.path.join(out_file, 'PSNR_and_SSIM.txt'), mode='a') as fo:
            fo.write(str(name_img) + ':\n')
            fo.write(
                '\tbicubic: psnr:{:.2f}\tssim:{:.4f}\tzssr: psnr:{:.2f}\tssim:{:.4f}\n'
                .format(psnr_bicubic, ssim_bicubic, psnr_zssr, ssim_zssr))
        return ssim_bicubic, psnr_bicubic, ssim_zssr, psnr_zssr
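# A minimal usage sketch for the test() above. The file paths, checkpoint name,
# and upscale factor are assumptions for illustration; `device`, compute_psnr,
# and compute_ssim must already be defined, as in the function itself.
if __name__ == '__main__':
    img_lr = PIL.Image.open('../data/example_lr.png')   # hypothetical input
    img_hr = PIL.Image.open('../data/example_hr.png')   # hypothetical ground truth
    model = torch.load('../weights/zssr.pth', map_location=device)  # hypothetical checkpoint
    test('example', model, img_lr, sr_factor=2, gt=True, img_gt=img_hr)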
# Evaluation loop for a PAE-constrained model: measures PSNR, SSIM, the maximum
# absolute pixel error ("bound"), and average inference time. Assumes
# single-channel images with batch size 1; compute_ssim is defined elsewhere.
import time
from math import log10

import numpy as np
import torch


def test(path, testing_loader, pae):
    i = 1
    model = torch.load(path, map_location='cuda:0')
    model.eval()
    mse = torch.nn.MSELoss()
    avg_ssim = 0
    avg_psnr = 0
    max_bound = 0
    avg_time_consume = 0
    with torch.no_grad():
        for batch_num, (data, target) in enumerate(testing_loader):
            data, target = data.cuda(), target.cuda()
            time_start = time.time()
            prediction = model(data)
            torch.cuda.synchronize()  # wait for the GPU so the timing is meaningful
            time_end = time.time()
            avg_time_consume += (time_end - time_start)

            # Undo the pae-scaled normalization back to pixel units.
            prediction = prediction.cpu() * 128 * pae * 0.7 + 128
            target = target.cpu() * 128 * pae * 0.7 + 128
            mse_value = mse(prediction, target)
            psnr = 10 * log10(65025 / mse_value.item())  # 65025 = 255 ** 2
            avg_psnr += psnr

            # Track the worst-case absolute pixel error across the test set.
            abs_value = torch.abs(prediction - target).numpy()
            if abs_value.max() > max_bound:
                max_bound = abs_value.max()

            # Collapse (1, 1, H, W) tensors to H x W arrays for SSIM.
            prediction = prediction.numpy()
            target = target.numpy()
            prediction = np.reshape(prediction, (prediction.shape[2], prediction.shape[3]))
            target = np.reshape(target, (target.shape[2], target.shape[3]))
            avg_ssim += compute_ssim(prediction, target)
            i += 1

    print(" The pae is: {}".format(pae))
    print(" Average PSNR: {:.2f} dB".format(avg_psnr / len(testing_loader)))
    print(" Average SSIM: {:.4f}".format(avg_ssim / len(testing_loader)))  # SSIM is unitless, not dB
    print(" bound: {:.3f}".format(max_bound))
    print(" Average time consumption: {:.3f} s".format(avg_time_consume / len(testing_loader)))
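# The test() above (and the variant that follows) compute PSNR as
# 10 * log10(MAX^2 / MSE) with MAX = 255, which is where the constant
# 65025 = 255 ** 2 comes from. A standalone sketch of that computation
# (a hypothetical helper; the repo's compute_psnr may differ):
def psnr_8bit(pred, target):
    """PSNR in dB between two arrays scaled to [0, 255]."""
    mse = np.mean((np.asarray(pred, dtype=np.float64) -
                   np.asarray(target, dtype=np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 10 * np.log10(65025.0 / mse)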
# Variant of the evaluation loop above without timing: reports a running PSNR
# via a progress bar and returns the averaged metrics. progress_bar and
# compute_ssim are defined elsewhere in the repo.
def test(path, testing_loader, pae):
    model = torch.load(path)
    model.eval()
    mse = torch.nn.MSELoss()
    avg_ssim = 0
    avg_psnr = 0
    max_bound = 0
    with torch.no_grad():
        for batch_num, (data, target) in enumerate(testing_loader):
            data, target = data.cuda(), target.cuda()
            prediction = model(data)

            # Undo the pae-scaled normalization back to pixel units.
            prediction = prediction.cpu() * 128 * pae * 0.7 + 128
            target = target.cpu() * 128 * pae * 0.7 + 128
            mse_value = mse(prediction, target)
            psnr = 10 * log10(65025 / mse_value.item())  # 65025 = 255 ** 2
            avg_psnr += psnr
            progress_bar(batch_num, len(testing_loader),
                         'PSNR: %.4f' % (avg_psnr / (batch_num + 1)))

            abs_value = torch.abs(prediction - target).numpy()
            if abs_value.max() > max_bound:
                max_bound = abs_value.max()

            # Collapse (1, 1, H, W) tensors to H x W arrays for SSIM.
            prediction = prediction.numpy()
            target = target.numpy()
            prediction = np.reshape(prediction, (prediction.shape[2], prediction.shape[3]))
            target = np.reshape(target, (target.shape[2], target.shape[3]))
            avg_ssim += compute_ssim(prediction, target)

    print(" Average PSNR: {:.4f} dB".format(avg_psnr / len(testing_loader)))
    print(" Average SSIM: {:.4f}".format(avg_ssim / len(testing_loader)))  # SSIM is unitless, not dB
    print("bound: " + str(max_bound))
    return (avg_psnr / len(testing_loader),
            avg_ssim / len(testing_loader),
            max_bound)
# Fragment of a per-image benchmarking loop: times the forward pass with CUDA
# events (whole-image vs. crop-based forward), computes PSNR/SSIM (on the Y
# channel only if opt.is_y), and saves the SR result.
        torch.cuda.synchronize()
        time_list[i] = start.elapsed_time(end)  # milliseconds
    else:
        # Large images: run a tiled/cropped forward pass instead.
        start.record()
        out = crop_forward(im_input, model)
        end.record()
        torch.cuda.synchronize()
        time_list[i] = start.elapsed_time(end)  # milliseconds

    sr_img = utils.tensor2np(out.detach()[0])
    if opt.is_y:
        # Evaluate on the luminance (Y) channel only, a common SR convention.
        im_label = utils.quantize(sc.rgb2ycbcr(im_gt)[:, :, 0])
        im_pre = utils.quantize(sc.rgb2ycbcr(sr_img)[:, :, 0])
    else:
        im_label = im_gt
        im_pre = sr_img
    psnr_list[i] = utils.compute_psnr(im_pre, im_label)
    ssim_list[i] = utils.compute_ssim(im_pre, im_label)

    output_folder = os.path.join(opt.output_folder, imname.split('/')[-1])
    if not os.path.exists(opt.output_folder):
        os.makedirs(opt.output_folder)
    sio.imsave(output_folder, sr_img)
    i += 1

print("Mean PSNR: {}, SSIM: {}, Time: {} ms".format(
    np.mean(psnr_list), np.mean(ssim_list), np.mean(time_list)))
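# The timing above relies on CUDA events created before the loop. A minimal
# sketch of that setup (torch.cuda.Event is the actual PyTorch API; `model`
# and `im_input` here are placeholders):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
out = model(im_input)                  # placeholder forward pass
end.record()
torch.cuda.synchronize()               # wait for the GPU before reading the timer
elapsed_ms = start.elapsed_time(end)   # GPU time in milliseconds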
# Fragment of a per-image loop: converts the network output to a numpy image,
# computes border-shaved PSNR/SSIM against the ground truth (Y channel only if
# requested), and builds output paths for the three saved variants (_c, _s, _p).
out_img_p = out_p.detach().numpy().squeeze()
out_img_p = utils.convert_shape(out_img_p)

if opt.isHR:
    if opt.only_y:
        # Evaluate on the luminance (Y) channel only.
        im_label = utils.quantize(sc.rgb2ycbcr(im_gt)[:, :, 0])
        im_pre = utils.quantize(sc.rgb2ycbcr(out_img_c)[:, :, 0])
    else:
        im_label = im_gt
        im_pre = out_img_c
    # Shave a border of upscale_factor pixels before scoring.
    psnr_sr[i] = utils.compute_psnr(
        utils.shave(im_label, opt.upscale_factor),
        utils.shave(im_pre, opt.upscale_factor))
    ssim_sr[i] = utils.compute_ssim(
        utils.shave(im_label, opt.upscale_factor),
        utils.shave(im_pre, opt.upscale_factor))
i += 1

base_name = imname.split('/')[-1].split('.')[0]
output_c_folder = os.path.join(opt.output_folder, base_name + '_c.png')
output_s_folder = os.path.join(opt.output_folder, base_name + '_s.png')
output_p_folder = os.path.join(opt.output_folder, base_name + '_p.png')
if not os.path.exists(opt.output_folder):
    os.makedirs(opt.output_folder)
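# utils.shave crops a border before scoring, so the metrics ignore boundary
# artifacts introduced by the upscaling. A plausible sketch (an assumption;
# the repo's implementation may differ):
def shave(img, border):
    """Crop `border` pixels from each spatial edge of an H x W (x C) array."""
    return img[border:-border, border:-border]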
# Validation loop for the single-bucket / two-bucket coded-exposure models:
# reconstructs a high-speed video from the coded measurement(s) and accumulates
# PSNR, SSIM, and a weighted-L1 + total-variation loss.
with torch.no_grad():
    for gt_vid in validation_generator:
        gt_vid = gt_vid.cuda()
        if not args.two_bucket:
            # b1 = c2b(gt_vid)  # (N,1,H,W)
            b1 = torch.mean(gt_vid, dim=1, keepdim=True)
            # interm_vid = utils.impulse_inverse(b1, block_size=args.blocksize)
            highres_vid = uNet(b1)  # (N,16,H,W)
        else:
            b1, b0 = c2b(gt_vid)
            b_stack = torch.cat([b1, b0], dim=1)
            highres_vid = uNet(b_stack)

        val_psnr_sum += utils.compute_psnr(highres_vid, gt_vid).item()
        val_ssim_sum += utils.compute_ssim(highres_vid, gt_vid).item()

        ## loss
        final_loss = utils.weighted_L1loss(highres_vid, gt_vid)
        tv_loss = (utils.gradx(highres_vid).abs().mean() +
                   utils.grady(highres_vid).abs().mean())
        val_loss_sum += (final_loss + 0.1 * tv_loss).item()

        if val_iter % 1000 == 0:
            print('In val iter %d' % val_iter)
        val_iter += 1

logging.info('Total val iterations: %d' % val_iter)
logging.info(
    'Finished validation with loss: %.4f psnr: %.4f ssim: %.4f' %
    (val_loss_sum / val_iter, val_psnr_sum / val_iter, val_ssim_sum / val_iter))
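# The TV regularizer above penalizes the mean absolute horizontal and vertical
# gradients of the reconstruction. A minimal sketch of gradx/grady as forward
# differences on (..., H, W) tensors (an assumption about utils' definitions):
def gradx(v):
    return v[..., :, 1:] - v[..., :, :-1]  # horizontal neighbor differences

def grady(v):
    return v[..., 1:, :] - v[..., :-1, :]  # vertical neighbor differences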
# OpenCV dnn_superres batch benchmark: upscales every image in a directory,
# compares the result against a bicubic baseline per BGR channel, and encodes
# the mean SSIM/PSNR and runtime into the output filename.
time.sleep(2.0)

# batch process
for filename in os.listdir(args["path"]):
    print(filename)
    try:
        image = cv2.imread(os.path.join(args["path"], filename), 1)
        t1 = time.time()
        upscaled = sr.upsample(image)
        t2 = time.time()
        bicubic = cv2.resize(image, (upscaled.shape[1], upscaled.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

        # Score each BGR channel separately, then average.
        (B1, G1, R1) = cv2.split(bicubic)
        (B2, G2, R2) = cv2.split(upscaled)
        ssim1 = compute_ssim(B1, B2)
        ssim2 = compute_ssim(G1, G2)
        ssim3 = compute_ssim(R1, R2)
        mssim = (ssim1 + ssim2 + ssim3) / 3
        mssim_res = str(round(mssim * 100)) + 'E-2' if mssim < 1 else str(mssim)
        psnr1 = psnr(B1, B2)
        psnr2 = psnr(G1, G2)
        psnr3 = psnr(R1, R2)
        mpsnr = (psnr1 + psnr2 + psnr3) / 3
        mpsnr_res = (str(round(mpsnr * 100)) + 'E-2' if mpsnr < 1
                     else str(round(mpsnr)))

        out_filename = (filename.split(".png")[0] + "_" + str(modelName) +
                        "_x" + str(modelScale) + "_t" + str(round(t2 - t1, 2)) +
                        "_s" + str(mssim_res) + "_p" + str(mpsnr_res) + ".png")
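# The `sr` object above is OpenCV's dnn_superres engine. A sketch of the usual
# initialization (cv2.dnn_superres is the real API from opencv-contrib-python;
# the model file, modelName, and modelScale values are assumptions):
import cv2

sr = cv2.dnn_superres.DnnSuperResImpl_create()
sr.readModel('models/EDSR_x4.pb')  # hypothetical pretrained model file
sr.setModel('edsr', 4)             # e.g. modelName = 'edsr', modelScale = 4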
# Variant of the validation loop above with an explicit inversion stage:
# invNet maps the coded measurement(s) to an intermediate video, which uNet
# then refines into the high-speed reconstruction.
with torch.no_grad():
    for gt_vid in validation_generator:
        gt_vid = gt_vid.cuda()
        if not args.two_bucket:
            b1 = c2b(gt_vid)  # (N,1,H,W)
            # b1 = torch.mean(gt_vid, dim=1, keepdim=True)
            interm_vid = invNet(b1)
        else:
            b1, b0 = c2b(gt_vid)
            b_stack = torch.cat([b1, b0], dim=1)
            interm_vid = invNet(b_stack)
        highres_vid = uNet(interm_vid)  # (N,9,H,W)

        val_psnr_sum += utils.compute_psnr(highres_vid, gt_vid).item()
        val_ssim_sum += utils.compute_ssim(highres_vid, gt_vid).item()
        # Per-sample metrics (note: this recomputes PSNR/SSIM; the sums above
        # could be reused instead of calling the helpers twice).
        psnr = utils.compute_psnr(highres_vid, gt_vid).item() / gt_vid.shape[0]
        ssim = utils.compute_ssim(highres_vid, gt_vid).item() / gt_vid.shape[0]

        ## loss
        if args.intermediate:
            interm_loss = utils.weighted_L1loss(interm_vid, gt_vid).item()
        final_loss = utils.weighted_L1loss(highres_vid, gt_vid).item()
        tv_loss = (utils.gradx(highres_vid).abs().mean().item() +
                   utils.grady(highres_vid).abs().mean().item())
        if args.intermediate: