def test_psnr():
    """Test torch implementation of the PSNR (peak signal to noise ratio) metric."""
    psnr = PSNR(data_range=255)
    images = _load_test_images()
    for image in images:
        image_c = _corrupt_image(image)
        result = psnr(torch.Tensor(image), torch.Tensor(image_c)).numpy()
        desired = measure.compare_psnr(image, image_c)
        np.testing.assert_allclose(result, desired, rtol=RTOL)
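The test above checks the torch PSNR metric against skimage's compare_psnr. As a point of reference, here is a minimal NumPy sketch of the quantity both compute, PSNR = 10 * log10(data_range^2 / MSE); the function name psnr_reference is purely illustrative and is not the PSNR class under test.

import numpy as np

def psnr_reference(img, img_corrupted, data_range=255.0):
    """Illustrative reference PSNR in dB for two arrays of the same shape (assumed sketch)."""
    diff = img.astype(np.float64) - img_corrupted.astype(np.float64)
    mse = np.mean(diff ** 2)
    if mse == 0:
        return float('inf')  # identical inputs
    return 10.0 * np.log10((data_range ** 2) / mse)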
def __init__(self, args, train_loader, train_sampler, valid_loader, my_model, my_loss, ckp):
    self.args = args
    self.scale = args.scale[0]
    self.ckp = ckp
    self.loader_train = train_loader
    self.loader_valid = valid_loader
    self.train_sampler = train_sampler
    self.model = my_model
    self.loss = my_loss
    self.optimizer = utility.make_optimizer(args, self.model)
    self.psnr_fn = PSNR(boundary_ignore=40)

    # Postprocessing function to obtain sRGB images
    self.postprocess_fn = SimplePostProcess(return_np=True)

    if 'L1' in args.loss:
        self.aligned_loss = L1(boundary_ignore=None).cuda(args.local_rank)
    elif 'MSE' in args.loss:
        self.aligned_loss = L2(boundary_ignore=None).cuda(args.local_rank)
    elif 'CB' in args.loss:
        self.aligned_loss = CharbonnierLoss(boundary_ignore=None).cuda(args.local_rank)
    elif 'MSSSIM' in args.loss:
        self.aligned_loss = MSSSIMLoss(boundary_ignore=None).cuda(args.local_rank)

    if self.args.fp16:
        self.scaler = GradScaler()

    self.best_psnr = 0.
    self.best_epoch = 0

    if self.args.load != '':
        self.optimizer.load(ckp.dir, epoch=len(ckp.log))

    self.error_last = 1e8
    self.glob_iter = 0
    self.log_dir = LOG_DIR + "/" + args.save
    self.img_save_dir = IMG_SAVE_DIR + "/" + args.save
    # Where to load model
    self.load_model_dir = LOAD_MODEL_DIR + "/" + args.save
    # Where to save new model
    self.save_model_dir = SAVE_MODEL_DIR + "/" + args.save
    # Where to save visualization images (for report)
    self.results_dir = RESULTS_DIR + "/" + args.save
    self.writer = SummaryWriter(log_dir=self.log_dir)

    utility.mkdir(self.save_model_dir)
    utility.mkdir(self.img_save_dir)
    utility.mkdir(self.log_dir)
    utility.mkdir('frames')
def evaluate_single_epoch(config, model, dataloader, criterion, epoch, writer, postfix_dict):
    model.eval()
    with torch.no_grad():
        batch_size = config.eval.batch_size
        total_size = len(dataloader.dataset)
        total_step = math.ceil(total_size / batch_size)
        tbar = tqdm.tqdm(enumerate(dataloader), total=total_step)

        total_psnr = 0
        total_psnr_bic = 0
        total_loss = 0
        for i, (HR_img, LR_img, BC_img) in tbar:
            HR_img = HR_img[:, :1].to(device)
            LR_img = LR_img[:, :1].to(device)
            BC_img = BC_img[:, :1].to(device)

            target_scale = config.model.params.scale_factor
            target_scale_v = torch.Tensor(np.zeros(batch_size) + target_scale).to(device)

            pred_img, pred_scale = model.forward(HR_img, target_scale_v)
            if type(pred_img) == list:
                pred_img = pred_img[-1]

            total_loss += criterion(pred_img, pred_scale, HR_img, target_scale_v).item()
            total_psnr += PSNR(pred_img.cpu(), HR_img.cpu(), s=target_scale)

            f_epoch = epoch + i / total_step
            desc = '{:5s}'.format('val')
            desc += ', {:06d}/{:06d}, {:.2f} epoch'.format(i, total_step, f_epoch)
            tbar.set_description(desc)
            tbar.set_postfix(**postfix_dict)

        log_dict = {}
        avg_loss = total_loss / (i + 1)
        avg_psnr = total_psnr / (i + 1)
        log_dict['loss'] = avg_loss
        log_dict['psnr'] = avg_psnr

        for key, value in log_dict.items():
            if writer is not None:
                writer.add_scalar('val/{}'.format(key), value, epoch)
            postfix_dict['val/{}'.format(key)] = value

        return avg_psnr
def get_current_scalars(self):
    losses = {}
    losses['loss_consistent_hr'] = self.loss_consistent_hr.item()
    losses['loss_consistent_lr'] = self.loss_consistent_lr.item()
    losses['loss_img_smooth'] = self.loss_img_smooth.item()
    losses['loss_perceptural_hr'] = self.loss_perceptural_hr.item()
    losses['loss_perceptural_lr'] = self.loss_perceptural_lr.item()
    losses['loss_texture_matching'] = self.loss_texture_matching.item()
    losses['loss_G_D'] = self.loss_G_D.item()
    losses['loss_D'] = self.loss_D.item()
    losses['loss_G'] = self.loss_G.item()
    if self.hr_img_ref_gt is not None:
        losses['PSNR'] = PSNR(self.hr_img.data, self.hr_img_ref_gt)
    return losses
def get_current_scalars(self):
    losses = {}
    #losses['loss_self_ref'] = self.loss_self_ref.item()
    #losses['loss_self_others'] = self.loss_self_others.item()
    #losses['loss_img_smooth'] = self.loss_img_smooth.item()
    losses['loss_cyc'] = self.loss_cyc.item()
    #losses['loss_perceptural'] = self.loss_perceptural.item()
    #losses['loss_texture_matching'] = self.loss_texture_matching.item()
    losses['loss_G_D'] = self.loss_G_D.item()
    losses['loss_D'] = self.loss_D.item()
    losses['loss_G'] = self.loss_G.item()
    if self.hr_img_ref_gt is not None:
        #losses['PSNR'] = PSNR(self.synthesis_output.data, self.hr_img_ref_gt)
        losses['PSNR'] = PSNR(self.hr_img_ref.data, self.hr_img_ref_gt)
    return losses
def validate(self, val_batch, current_step):
    avg_psnr = 0.0
    avg_ssim = 0.0
    idx = 0
    for _, val_data in enumerate(val_batch):
        idx += 1
        img_name = os.path.splitext(os.path.basename(val_data['LR_path'][0]))[0]
        img_dir = os.path.join(self.opt['path']['checkpoints']['val_image_dir'], img_name)
        util.mkdir(img_dir)

        self.val_lr = val_data['LR'].to(self.device)
        self.val_hr = val_data['HR'].to(self.device)

        self.G.eval()
        with torch.no_grad():
            self.val_sr = self.G(self.val_lr)
        self.G.train()

        val_LR = self.val_lr.detach()[0].float().cpu()
        val_SR = self.val_sr.detach()[0].float().cpu()
        val_HR = self.val_hr.detach()[0].float().cpu()

        sr_img = util.tensor2img(val_SR)  # uint8
        gt_img = util.tensor2img(val_HR)  # uint8

        # Save SR images for reference
        save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(img_name, current_step))
        cv2.imwrite(save_img_path, sr_img)

        # calculate PSNR/SSIM on images with a 4-pixel border cropped on each side
        crop_size = 4
        gt_img = gt_img / 255.
        sr_img = sr_img / 255.
        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]
        cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]
        avg_psnr += PSNR(cropped_sr_img * 255, cropped_gt_img * 255)
        avg_ssim += SSIM(cropped_sr_img * 255, cropped_gt_img * 255)

    avg_psnr = avg_psnr / idx
    avg_ssim = avg_ssim / idx
    return avg_psnr, avg_ssim
def main():
    zurich_raw2rgb = ZurichRAW2RGB(root='PATH_TO_ZURICH_RAW_TO_RGB', split='test')
    dataset = SyntheticBurst(zurich_raw2rgb, burst_size=3, crop_sz=256)
    data_loader = DataLoader(dataset, batch_size=2)

    # Function to calculate PSNR. Note that the boundary pixels (40 pixels) will be
    # ignored during PSNR computation.
    psnr_fn = PSNR(boundary_ignore=40)

    # Postprocessing function to obtain sRGB images
    postprocess_fn = SimplePostProcess(return_np=True)

    for d in data_loader:
        burst, frame_gt, flow_vectors, meta_info = d

        # A simple baseline which upsamples the base image using bilinear upsampling
        burst_rgb = burst[:, 0, [0, 1, 3]]
        burst_rgb = burst_rgb.view(-1, *burst_rgb.shape[-3:])
        burst_rgb = F.interpolate(burst_rgb, scale_factor=8, mode='bilinear')

        # Calculate PSNR
        score = psnr_fn(burst_rgb, frame_gt)
        print('PSNR is {:0.3f}'.format(score))

        meta_info = convert_dict(meta_info, burst.shape[0])

        # Apply simple post-processing to obtain RGB images
        pred_0 = postprocess_fn.process(burst_rgb[0], meta_info[0])
        gt_0 = postprocess_fn.process(frame_gt[0], meta_info[0])

        pred_0 = cv2.cvtColor(pred_0, cv2.COLOR_RGB2BGR)
        gt_0 = cv2.cvtColor(gt_0, cv2.COLOR_RGB2BGR)

        # Visualize input, ground truth
        cv2.imshow('Input (Demosaicked + Upsampled)', pred_0)
        cv2.imshow('GT', gt_0)

        input_key = cv2.waitKey(0)
        if input_key == ord('q'):
            return
def test_PSNR_sanity(self):
    A = K.ones((10, 10, 3))
    B = K.zeros((10, 10, 3))
    self.assertEqual(K.get_value(PSNR(A, A)), np.inf)
    self.assertEqual(K.get_value(PSNR(A, B)), 0)
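For context, the sanity test pins down the two boundary cases of the PSNR formula with a data range of 1: identical images give MSE = 0, so PSNR diverges to infinity, while all-ones vs. all-zeros gives MSE = 1, so PSNR = 10 * log10(1 / 1) = 0 dB. Below is a minimal backend-style sketch consistent with those assertions; it is an illustrative assumption, not necessarily the project's actual PSNR implementation.

import numpy as np
from keras import backend as K

def psnr_sketch(y_true, y_pred, max_val=1.0):
    """PSNR in dB via Keras backend ops; evaluates to inf when the inputs match exactly."""
    mse = K.mean(K.square(y_pred - y_true))
    # 10 / ln(10) * ln(x) == 10 * log10(x)
    return 10.0 / np.log(10.0) * K.log((max_val ** 2) / mse)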
def test_train(args, model, test_dataloader):
    #model.eval()
    print('=====> test training begin!')
    #for i, data in enumerate(test_dataloader):
    img_lr = None
    #lr_list = []
    img_hr = None
    #t = torchvision.transforms.Compose([torchvision.transforms.ToPILImage(), torchvision.transforms.RandomResizedCrop(size=224)])
    for i in range(args.start_epoch, args.end_epoch):
        if i > 0 and i % 99 == 0:
            #checkpoint(i)
            model.noise_amp = model.noise_amp / 10.0
            model.update_lr()

        for j, data in enumerate(test_dataloader):
            img_lr = data['lr_image']
            img_lr = img_lr.expand(args.batch_size, -1, -1, -1)
            img_lr = img_lr.cuda().clone().float()
            img_hr = data['hr_image']
            img_hr = img_hr.expand(args.batch_size, -1, -1, -1)
            img_hr = img_hr.cuda().clone().float()
            #img_lr, img_hr = img_resize(img_hr, t)
            #print(img_hr.size())
            img_llr = nn.functional.avg_pool2d(img_lr, kernel_size=4)
            #img_hlr = nn.functional.avg_pool2d(img_hr, kernel_size=2)
            #img_lllr = nn.functional.avg_pool2d(img_llr, kernel_size=2)
            model.set_train_data(img_llr, img_llr, img_lr)
            model.set_ground_truth(img_lr, img_lr)
            model.optimize()
            #model.set_train_data(img_lr, img_lr, img_hr, img_hr)
            #model.set_ground_truth(img_hr, img_hr)
            #model.optimize()
            #if i == 0:
            #    lr_list.append(img_llr)
            #    for j in range(30):
            #        m = nn.Upsample(size=[22 + 2 * j, 22 + 2 * j], mode='bilinear', align_corners=True)
            #        lr_list.append(m(img_lr))
            #        output = m(img_lr).cpu()[0].permute(1, 2, 0).detach().numpy()
            #        output = skimage.img_as_ubyte(output)
            #        skimage.io.imsave(os.path.join(args.result_dir, 'SR', 'lr_list_{}.png'.format(j)), output)
            #model.set_train_data(lr_list[0:-1])
            #model.set_ground_truth(lr_list[1:])
            #model.optimize()
            scalars = model.get_current_scalars()
            print('epoch', i, 'step', j, scalars)

        # Evaluate on the last batch of the epoch and save the current output.
        #hr = model.net_sr(img_lr)
        output = model.net_sr(img_lr) + model.upsample_4(img_lr)
        res = model.net_G(output)
        output = output + res
        #output = model.net_sr(img_hr)
        #noise = torch.randn(model.opts.batch_size, model.opts.n_colors, model.opts.im_crop_H, model.opts.im_crop_W).cuda() * model.noise_amp
        #print(model.noise_amp)
        #output = model.net_G(noise)
        #noise = torch.randn(model.opts.batch_size, model.opts.n_colors, model.opts.im_crop_H, model.opts.im_crop_W).cuda() * model.noise_amp
        #output = model.net_sr(img_lr)
        #output = model.net_sr(output)
        #lr_feature_head = model.net_Feature_Head(img_hr)
        #lr_content_feature = model.net_Feature_extractor(lr_feature_head)
        #lr_content_output = lr_feature_head + lr_content_feature
        #hr_img = model.net_Upscalar(lr_content_output)
        #res = model.net_Ghr(hr_img)
        #output = hr_img + res
        #output = model.net_G.upscale_layers[0](lr_list[0])
        #for i in range(0, model.net_G.n_blocks):
        #    if i == 0:
        #        x = torch.cat([lr_list[i], lr_list[i]], dim=1)
        #        output = model.net_G.upscale_layers[i](x)
        #    else:
        #        x = torch.cat([output, lr_list[i]], dim=1)
        #        output = model.net_G.upscale_layers[i](x)
        #output = model.net_G.upscale_layers[-1](output)
        #print(output.shape)
        PSNR_value = PSNR(output.data, img_hr)
        print('PSNR: {}'.format(PSNR_value))

        output = output.cpu()[0].permute(1, 2, 0).detach().numpy()
        output[output > 1] = 1
        output[output < -1] = -1
        output = skimage.img_as_ubyte(output)
        skimage.io.imsave(
            os.path.join(args.result_dir, 'SR', 'SR_{}.png'.format('testtrain_SRNet_lr')),
            output)
def test(args, model, test_dataloader):
    PSNR_total = []
    SSIM_total = []
    #model.eval()
    print('=====> test sr begin!')
    with torch.no_grad():
        for i, data in enumerate(test_dataloader):
            # torch.Size([1, 3, 320, 320])
            img_ref = data['image_center']
            img_oth = data['image_others']
            #img_oth = torch.squeeze(img_oth)
            img_adv_cen = data['img_adv_cen']
            img_adv_ref = data['img_adv_ref']
            img_oth = img_oth.squeeze(0)
            img_adv_ref = img_adv_ref.squeeze(0)
            img_ref = img_ref.expand(args.batch_size, -1, -1, -1)
            img_adv_cen = img_adv_cen.expand(args.batch_size, -1, -1, -1)
            image_others = (img_oth.cuda())[:, :, :args.im_crop_H, :args.im_crop_W].clone().float()
            #print(img_ref.shape)
            #image_ref = img_ref.expand(args.batch_size-1, -1, -1, -1)
            #image_ref = (image_ref.cuda())[:, :, :args.im_crop_H, :args.im_crop_W].clone().float()
            image_ref = (img_ref.cuda())[:, :, :args.im_crop_H, :args.im_crop_W].clone().float()
            lr_image_ref = nn.functional.avg_pool2d(image_ref, kernel_size=args.scale)
            lr_image_others = nn.functional.avg_pool2d(image_others, kernel_size=args.scale)
            image_adv_cen = img_adv_cen.cuda().clone().float()
            image_adv_ref = img_adv_ref.cuda().clone().float()
            '''
            hr_val = model.net_sr(lr_image_ref)
            hr_ref = model.net_sr(lr_image_others)
            #flows_ref_to_other = model.net_flow(image_ref, image_others)
            flows_ref_to_other = model.net_flow(hr_val, hr_ref)
            #flows_other_to_ref = model.net_flow(image_others, image_ref)
            #flow_12_1 = flows_ref_to_other[0]*20.0
            #flow_12_2 = flows_ref_to_other[1]*10.0
            #flow_12_3 = flows_ref_to_other[2]*5.0
            #flow_12_4 = flows_ref_to_other[3]*2.5
            #SR_conv1, SR_conv2, SR_conv3, SR_conv4 = model.net_enc(hr_val)
            #HR2_conv1, HR2_conv2, HR2_conv3, HR2_conv4 = model.net_enc(hr_ref)
            #warp_21_conv1 = model.Backward_warper(HR2_conv1, flow_12_1)
            #warp_21_conv2 = model.Backward_warper(HR2_conv2, flow_12_2)
            #warp_21_conv3 = model.Backward_warper(HR2_conv3, flow_12_3)
            #warp_21_conv4 = model.Backward_warper(HR2_conv4, flow_12_4)
            #hr_val = model.net_dec(SR_conv1, SR_conv2, SR_conv3, SR_conv4, warp_21_conv1, warp_21_conv2, warp_21_conv3, warp_21_conv4)
            #hr_val = model.net_G1(hr_val, flows_ref_to_other, model.Backward_warper, image_others)
            hr_val = model.net_G1(hr_val, flows_ref_to_other, model.Backward_warper, hr_ref)
            #print(hr_val.min(), hr_val.max())
            '''
            #hr_val = model.net_sr(lr_image_ref) + model.upsample_4(lr_image_ref)
            #hr_ref = model.net_sr(lr_image_others) + model.upsample_4(lr_image_others)
            #flows_ref_to_other = model.net_flow(hr_val, hr_ref)
            #hr_val = model.net_G1(hr_val, flows_ref_to_other, model.Backward_warper, hr_ref)
            #noise = torch.randn(args.batch_size, args.n_colors, args.im_crop_H, args.im_crop_W).cuda() * 1e-4
            #hr_val = model.net_sr(image_ref)
            #hr_val = model.net_sr(image_ref) + model.upsample_4(image_ref)
            #hr_val = model.net_sr(image_ref)
            #hr_val = model.net_G1(hr_val)
            #hr_val = model.net_G2(image_adv_cen)
            #res = model.net_G(hr_val)
            #res = model.net_G1(hr_val)
            #hr_val = hr_val + res
            #hr_other_imgs = self.net_sr(lr_other_imgs)
            #hr_val = model.net_sr(lr_image_ref)
            #noise = torch.randn(args.batch_size, args.n_colors, args.im_crop_H, args.im_crop_W).cuda() * 0.0001
            #hr_val = hr_val + model.net_G(hr_val)
            #hr_val = model.net_G1(hr_val)
            lr_feature_head = model.net_Feature_Head(lr_image_ref)
            lr_content_feature = model.net_Feature_extractor(lr_feature_head)
            lr_content_output = lr_feature_head + lr_content_feature
            hr_val = model.net_Upscalar(lr_content_output)
            hr_val = model.net_G1(hr_val)

            hr_val_numpy = hr_val.cpu()[0].permute(1, 2, 0).numpy()
            hr_val_numpy[hr_val_numpy > 1] = 1
            hr_val_numpy[hr_val_numpy < -1] = -1
            img_sr = skimage.img_as_ubyte(hr_val_numpy)
            skimage.io.imsave(
                os.path.join(args.result_dir, 'tempo', 'SR_{}.png'.format(i)), img_sr)
            #skimage.io.imsave(os.path.join(args.result_dir, 'tempo', 'SR_{}.png'.format(i)), hr_val_numpy)

            if args.have_gt:
                PSNR_value = PSNR(hr_val.data, image_ref)
                SSIM_value = SSIM(hr_val.data, image_ref)
                PSNR_total.append(PSNR_value)
                SSIM_total.append(SSIM_value)
                print('PSNR: {} for patch {}'.format(PSNR_value, i))
                print('SSIM: {} for patch {}'.format(SSIM_value, i))
                print('Average PSNR: {} for {} patches'.format(
                    sum(PSNR_total) / len(PSNR_total), i))
                print('Average SSIM: {} for {} patches'.format(
                    sum(SSIM_total) / len(SSIM_total), i))

            if args.save_result:
                os.makedirs(os.path.join(args.result_dir, 'HR'), exist_ok=True)
                os.makedirs(os.path.join(args.result_dir, 'LR'), exist_ok=True)
                os.makedirs(os.path.join(args.result_dir, 'REF'), exist_ok=True)
                os.makedirs(os.path.join(args.result_dir, 'ADV_CEN'), exist_ok=True)
                os.makedirs(os.path.join(args.result_dir, 'ADV_REF'), exist_ok=True)

                #img_gt = skimage.img_as_float(torch.squeeze(img_ref).permute(1, 2, 0).numpy())
                img_gt = skimage.img_as_ubyte(torch.squeeze(img_ref).permute(1, 2, 0).numpy())
                skimage.io.imsave(
                    os.path.join(args.result_dir, 'HR', '{}.png'.format(i)), img_gt)

                img_lr = skimage.img_as_ubyte(lr_image_ref.cpu()[0].permute(1, 2, 0).numpy())
                skimage.io.imsave(
                    os.path.join(args.result_dir, 'LR', '{}.png'.format(i)), img_lr)

                img_adv_center = skimage.img_as_ubyte(image_adv_cen.cpu()[0].permute(1, 2, 0).numpy())
                skimage.io.imsave(
                    os.path.join(args.result_dir, 'ADV_CEN', '{}.png'.format(i)), img_adv_center)

                for j in range(args.batch_size):
                    os.makedirs(os.path.join(args.result_dir, 'ADV_REF', '{}'.format(j)), exist_ok=True)
                    img_adv_reference = skimage.img_as_ubyte(image_adv_ref.cpu()[j].permute(1, 2, 0).numpy())
                    skimage.io.imsave(
                        os.path.join(args.result_dir, 'ADV_REF', '{}'.format(j), '{}.png'.format(i)),
                        img_adv_reference)

                    os.makedirs(os.path.join(args.result_dir, 'REF', '{}'.format(j)), exist_ok=True)
                    img_reference = skimage.img_as_ubyte(image_others.cpu()[j].permute(1, 2, 0).numpy())
                    skimage.io.imsave(
                        os.path.join(args.result_dir, 'REF', '{}'.format(j), '{}.png'.format(i)),
                        img_reference)
def test_lr(args, model, test_dataloader):
    #model.eval()
    print('=====> test existing lr begin!')
    PSNR_total = []
    SSIM_total = []
    fake_total = []
    real_total = []
    Loss_function = GANLoss()
    with torch.no_grad():
        for i, data in enumerate(test_dataloader):
            img_lr = data['lr_image']
            img_lr = img_lr.expand(args.batch_size, -1, -1, -1)
            img_lr = img_lr.cuda().clone().float()
            #hr_val = model.net_sr(img_lr)
            #flows_ref_to_other = model.net_flow(self.hr_img_ref_gt, self.hr_img_oth_gt)
            #flows_other_to_ref = model.net_flow(self.hr_img_oth_gt, self.hr_img_ref_gt)
            #flow_12_1 = self.flows_ref_to_other[0]*20.0
            #flow_12_2 = self.flows_ref_to_other[1]*10.0
            #flow_12_3 = self.flows_ref_to_other[2]*5.0
            #flow_12_4 = self.flows_ref_to_other[3]*2.5
            #SR_conv1, SR_conv2, SR_conv3, SR_conv4 = self.net_enc(self.sr_img_ref)
            #HR2_conv1, HR2_conv2, HR2_conv3, HR2_conv4 = self.net_enc(self.hr_img_oth_gt)
            #warp_21_conv1 = self.Backward_warper(HR2_conv1, flow_12_1)
            #warp_21_conv2 = self.Backward_warper(HR2_conv2, flow_12_2)
            #warp_21_conv3 = self.Backward_warper(HR2_conv3, flow_12_3)
            #warp_21_conv4 = self.Backward_warper(HR2_conv4, flow_12_4)
            #sythsis_output = self.net_dec(SR_conv1, SR_conv2, SR_conv3, SR_conv4, warp_21_conv1, warp_21_conv2, warp_21_conv3, warp_21_conv4)
            #lr_feature_head = model.net_Feature_Head(img_lr)
            #lr_content_feature = model.net_Feature_extractor(lr_feature_head)
            #lr_content_output = lr_feature_head + lr_content_feature
            #hr_val = model.net_Upscalar(lr_content_output)
            hr_val = model.net_sr(img_lr) + model.upsample_4(img_lr)
            #hr_val = model.upsample_4(img_lr)
            #hr_val = model.net_sr(img_lr)
            #noise = torch.randn(args.batch_size, args.n_colors, args.im_crop_H, args.im_crop_W).cuda() * 1e-4
            #hr_val = hr_val + model.net_G1(hr_val)
            hr_val = model.net_G1(hr_val)
            #hr_val = model.net_G1(hr_val)
            #hr_val = model.net_G2(hr_val)
            #m = nn.Upsample(size=[args.im_crop_H*3, args.im_crop_W*3], mode='bilinear', align_corners=True)
            #hr_val = m(hr_val)

            hr_val_numpy = hr_val.cpu()[0].permute(1, 2, 0).numpy()
            hr_val_numpy[hr_val_numpy > 1] = 1
            hr_val_numpy[hr_val_numpy < -1] = -1
            img_sr = skimage.img_as_ubyte(hr_val_numpy)
            skimage.io.imsave(
                os.path.join(args.result_dir, 'SR', 'SR_{}.png'.format(i)), img_sr)
            #skimage.io.imsave(os.path.join(args.result_dir, 'SR_{}.png'.format(i)), img_sr)

            #dx_hr_img_fake, dy_hr_img_fake, dxy_hr_img_fake = model.gradient_fn(hr_val)
            #hr_img_fake = torch.cat([dx_hr_img_fake, dy_hr_img_fake, dxy_hr_img_fake], dim=0)
            #fake = model.net_D(hr_img_fake)
            #fake = Loss_function(fake, target_is_real=False)
            #print('fake: {} for patch {}'.format(fake, i))
            #fake_total.append(fake)
            #print('Average fake: {} for {} patches'.format(sum(fake_total) / len(fake_total), i))

            if args.have_gt:
                img_hr = data['hr_image']
                img_hr = img_hr.expand(args.batch_size, -1, -1, -1)
                img_hr = img_hr.cuda().clone().float()
                #dx_hr_img_real, dy_hr_img_real, dxy_hr_img_real = model.gradient_fn(img_hr)
                #hr_img_real = torch.cat([dx_hr_img_real, dy_hr_img_real, dxy_hr_img_real], dim=0)
                #real = model.net_D(hr_img_real)
                #real = Loss_function(real, target_is_real=True)
                #print('real: {} for patch {}'.format(real, i))
                #real_total.append(real)
                #print('Average real: {} for {} patches'.format(sum(real_total) / len(real_total), i))
                PSNR_value = PSNR(hr_val.data, img_hr)
                SSIM_value = SSIM(hr_val.data, img_hr)
                PSNR_total.append(PSNR_value)
                SSIM_total.append(SSIM_value)
                print('PSNR: {} for patch {}'.format(PSNR_value, i))
                print('SSIM: {} for patch {}'.format(SSIM_value, i))
                print('Average PSNR: {} for {} patches'.format(
                    sum(PSNR_total) / len(PSNR_total), i))
                print('Average SSIM: {} for {} patches'.format(
                    sum(SSIM_total) / len(SSIM_total), i))