# --- Per-batch validation: upscale LR, run the generator, accumulate MSE/SSIM ---
# NOTE(review): this fragment was pasted with its newlines lost; statement
# boundaries below are reconstructed. The metric-accumulation part presumably
# ran inside the validation batch loop and the grid-saving part after it —
# confirm against the original file's indentation.
LR = F.interpolate(LR, scale_factor=opt.upSampling, mode="nearest").to(device)  # approximates hr_restore
HR = HR.to(device)
SR = generator(LR)
SR = torch.clamp(SR, 0, 1)  # keep SR in the valid [0, 1] image range

# Running sums are weighted by batch_size so the final averages are per-image.
# NOTE(review): `.data` is legacy autograd-detach syntax; kept for behavior parity.
batch_mse = ((SR - HR) ** 2).data.mean()
valing_results['mse'] += batch_mse * batch_size
batch_ssim = pytorch_ssim.ssim(SR, HR).item()
valing_results['ssims'] += batch_ssim * batch_size
# PSNR = 10 * log10(peak^2 / mean MSE); peak is taken from HR rather than a fixed 1.0.
# `batch_sizes` is assumed to be incremented elsewhere in the loop — verify.
valing_results['psnr'] = 10 * log10(
    (HR.max() ** 2) / (valing_results['mse'] / valing_results['batch_sizes']))
valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_sizes']

# Collect (LR, HR, SR) triplets for the visual comparison grid.
val_images.extend(
    [display_transform()(LR.data.cpu().squeeze(0)),
     display_transform()(HR.data.cpu().squeeze(0)),
     display_transform()(SR.data.cpu().squeeze(0))])
sys.stdout.write('\r[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f\n' % (
    valing_results['psnr'], valing_results['ssim']))

# --- Save comparison grids: 3 images (LR | HR | SR) per row, one file per unit ---
val_images = torch.stack(val_images)  # stacked in collection order
val_images = torch.chunk(val_images, val_images.size(0) // 3)  # 3 images form one unit
for index, image in enumerate(val_images, start=1):  # enumerate replaces the manual counter
    image = utils.make_grid(image, nrow=3, padding=1)  # 3 images per row
    utils.save_image(image, out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5)

# Prepare the statistics output directory (used by a later section).
# exist_ok avoids the check-then-create race of the original exists() guard.
out_path = 'statistics/'
os.makedirs(out_path, exist_ok=True)
# --- Per-batch validation losses for the generator, plus TensorBoard logging ---
# NOTE(review): newlines were lost in this fragment; statement boundaries are
# reconstructed. The loss accumulation presumably ran inside the validation
# batch loop and the writer.add_scalar section after it — confirm upstream.
fake_img = torch.clamp(model_g(input_img), min=0, max=1)  # generator output clipped to image range
mse = ((fake_img - target_img) ** 2).mean().data  # detached scalar tensor (legacy .data kept as-is)
mse_sum += mse
psnr_sum += -10 * torch.log10(mse)  # PSNR assuming a peak value of 1.0
rgb_loss_sum += g_loss_module.rgb_loss(fake_img, target_img)
mean_loss_sum += g_loss_module.mean_loss(fake_img, target_img)
per_loss_sum += g_loss_module.perceptual_loss(fake_img, target_img)
col_loss_sum += g_loss_module.color_loss(fake_img, target_img)

# generate images: only every val_img_interval epochs (and never at epoch 0)
if epoch % opt.val_img_interval == 0 and epoch != 0:
    blur = filter_low_module(fake_img)   # low-frequency component of the fake image
    hf = filter_high_module(fake_img)    # high-frequency component of the fake image
    val_image_list = [
        utils.display_transform()(target_img.data.cpu().squeeze(0)),
        utils.display_transform()(fake_img.data.cpu().squeeze(0)),
        utils.display_transform()(disc_img.squeeze(0)),
        utils.display_transform()(blur.data.cpu().squeeze(0)),
        utils.display_transform()(hf.data.cpu().squeeze(0))]
    n_val_images = len(val_image_list)
    val_images.extend(val_image_list)

if opt.saving and len(val_loader) > 0:
    # save validation values, averaged over the whole validation set
    writer.add_scalar('val/mse', mse_sum / len(val_set), iteration)
    writer.add_scalar('val/psnr', psnr_sum / len(val_set), iteration)
    writer.add_scalar('val/rgb_error', rgb_loss_sum / len(val_set), iteration)
    writer.add_scalar('val/mean_error', mean_loss_sum / len(val_set), iteration)
    writer.add_scalar('val/perceptual_error', per_loss_sum / len(val_set), iteration)
    writer.add_scalar('val/color_error', col_loss_sum / len(val_set), iteration)
# --- Standalone evaluation: load a trained generator and save comparison grids ---
# NOTE(review): newlines were lost in this fragment; statement boundaries and
# loop nesting are reconstructed from the code's syntax.
generator = Generator(UPSCALE_FACTOR).to(device)
# map_location ensures a GPU-saved checkpoint also loads on CPU-only hosts.
generator.load_state_dict(torch.load(MODEL_PATH, map_location=device))
generator.eval()  # inference mode: freezes BatchNorm/Dropout behavior

out_path = os.path.join(OUTPUT_PATH, 'evaluation')
os.makedirs(out_path, exist_ok=True)  # save_image fails if the directory is missing

indx = 1
with torch.no_grad():  # no gradients needed for evaluation
    for b, (val_lr, val_hr_restore, val_hr) in enumerate(val_loader):
        val_images = []
        lr = val_lr.to(device)
        hr = val_hr.to(device)
        sr = generator(lr)
        # One row per sample: bicubic-restored LR | ground-truth HR | generated SR.
        val_images.extend([
            display_transform()(val_hr_restore.squeeze(0)),
            display_transform()(hr.data.cpu().squeeze(0)),
            display_transform()(sr.data.cpu().squeeze(0))
        ])
        val_images = torch.stack(val_images)
        image = utils.make_grid(val_images, nrow=3, padding=5)
        utils.save_image(image, os.path.join(out_path, f'validate_index_{indx}.png'), padding=5)
        indx += 1