def test_single(noise_dir, gt_dir, image_size, num_workers, checkpoint, resume):
    """Run single-frame denoising inference and display noisy vs. denoised images.

    Args:
        noise_dir: directory of noisy input images.
        gt_dir: directory of matching ground-truth images.
        image_size: crop size forwarded to the dataset loader.
        num_workers: DataLoader worker count.
        checkpoint: directory holding saved checkpoints.
        resume: checkpoint filename to load ('' to skip loading).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataset = SingleLoader_DGF(noise_dir=noise_dir, gt_dir=gt_dir, image_size=image_size)
    data_loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=num_workers)
    model = SFD_C_DGF().to(device)
    if resume != '':
        save_dict = torch.load(os.path.join(checkpoint, resume))
        model.load_state_dict(save_dict['state_dict'])
    model.eval()  # fix: run inference in eval mode (dropout/batch-norm)
    with torch.no_grad():  # fix: no autograd graph needed at test time
        for step, (image_noise_hr, image_noise_lr, image_gt_hr) in enumerate(data_loader):
            image_noise_hr = image_noise_hr.to(device)
            image_noise_lr = image_noise_lr.to(device)
            image_gt_hr = image_gt_hr.to(device)
            pre = model(image_noise_lr, image_noise_hr)
            # fix: move tensors to CPU before .numpy() — the original called
            # .detach().numpy() directly, which raises on CUDA tensors.
            image_gt = np.array(
                np.transpose(image_gt_hr[0].detach().cpu().numpy(), (1, 2, 0)) * 255, dtype=int)
            image_noise = np.array(
                np.transpose(image_noise_hr[0].detach().cpu().numpy(), (1, 2, 0)) * 255, dtype=int)
            pre = np.array(
                np.transpose(pre[0].detach().cpu().numpy(), (1, 2, 0)) * 255, dtype=int)
            print(" Noise : ", psnr(image_noise, image_gt), " pre : ", psnr(pre, image_gt))
            plt.subplot(1, 2, 1)
            plt.imshow(image_noise)
            plt.subplot(1, 2, 2)
            plt.imshow(pre)
            plt.show()
def compare_folder(origin, codes, res):
    """Score every reconstructed image in `res` against its original in `origin`.

    Prints per-image PSNR/MS-SSIM as it goes, then the aggregate results.

    Returns:
        Tuple (bpp, mean PSNR, mean MS-SSIM), where bpp is the total size of
        the encoded `codes` folder divided by the total number of source pixels.
    """
    psnr_scores, ssim_scores = [], []
    pixel_count = 0
    for name in os.listdir(origin):
        src = os.path.join(origin, name)
        rec = os.path.join(res, name)
        psnr_scores.append(metric.psnr(src, rec))
        ssim_scores.append(metric.msssim(src, rec))
        pixel_count += utils.get_pixels(src)
        print(psnr_scores[-1], ssim_scores[-1])
    bpp = utils.get_size_folder(codes) / pixel_count
    print(bpp, mean(psnr_scores), mean(ssim_scores))
    return bpp, mean(psnr_scores), mean(ssim_scores)
def test_multi(noise_dir, gt_dir, image_size, image_size_lr, num_workers, checkpoint, resume):
    """Run burst (multi-frame) denoising inference and display GT vs. prediction.

    Args:
        noise_dir: directory of noisy burst inputs.
        gt_dir: directory of matching ground-truth images.
        image_size: HR crop size forwarded to the dataset loader.
        image_size_lr: LR spatial size used for the recurrent state tensors.
        num_workers: DataLoader worker count.
        checkpoint: directory holding saved checkpoints.
        resume: checkpoint filename to load ('' to skip loading).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataset = MultiLoader_DGF(noise_dir=noise_dir, gt_dir=gt_dir, image_size=image_size)
    data_loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=num_workers)
    model_single = SFD_C_DGF().to(device)
    model = MFD_C_DFG(model_single).to(device)
    if resume != '':
        print(device)
        save_dict = torch.load(os.path.join(checkpoint, resume),
                               map_location=torch.device('cpu'))
        model.load_state_dict(save_dict['state_dict'])
    model.eval()
    trans = transforms.ToPILImage()
    with torch.no_grad():  # fix: inference should not accumulate autograd state
        # fix: was `for i in range(10)` with `i` reused (and clobbered) inside
        # the burst loop as a first-frame flag; use `i_burst == 0` instead.
        for _ in range(10):
            for step, (image_noise_hr, image_noise_lr, image_gt_hr) in enumerate(data_loader):
                image_noise_hr_batch = image_noise_hr.to(device)
                image_noise_lr_batch = image_noise_lr.to(device)
                image_gt_hr = image_gt_hr.to(device)
                # NOTE(review): frames are indexed on dim 1 below but the burst
                # length is read from dim 0 — confirm the intended tensor layout.
                burst_size = image_noise_hr_batch.size()[0]
                # recurrent state: seven 64-channel feature maps + one 3-channel image
                mfinit1, mfinit2, mfinit3, mfinit4, mfinit5, mfinit6, mfinit7 = torch.zeros(
                    7, 1, 64, image_size_lr, image_size_lr).to(device)
                mfinit8 = torch.zeros(1, 3, image_size_lr, image_size_lr).to(device)
                for i_burst in range(burst_size):
                    frame_hr = image_noise_hr_batch[:, i_burst, :, :, :]
                    frame_lr = image_noise_lr_batch[:, i_burst, :, :, :]
                    if i_burst == 0:
                        # first frame: seed the recurrent state with zeros
                        dframe_lr, dframe_hr, mf1, mf2, mf3, mf4, mf5, mf6, mf7, mf8, mf8_hr = model(
                            frame_lr, frame_hr, mfinit1, mfinit2, mfinit3, mfinit4,
                            mfinit5, mfinit6, mfinit7, mfinit8)
                    else:
                        dframe_lr, dframe_hr, mf1, mf2, mf3, mf4, mf5, mf6, mf7, mf8, mf8_hr = model(
                            frame_lr, frame_hr, mf1, mf2, mf3, mf4, mf5, mf6, mf7, mf8)
                # fix: ToPILImage requires CPU tensors — add .cpu() before conversion
                plt.subplot(1, 2, 1)
                plt.imshow(np.array(trans(image_gt_hr[0].cpu())))
                plt.subplot(1, 2, 2)
                plt.imshow(np.array(trans(dframe_hr[0].cpu())))
                plt.show()
                print(psnr(np.array(trans(dframe_hr[0].cpu())),
                           np.array(trans(image_gt_hr[0].cpu()))))
def get_psnr(res_path, jpeg=False):
    """Compute PSNR for the 24 reconstructed Kodak images against the originals.

    Args:
        res_path: root folder holding per-image result subfolders ('01'..'24').
        jpeg: if True compare the JPEG output, otherwise the PNG output.

    Returns:
        List of 24 PSNR values, in Kodak image order.
    """
    scores = []
    ext = 'jpg' if jpeg else 'png'
    for j in range(1, 25):
        n_id = '{:02d}'.format(j)  # zero-padded two-digit image id
        original = '/home/williamchen/Dataset/Kodak/kodim' + n_id + '.png'
        compared = '{}/{}/00.{}'.format(res_path, n_id, ext)
        scores.append(metric.psnr(original, compared))
    return scores
def demo(img_path):
    """Restore the latest pix2pix checkpoint and show the SR result for one image.

    Prints the PSNR between the real and generated HR images, then displays both.
    """
    lr_img, hr_img = imgread(img_path)
    model = pix2pix_model(cfg)
    model.test_model(lr_img, hr_img)
    ckpt_path = tf.train.latest_checkpoint('checkpoint')
    restorer = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        restorer.restore(sess, ckpt_path)
        # clip the generated image into the valid [0, 1] range before display
        fake = tf.clip_by_value(model.fake_hr_image, 0, 1)
        fake = sess.run(fake).squeeze()
        real = sess.run(hr_img)
        print(psnr(real.squeeze(), fake.squeeze()))
        imshow(fake)
        imshow(real.squeeze())
def build_model(self):
    """Build the SRDenseNet graph: forward pass, L2 loss, metrics, summaries, savers."""
    # forward pass, rescaled to the [0, 255] pixel range
    self.output = self.SRDenseNet(
        x=self.x_lr,
        f=self.n_filters,
        kernel_size=self.kernel_size,
        reduction=self.reduction,
        use_bn=self.use_bn,
        scale=self.img_scale,
    )
    self.output = tf.clip_by_value(self.output * 255., 0., 255.)

    # l2 loss (an l1 variant would be tf.reduce_mean(tf.abs(self.output - self.x_hr)))
    self.loss = tf.losses.mean_squared_error(self.x_hr, self.output)
    self.train_op = self.opt.minimize(self.loss, global_step=self.global_step)

    # quality metrics on the 255-scaled images
    self.psnr = tf.reduce_mean(metric.psnr(self.x_hr, self.output, m_val=255))
    self.ssim = tf.reduce_mean(metric.ssim(self.x_hr, self.output, m_val=255))

    # image and scalar summaries
    for tag, img in (('lr', self.x_lr), ('hr', self.x_hr), ('generated-hr', self.output)):
        tf.summary.image(tag, img, max_outputs=self.batch_size)
    for tag, val in (("loss/l2_loss", self.loss), ("metric/psnr", self.psnr),
                     ("metric/ssim", self.ssim), ("misc/lr", self.lr)):
        tf.summary.scalar(tag, val)
    self.merged = tf.summary.merge_all()

    # checkpoint savers: rolling checkpoint plus best-model snapshot
    self.saver = tf.train.Saver(max_to_keep=2)
    self.best_saver = tf.train.Saver(max_to_keep=1)
def build_model(self):
    """Build the RCAN graph: forward pass, L1 loss, metrics, summaries, savers."""
    # forward pass, rescaled to the [0, 255] pixel range
    self.output = self.residual_channel_attention_network(
        x=self.x_lr,
        f=self.n_filters,
        kernel_size=self.kernel_size,
        reduction=self.reduction,
        use_bn=self.use_bn,
        scale=self.img_scale,
    )
    self.output = tf.clip_by_value(self.output * 255., 0., 255.)

    # l1 loss
    self.loss = tf.reduce_mean(tf.abs(self.output - self.x_hr))
    self.train_op = self.opt.minimize(self.loss, global_step=self.global_step)

    # quality metrics
    # NOTE(review): m_val=1 while self.output is clipped to [0, 255] — confirm scale
    self.psnr = tf.reduce_mean(metric.psnr(self.output, self.x_hr, m_val=1))
    self.ssim = tf.reduce_mean(metric.ssim(self.output, self.x_hr, m_val=1))

    # image and scalar summaries
    for tag, img in (('lr', self.x_lr), ('hr', self.x_hr), ('generated-hr', self.output)):
        tf.summary.image(tag, img, max_outputs=self.batch_size)
    for tag, val in (("loss/l1_loss", self.loss), ("metric/psnr", self.psnr),
                     ("metric/ssim", self.ssim), ("misc/lr", self.lr)):
        tf.summary.scalar(tag, val)
    self.merged = tf.summary.merge_all()

    # savers and log writer
    self.saver = tf.train.Saver(max_to_keep=1)
    self.best_saver = tf.train.Saver(max_to_keep=1)
    self.writer = tf.summary.FileWriter(self.tf_log, self.sess.graph)
def demo(lr_image, hr_image):
    """Restore the latest LapSRN checkpoint and show the level-2 SR reconstruction.

    Prints the checkpoint path and the PSNR between the real HR image and the
    level-2 output, then displays both images.
    """
    model_sr = LapSRN(mode='demo')
    hr_images_fake, residuals = model_sr.construct_net(lr_image, hr_image)
    ckpt_path = tf.train.latest_checkpoint('checkpoint')
    print(ckpt_path)
    restorer = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        restorer.restore(sess, ckpt_path)
        # level-2 output = level-1 upsampled image + predicted residual
        level2 = hr_images_fake['hr_image_fake_level_1'] + residuals['residual_level_1']
        level2 = tf.clip_by_value(level2, 0, 1)
        level2 = sess.run(level2).squeeze()
        lr_np = sess.run(lr_image).squeeze()
        hr_np = sess.run(hr_image)
        print(psnr(hr_np.squeeze(), level2.squeeze()))
        imshow(hr_np.squeeze())
        imshow(level2)
def test_valid(model_path, version, root):
    """Encode/decode every image under `root` with the loaded model and score it.

    Args:
        model_path: path passed to load_model().
        version: run tag used to name the codes/results directories.
        root: directory of original images to evaluate.

    Returns:
        Tuple (mean bpp, mean PSNR, mean MS-SSIM) over all images in `root`.
    """
    # fix: create directories with os.makedirs instead of shelling out to
    # `mkdir -p` — no subprocess, and safe for paths with spaces/metacharacters
    codes_path = 'codes_val/{}'.format(version)
    os.makedirs(codes_path, exist_ok=True)
    os.makedirs('res_val/{}'.format(version), exist_ok=True)
    bpp, psnr_scores, ssim_scores = [], [], []
    load_model(model_path)
    for filename in os.listdir(root):
        original = os.path.join(root, filename)
        output_path = 'res_val/{}/{}'.format(version, filename)
        os.makedirs(output_path, exist_ok=True)
        encode_image_with_padding(root, filename, codes_path)
        # codes file is named after the image with its extension replaced by .npz
        codes = codes_path + '/' + filename[:-4] + '.npz'
        filename = filename[:-4]
        decode_image_with_padding(codes_path, output_path, filename)
        compared = output_path + '/' + filename + '.png'
        bpp.append(utils.calc_bpp(codes, original))
        psnr_scores.append(metric.psnr(original, compared))
        ssim_scores.append(metric.msssim(compared, original))
    return mean(bpp), mean(psnr_scores), mean(ssim_scores)
def test_validation(model_path, version, root):
    """Run encoder.py/decoder.py on every image under `root` and score the results.

    Args:
        model_path: directory containing encoder.pth and decoder.pth.
        version: run tag used to name the codes/results directories.
        root: directory of original images to evaluate.

    Returns:
        Tuple (mean bpp, mean PSNR, mean MS-SSIM) over all images.
    """
    import subprocess  # local import so this edit is self-contained

    # fix: create directories with os.makedirs instead of `mkdir -p` via os.system
    os.makedirs('codes_val/{}'.format(version), exist_ok=True)
    os.makedirs('res_val/{}'.format(version), exist_ok=True)
    bpp, psnr_scores, ssim_scores = [], [], []
    for filename in os.listdir(root):
        original = os.path.join(root, filename)
        filename = filename[:-4]  # strip the 4-char extension (e.g. '.png')
        os.makedirs('res_val/{}/{}'.format(version, filename), exist_ok=True)
        # fix: argument lists instead of interpolated shell strings — immune to
        # spaces/metacharacters in paths; check=True surfaces encode/decode failures
        subprocess.run(['python', 'encoder.py',
                        '--model', '{}/encoder.pth'.format(model_path),
                        '--input', original,
                        '--output', 'codes_val/{}/{}'.format(version, filename)],
                       check=True)
        subprocess.run(['python', 'decoder.py',
                        '--model', '{}/decoder.pth'.format(model_path),
                        '--input', 'codes_val/{}/{}.npz'.format(version, filename),
                        '--output', 'res_val/{}/{}'.format(version, filename)],
                       check=True)
        codes = 'codes_val/{}/{}.npz'.format(version, filename)
        compared = 'res_val/{}/{}/00.png'.format(version, filename)
        bpp.append(utils.calc_bpp(codes, original))
        psnr_scores.append(metric.psnr(original, compared))
        ssim_scores.append(metric.msssim(compared, original))
    return mean(bpp), mean(psnr_scores), mean(ssim_scores)
def sample_test100_outputs2(model, test_loader, device, checkpoint_path, modelName='sample'):
    """Sample dehazed outputs for every test image, save hazy/clear/pred JPEGs,
    and pickle the per-image PSNR/SSIM lists.

    Args:
        model: dehazing network; weights are loaded from `checkpoint_path`.
        test_loader: DataLoader yielding {'hazyImg', 'clearImg'} batches.
        device: torch device to run inference on.
        checkpoint_path: checkpoint file with a 'model_state_dict' entry.
        modelName: tag used in the saved output filenames.
    """
    print(f"Outputs sampler: device = {device}")
    model.to(device)
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()  # fix: inference must run in eval mode (dropout/batch-norm)

    imgCounter = 0
    psnrList = []
    ssimList = []
    print(f"Running output sampling with model states from {checkpoint_path}")
    with torch.no_grad():
        for idx, batch in enumerate(test_loader):
            hazyImgs = batch['hazyImg'].to(device)
            clearImgs = batch['clearImg'].to(device)
            outputs = model(hazyImgs)
            hazyImgs = hazyImgs.cpu()
            outputs = outputs.cpu()
            clearImgs = clearImgs.cpu()
            for i in range(len(outputs)):
                # tensors are CHW; convert each to an HWC numpy image
                hazyImg = torch.squeeze(hazyImgs[i]).permute(1, 2, 0).numpy()
                pred_img = torch.squeeze(outputs[i]).permute(1, 2, 0).numpy()
                clearImg = torch.squeeze(clearImgs[i]).permute(1, 2, 0).numpy()
                psnrVal = psnr(torch.tensor(clearImg), torch.tensor(pred_img))
                ssimVal = ssim(clearImg, pred_img, multichannel=True)
                print(f"PSNR = {psnrVal}")
                print(f" SSIM = {ssimVal}")
                psnrList.append(psnrVal)
                ssimList.append(ssimVal)
                # images are presumably float RGB in [0, 1] — TODO confirm range
                cv2.imwrite(f"../final_outputs/{imgCounter}_hazy.jpg",
                            cv2.cvtColor(np.uint8(hazyImg * 255), cv2.COLOR_RGB2BGR))
                cv2.imwrite(f"../final_outputs/{imgCounter}_clear.jpg",
                            cv2.cvtColor(np.uint8(clearImg * 255), cv2.COLOR_RGB2BGR))
                cv2.imwrite(f"../final_outputs/{imgCounter}_pred_{modelName}.jpg",
                            cv2.cvtColor(np.uint8(pred_img * 255), cv2.COLOR_RGB2BGR))
                imgCounter += 1
    # fix: close the pickle files deterministically (were opened without close())
    with open(f"../final_outputs/{modelName}_psnrList.p", 'wb') as f:
        pickle.dump(psnrList, f)
    with open(f"../final_outputs/{modelName}_ssimList.p", 'wb') as f:
        pickle.dump(ssimList, f)
def test100(model, test100_loader, device, checkpoint_path):
    """Evaluate the model on the test100 set: average MSE loss, PSNR and SSIM.

    Args:
        model: dehazing network; weights are loaded from `checkpoint_path`.
        test100_loader: DataLoader yielding {'hazyImg', 'clearImg'} batches.
        device: torch device to run inference on.
        checkpoint_path: checkpoint file with a 'model_state_dict' entry.
    """
    print(f"Evaluation: device = {device}")
    model.to(device)
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()  # fix: evaluation must run in eval mode (dropout/batch-norm)
    criterion = nn.MSELoss()
    avg_loss = 0
    n_batches = 0
    avg_psnr = 0
    avg_ssim = 0
    no_samples = 0
    print(
        f"Running model evaluation on test100 dataset with states from {checkpoint_path}"
    )
    with torch.no_grad():
        for idx, batch in enumerate(test100_loader):
            hazyImgs = batch['hazyImg'].to(device)
            clearImgs = batch['clearImg'].to(device)
            outputs = model(hazyImgs)
            avg_loss += criterion(outputs, clearImgs).item()
            # fix: was `n_batches = idx`, which undercounts by one and causes
            # ZeroDivisionError when the loader yields a single batch
            n_batches = idx + 1
            outputs = outputs.cpu()
            clearImgs = clearImgs.cpu()
            for i in range(len(outputs)):
                # CHW -> HWC numpy images for the metric functions
                pred_img = torch.squeeze(outputs[i]).permute(1, 2, 0).numpy()
                clearImg = torch.squeeze(clearImgs[i]).permute(1, 2, 0).numpy()
                avg_psnr += psnr(torch.tensor(clearImg), torch.tensor(pred_img))
                avg_ssim += ssim(clearImg, pred_img, multichannel=True)
                no_samples += 1
    # fix: average the sum of per-batch mean losses over the batch count, not an
    # approximated sample count (the last batch may be smaller than batch_size)
    avg_loss /= n_batches
    avg_psnr /= no_samples
    avg_ssim /= no_samples
    print(f"Test100: Average testing loss over {n_batches} batches = {avg_loss}")
    print(f"Test100: Number of samples = {no_samples}")
    print(f"Test100: Average PSNR = {avg_psnr}")
    print(f"Test100: Average SSIM = {avg_ssim}")
# --- PReNet_r training demo script ---------------------------------------
# fix: `device` was used below but never defined — NameError at runtime
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    torch.set_default_tensor_type("torch.cuda.FloatTensor")
    torch.backends.cudnn.benchmark = True

MAX_EPOCHS = 10
resize = Resize((128, 128))
dataset = Dataset("datasets/test/test12", resize)
loader = DataLoader(dataset, batch_size=4)
model = PReNet_r(recurrent_iter=6).to(device)
optimizer = Adam(model.parameters(), lr=0.001)
scheduler = MultiStepLR(optimizer, milestones=[30, 50, 80], gamma=0.2)

for epoch in range(MAX_EPOCHS):
    for x, y in loader:
        x = x.to(device)
        y = y.to(device)
        y_pred = model(x)
        # maximise SSIM by minimising its negative
        loss = -ssim(y, y_pred)
        loss.backward()
        with torch.no_grad():
            mse = torch.mean((y - y_pred) ** 2)
            p = psnr(mse)
        print(loss.item(), p.item())
        optimizer.step()
        optimizer.zero_grad()
    # fix: step the LR scheduler after the epoch's optimizer steps — calling it
    # before any optimizer.step() skips the initial LR and triggers a warning
    scheduler.step()
def get_psnr(original, compared):
    """Return the PSNR between two images after converting both via as_img_array."""
    ref = as_img_array(original)
    test = as_img_array(compared)
    return psnr(ref, test)
body = tfutil.conv2d(x, f=f, k=kernel_size, name="conv2d-body") body = tfu.conv2d(x, f=f, k=kernel_size, name="conv2d-body") body += head # tf.math.add(body, head) # 3. tail x = self.up_scaling(body, f, scale, name='up-scaling') tail = tfutil.conv2d(x, f=self.n_channel, k=kernel_size, name="conv2d-tail") # (-1, 384, 384, 3) tail = tfu.conv2d(x, f=self.n_channel, k=kernel_size, name="conv2d-tail") # (-1, 384, 384, 3) x = self.image_processing(tail, sign=1, name='post-processing') return x @@ -236,8 +237,8 @@ def build_model(self): self.train_op = self.opt.minimize(self.loss, global_step=self.global_step) # metrics self.psnr = tf.reduce_mean(metric.psnr(self.output, self.x_hr, m_val=1.)) self.ssim = tf.reduce_mean(metric.ssim(self.output, self.x_hr, m_val=1.)) self.psnr = tf.reduce_mean(metric.psnr(self.output, self.x_hr, m_val=1)) self.ssim = tf.reduce_mean(metric.ssim(self.output, self.x_hr, m_val=1)) # summaries tf.summary.image('lr', self.x_lr, max_outputs=self.batch_size) tf.summary.image('hr', self.x_hr, max_outputs=self.batch_size) tf.summary.image('generated-hr', self.output, max_outputs=self.batch_size) tf.summary.scalar("loss/l1_loss", self.loss) tf.summary.scalar("metric/psnr", self.psnr) tf.summary.scalar("metric/ssim", self.ssim) tf.summary.scalar("misc/lr", self.lr) # merge summary self.merged = tf.summary.merge_all() # model saver