def evaluate_ideal(self, lr, hr):
    if self.model == 'ESRGAN':
        lr = lr * 1.0 / 255.0
    h, w, d = lr.shape
    hrh, hrw, _ = hr.shape
    # HR canvas at upsize x the LR resolution (was hardcoded to 4)
    canvas = np.zeros((h * self.upsize, w * self.upsize, 3))
    size = self.PATCH_SIZE
    stride = self.PATCH_SIZE // 2
    pad = self.PATCH_SIZE // 8
    info = {'assignment': np.zeros((hrh, hrw))}
    for i in range(0, h - 1, stride):
        for j in range(0, w - 1, stride):
            lr_img = lr[i:i + size] if i + size < h else lr[i:]
            lr_img = lr_img[:, j:j + size, :] if j + size < w else lr_img[:, j:]
            lr_img = torch.FloatTensor(lr_img).to(self.device)
            lr_img = lr_img.permute((2, 0, 1)).unsqueeze(0)
            hr_img = hr[i * self.upsize:(i + size) * self.upsize,
                        j * self.upsize:(j + size) * self.upsize, :]

            # score every candidate model on this patch
            psnrscores = []
            ssimscores = []
            sr_predictions = []
            for sisr in self.SRmodels:
                hr_hat = sisr(lr_img)
                hr_hat = hr_hat.squeeze(0).permute(1, 2, 0).data.cpu().numpy()
                if self.model == 'ESRGAN':
                    hr_hat = hr_hat * 255.0
                hr_hat = np.clip(hr_hat, 0, 255)
                sr_predictions.append(hr_hat)
                psnr, ssim = util.calc_metrics(hr_img, hr_hat, crop_border=self.upsize)
                psnrscores.append(psnr)
                ssimscores.append(ssim)

            # destination/source indices: trim `pad` HR pixels off interior patch edges
            top, top_ = (0, 0) if i == 0 else ((i + pad) * self.upsize, pad * self.upsize)
            bot, bot_ = (hrh, size * self.upsize) if i + size >= h else \
                ((i + size - pad) * self.upsize, -pad * self.upsize)
            lef, lef_ = (0, 0) if j == 0 else ((j + pad) * self.upsize, pad * self.upsize)
            rig, rig_ = (hrw, size * self.upsize) if j + size >= w else \
                ((j + size - pad) * self.upsize, -pad * self.upsize)

            # paste the best-scoring model's prediction and record the choice
            idx = psnrscores.index(max(psnrscores))
            canvas[top:bot, lef:rig] = sr_predictions[idx][top_:bot_, lef_:rig_]
            info['assignment'][top:bot, lef:rig] = idx

    psnr, ssim = util.calc_metrics(hr, canvas, crop_border=self.upsize)
    info['psnr'] = psnr
    info['ssim'] = ssim
    info['SRimg'] = canvas.astype(np.uint8)
    return psnr, ssim, info
def evaluate_ideal(self, lr, hr):
    lr = lr * 1.0 / 255
    h, w, d = lr.shape
    hrh, hrw, _ = hr.shape
    # HR canvas at upsize x the LR resolution (was hardcoded to 4)
    canvas = np.zeros((h * self.upsize, w * self.upsize, 3))
    size = self.PATCH_SIZE
    stride = self.PATCH_SIZE // 2
    pad = self.PATCH_SIZE // 8
    info = []
    for i in range(0, h - 1, stride):
        for j in range(0, w - 1, stride):
            lr_img = lr[i:i + size] if i + size < h else lr[i:]
            lr_img = lr_img[:, j:j + size, :] if j + size < w else lr_img[:, j:]
            # BGR -> RGB, HWC -> NCHW
            lr_img = torch.from_numpy(np.transpose(lr_img[:, :, [2, 1, 0]], (2, 0, 1))).float()
            lr_img = lr_img.unsqueeze(0).to(self.device)
            hr_img = hr[i * self.upsize:(i + size) * self.upsize,
                        j * self.upsize:(j + size) * self.upsize, :]

            psnrscores = []
            ssimscores = []
            sr_predictions = []
            for sisr in self.SRmodels:
                hr_hat = sisr(lr_img).data.squeeze().float().cpu().clamp_(0, 1).numpy()
                hr_hat = np.transpose(hr_hat[[2, 1, 0], :, :], (1, 2, 0))
                hr_hat = (hr_hat * 255.0).round()
                sr_predictions.append(hr_hat)
                psnr, ssim = util.calc_metrics(hr_img, hr_hat, crop_border=self.upsize)
                psnrscores.append(psnr)
                ssimscores.append(ssim)

            top, top_ = (0, 0) if i == 0 else ((i + pad) * self.upsize, pad * self.upsize)
            bot, bot_ = (hrh, size * self.upsize) if i + size >= h else \
                ((i + size - pad) * self.upsize, -pad * self.upsize)
            lef, lef_ = (0, 0) if j == 0 else ((j + pad) * self.upsize, pad * self.upsize)
            rig, rig_ = (hrw, size * self.upsize) if j + size >= w else \
                ((j + size - pad) * self.upsize, -pad * self.upsize)

            info.append(max(psnrscores))
            idx = psnrscores.index(max(psnrscores))
            canvas[top:bot, lef:rig] = sr_predictions[idx][top_:bot_, lef_:rig_]

    psnr, ssim = util.calc_metrics(hr, canvas, crop_border=self.upsize)
    print(psnr, ssim)
    return psnr, ssim, np.array(info)
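# --- Illustrative sketch (not from this repo): the seam-trimming arithmetic
# both evaluate_ideal variants use. Interior patches contribute only their
# center (`pad` HR pixels trimmed per inner edge), while border patches keep
# the image boundary. The 1-D toy below checks that the trimmed patches still
# tile the whole HR axis; size/stride/pad/upsize values are made up.
import numpy as np

h, size, stride, pad, upsize = 48, 16, 8, 2, 4
covered = np.zeros(h * upsize, dtype=bool)
for i in range(0, h - 1, stride):
    top = 0 if i == 0 else (i + pad) * upsize
    bot = h * upsize if i + size >= h else (i + size - pad) * upsize
    covered[top:bot] = True
print(covered.all())  # True: full coverage, no gaps at the seams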
def evaluate_baseline(self, lr, hr):
    if self.model == 'ESRGAN':
        lr = lr * 1.0 / 255.0
    img = torch.FloatTensor(lr).to(self.device)
    lr_img = img.permute((2, 0, 1)).unsqueeze(0)
    self.SRmodels[0].eval()
    SR = self.SRmodels[0](lr_img)
    SR = SR.squeeze(0).permute(1, 2, 0).data.cpu().numpy()
    if self.model == 'ESRGAN':
        SR = np.clip(SR * 255.0, 0, 255)
        psnr, ssim = util.calc_metrics(hr, SR, 4)
    elif self.model == 'RCAN':
        SR = np.clip(SR, 0, 255)
        psnr, ssim = util.calc_metrics(hr, SR, 4)
    return psnr, ssim, SR
def Test(solver, dataloader, solver_log, current_epoch):
    psnr_list = []
    ssim_list = []
    val_loss = []
    for iter, batch in enumerate(dataloader):
        solver.feed_data(batch)
        iter_loss = solver.test()
        val_loss.append(iter_loss)

        ##### calculate psnr/ssim metrics #####
        visuals = solver.get_current_visual()
        psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
        psnr_list.append(psnr)
        ssim_list.append(ssim)

    print("PSNR: {:.2f} SSIM: {:.4f} Loss: {:.3f}".format(
        sum(psnr_list) / len(psnr_list),
        sum(ssim_list) / len(ssim_list),
        sum(val_loss) / len(val_loss)))
    return (sum(val_loss) / len(val_loss),
            sum(psnr_list) / len(psnr_list),
            sum(ssim_list) / len(ssim_list))
def evaluate_baseline(self, lr, hr):
    # lr = lr * 1.0 / 255
    img = torch.FloatTensor(lr).to(self.device)
    lr_img = img.permute((2, 0, 1)).unsqueeze(0)
    self.SRmodels[0].eval()
    SR = self.SRmodels[0](lr_img)
    SR = SR.squeeze(0).permute(1, 2, 0).data.cpu().numpy()
    psnr, ssim = util.calc_metrics(hr, SR, 4)
    SR = SR.round().astype(np.uint8)
    return psnr, ssim, SR
def evaluate_baseline(self, lr, hr):
    lr = lr * 1.0 / 255
    lr_img = torch.from_numpy(lr).to(self.device).permute(2, 0, 1).unsqueeze(0).float()
    SR = self.SRmodels[0](lr_img).data.squeeze().permute(1, 2, 0).cpu().clamp_(0, 1).numpy()
    SR = (SR * 255).round().astype(np.uint8)
    psnr, ssim = util.calc_metrics(hr, SR, crop_border=self.upsize)
    return psnr, ssim, SR
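# --- Illustrative sketch (not from this repo): the HWC <-> NCHW conversion
# convention shared by the evaluate_baseline variants above. The only
# assumptions are the shapes and the [0, 255] input range.
import numpy as np
import torch

img = np.random.randint(0, 256, (24, 24, 3)).astype(np.float32)   # HWC, [0, 255]
x = torch.from_numpy(img / 255.0).permute(2, 0, 1).unsqueeze(0)   # NCHW, [0, 1]
out = x.squeeze(0).permute(1, 2, 0).clamp_(0, 1).numpy()          # back to HWC
restored = (out * 255.0).round().astype(np.uint8)
assert np.array_equal(restored, img.astype(np.uint8))             # lossless round trip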
def applySISR(self, lr, action, hr):
    self.SRoptimizers[action].zero_grad()
    hr_hat = self.SRmodels[action](lr)
    loss = F.l1_loss(hr_hat, hr)
    loss.backward()
    self.SRoptimizers[action].step()

    hr_hat = hr_hat.squeeze(0).permute(1, 2, 0)
    hr = hr.squeeze(0).permute(1, 2, 0)
    hr_hat = hr_hat.detach().cpu().numpy()
    hr = hr.detach().cpu().numpy()
    psnr, ssim = util.calc_metrics(hr_hat, hr, crop_border=self.UPSIZE)
    return hr_hat, psnr, ssim, loss.item()
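# --- Illustrative sketch (not from this repo): the single-step L1 update
# pattern applySISR implements, with a toy x4 upscaler standing in for one
# entry of self.SRmodels. The model, optimizer, and shapes are assumptions.
import torch
import torch.nn.functional as F

model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 48, 3, padding=1),
    torch.nn.PixelShuffle(4),   # 48 channels -> 3 channels at 4x resolution
)
opt = torch.optim.Adam(model.parameters(), lr=1e-4)

lr_batch = torch.rand(1, 3, 16, 16)
hr_batch = torch.rand(1, 3, 64, 64)

opt.zero_grad()
loss = F.l1_loss(model(lr_batch), hr_batch)   # pixel-wise L1, as above
loss.backward()
opt.step()
print(loss.item())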
def Test(global_solver, val_loader, solver_log, current_r):
    psnr_list = []
    ssim_list = []
    val_loss_list = []
    for iter, batch in enumerate(val_loader):
        global_solver.feed_data(batch)
        iter_loss = global_solver.test()
        val_loss_list.append(iter_loss)

        ##### calculate psnr/ssim metrics #####
        visuals = global_solver.get_current_visual()
        psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
        psnr_list.append(psnr)
        ssim_list.append(ssim)

    ##### record loss/psnr/ssim #####
    solver_log['records']['val_loss'].append(' ')
    solver_log['records']['val_loss'].append(sum(val_loss_list) / len(val_loss_list))
    solver_log['records']['psnr'].append(' ')
    solver_log['records']['psnr'].append(sum(psnr_list) / len(psnr_list))
    solver_log['records']['ssim'].append(' ')
    solver_log['records']['ssim'].append(sum(ssim_list) / len(ssim_list))

    ##### record the best round #####
    round_is_best = False
    if solver_log['best_pred'] < (sum(psnr_list) / len(psnr_list)):
        solver_log['best_pred'] = sum(psnr_list) / len(psnr_list)
        round_is_best = True
        solver_log['best_round'] = current_r

    print("PSNR: %.2f SSIM: %.4f Loss: %.6f Best PSNR: %.2f in Round: [%d]"
          % (sum(psnr_list) / len(psnr_list),
             sum(ssim_list) / len(ssim_list),
             sum(val_loss_list) / len(val_loss_list),
             solver_log['best_pred'], solver_log['best_round']))

    global_solver.set_current_log(solver_log)
    global_solver.save_checkpoint(current_r, round_is_best)
    global_solver.save_current_log()
    return (sum(val_loss_list) / len(val_loss_list),
            sum(psnr_list) / len(psnr_list),
            sum(ssim_list) / len(ssim_list))
def Validate(client_solver, val_loader):
    psnr_list = []
    ssim_list = []
    val_loss_list = []
    for iter, batch in enumerate(val_loader):
        client_solver.feed_data(batch)
        iter_loss = client_solver.test()
        val_loss_list.append(iter_loss)

        ##### calculate psnr/ssim metrics #####
        visuals = client_solver.get_current_visual()
        psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
        psnr_list.append(psnr)
        ssim_list.append(ssim)

    print("PSNR: %.2f SSIM: %.4f"
          % (sum(psnr_list) / len(psnr_list), sum(ssim_list) / len(ssim_list)))
    return sum(psnr_list) / len(psnr_list), sum(ssim_list) / len(ssim_list)
def validate(self):
    scores = {}
    self.model.eval()
    for vset in self.validationsets:
        scores[vset] = []
        HR_dir = os.path.join(self.hr_rootdir, vset)
        LR_dir = os.path.join(self.lr_rootdir, vset, self.resfolder)

        # APPLY MODEL ON LR IMAGES
        HR_files = [os.path.join(HR_dir, f) for f in os.listdir(HR_dir)]
        LR_files = [os.path.join(LR_dir, f) for f in os.listdir(LR_dir)]
        HR_files.sort()
        LR_files.sort()

        # PSNR/SSIM SCORE FOR CURRENT VALIDATION SET
        for hr_file, lr_file in zip(HR_files, LR_files):
            hr = cv2.imread(hr_file, cv2.IMREAD_COLOR)
            lr = cv2.imread(lr_file, cv2.IMREAD_COLOR) * 1.0 / 255
            lr_img = torch.from_numpy(np.transpose(lr[:, :, [2, 1, 0]], (2, 0, 1))).float()
            lr_img = lr_img.unsqueeze(0).to(self.device)
            out = self.model(lr_img).data.squeeze().float().cpu().clamp_(0, 1).numpy()
            out = np.transpose(out[[2, 1, 0], :, :], (1, 2, 0))
            out = (out * 255.0).round()
            psnr, ssim = util.calc_metrics(hr, out, crop_border=self.upsize)
            print(hr_file, psnr)
            scores[vset].append([psnr, ssim])

        mu_psnr = np.mean(np.array(scores[vset])[:, 0])
        mu_ssim = np.mean(np.array(scores[vset])[:, 1])
        print(vset + ' scores', mu_psnr, mu_ssim)
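# --- Illustrative sketch (not from this repo): the [2, 1, 0] fancy-indexing
# in validate() is the BGR -> RGB flip needed because cv2.imread returns BGR.
# A quick equivalence check against cv2's own converter:
import cv2
import numpy as np

bgr = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
assert np.array_equal(bgr[:, :, [2, 1, 0]], cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))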
def main():
    parser = argparse.ArgumentParser(description='Test Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # initial configuration
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()

    # create folders
    util.mkdir_and_rename(opt['path']['res_root'])
    option.save(opt)

    # create test dataloaders
    bm_names = []
    test_loaders = []
    for ds_name, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s] Number of images: [%d]'
              % (dataset_opt['name'], len(test_set)))
        bm_names.append(dataset_opt['name'])

    # create solver (and load model)
    solver = create_solver(opt)

    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s" % (model_name, scale, degrad))

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)
        sr_list = []
        path_list = []
        total_psnr = []
        total_ssim = []
        total_time = []
        res_dict = OrderedDict()

        need_HR = test_loader.dataset.__class__.__name__.find('HR') >= 0

        for iter, batch in tqdm(enumerate(test_loader), total=len(test_loader)):
            solver.feed_data(batch, need_HR=need_HR, need_landmark=False)

            # measure forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append(t1 - t0)

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'][-1])

            # calculate PSNR/SSIM metrics in Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'][-1], visuals['HR'],
                                               crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(os.path.basename(batch['HR_path'][0]).replace('HR', model_name))
                res_dict[path_list[-1]] = {'psnr': psnr, 'ssim': ssim, 'time': t1 - t0}
            else:
                path_list.append(os.path.basename(batch['LR_path'][0]))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            average_res_str = "PSNR: %.2f SSIM: %.4f Speed: %.4f" % (
                sum(total_psnr) / len(total_psnr),
                sum(total_ssim) / len(total_ssim),
                sum(total_time) / len(total_time))
            print(average_res_str)
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----"
                  % (bm, sum(total_time) / len(total_time)))

        # save SR results for further evaluation on MATLAB
        save_img_path = os.path.join(opt['path']['res_root'], bm)
        print("===> Saving SR images of [%s]... Save Path: [%s]\n" % (bm, save_img_path))
        if not os.path.exists(save_img_path):
            os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            imageio.imwrite(os.path.join(save_img_path, name), img)
        if need_HR:
            with open(os.path.join(save_img_path, 'result.json'), 'w') as f:
                json.dump(res_dict, f, indent=2)
            with open(os.path.join(save_img_path, 'average_result.txt'), 'w') as f:
                f.write(average_res_str + '\n')

    print("==================================================")
    print("===> Finished !")
def getstats(self, sr, hr):
    psnr, ssim = util.calc_metrics(hr, sr, crop_border=self.upsize)
    return psnr, ssim
def main():
    parser = argparse.ArgumentParser(description='Test Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # initial configuration
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']:
        model_name += 'plus'

    # create test dataloaders
    bm_names = []
    test_loaders = []
    for _, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s] Number of images: [%d]'
              % (test_set.name(), len(test_set)))
        bm_names.append(test_set.name())

    # create solver (and load model)
    solver = create_solver(opt)

    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s" % (model_name, scale, degrad))

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)
        sr_list = []
        path_list = []
        total_psnr = []
        total_ssim = []
        total_time = []

        need_HR = test_loader.dataset.__class__.__name__.find('LRHR') >= 0

        for iter, batch in enumerate(test_loader):
            solver.feed_data(batch, need_HR=need_HR)

            # measure forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append(t1 - t0)

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'])

            # calculate PSNR/SSIM metrics in Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(os.path.basename(batch['HR_path'][0]).replace('HR', model_name))
                print("[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), psnr, ssim, (t1 - t0)))
            else:
                path_list.append(os.path.basename(batch['LR_path'][0]))
                print("[%d/%d] %s || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), (t1 - t0)))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            print("PSNR: %.2f SSIM: %.4f Speed: %.4f"
                  % (sum(total_psnr) / len(total_psnr),
                     sum(total_ssim) / len(total_ssim),
                     sum(total_time) / len(total_time)))
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----"
                  % (bm, sum(total_time) / len(total_time)))

        # save SR results for further evaluation on MATLAB
        if need_HR:
            save_img_path = os.path.join('./results/SR/' + degrad, model_name, bm, "x%d" % scale)
        else:
            save_img_path = os.path.join('./results/SR/' + bm, model_name, "x%d" % scale)
        print("===> Saving SR images of [%s]... Save Path: [%s]\n" % (bm, save_img_path))
        if not os.path.exists(save_img_path):
            os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            imageio.imwrite(os.path.join(save_img_path, name), img)

    print("==================================================")
    print("===> Finished !")
def main():
    parser = argparse.ArgumentParser(description='Test Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # make sure CUDA_VISIBLE_DEVICES is set before importing torch
    from utils import util
    from solvers import create_solver
    from datasets import create_dataloader
    from datasets import create_dataset

    # initial configuration
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']:
        model_name += 'plus'

    # create test dataloaders
    bm_names = []
    test_loaders = []
    percent10 = True
    for _, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s] Number of images: [%d]'
              % (test_set.name(), len(test_set)))
        bm_names.append(test_set.name())

    # create solver (and load model)
    solver = create_solver(opt)

    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s" % (model_name, scale, degrad))

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)
        sr_list = []
        path_list = []
        total_psnr = []
        total_ssim = []
        total_time = []

        need_HR = test_loader.dataset.__class__.__name__.find('LRHR') >= 0

        for iter, batch in enumerate(test_loader):
            solver.feed_data(batch, need_HR=need_HR)

            # measure forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append(t1 - t0)

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'])

            # calculate PSNR/SSIM metrics in Python (only batch size 1 is supported here!)
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(os.path.basename(batch['HR_path'][0]).replace('HR', model_name))
                print("[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), psnr, ssim, (t1 - t0)))
            else:
                file_dir = batch['LR_path'][0].split('/')[-2]
                path_list.append(os.path.join(file_dir, os.path.basename(batch['LR_path'][0])))
                print("[%d/%d] %s || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.join(file_dir, os.path.basename(batch['LR_path'][0])),
                         (t1 - t0)))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            print("PSNR: %.2f SSIM: %.4f Speed: %.4f"
                  % (sum(total_psnr) / len(total_psnr),
                     sum(total_ssim) / len(total_ssim),
                     sum(total_time) / len(total_time)))
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----"
                  % (bm, sum(total_time) / len(total_time)))

        if need_HR:
            save_img_path = os.path.join('../submit/SR/' + degrad, model_name, bm, "x%d" % scale)
        else:
            save_img_path = os.path.join('../submit/')
        print("===> Saving SR images of [%s]... Save Path: [%s]\n" % (bm, save_img_path))

        middle_name = 'h_Res' if percent10 else 'h_Sub25_Res'
        filter_idx = -1 if percent10 else -7
        if not os.path.exists(save_img_path):
            os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            store_path = os.path.join(save_img_path, name)
            base_dir = os.path.dirname(store_path)[:filter_idx] + middle_name
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)
            store_path = os.path.join(base_dir, os.path.basename(name))
            print('write into {}.'.format(store_path))
            imageio.imwrite(store_path, img)
        percent10 = False

    print("==================================================")
    print("===> Finished !")
def main():
    parser = argparse.ArgumentParser(description='Train Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    print('Inside Train.py')
    print(opt['datasets']['train']['data_path'])

    # random seed
    seed = opt['solver']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print("===> Random Seed: [%d]" % seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloaders
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt)
            print('===> Train Dataset: %s Number of images: [%d]'
                  % (train_set.name(), len(train_set)))
            if train_loader is None:
                raise ValueError("[Error] The training data does not exist")
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('===> Val Dataset: %s Number of images: [%d]'
                  % (val_set.name(), len(val_set)))
        else:
            raise NotImplementedError(
                "[Error] Dataset phase [%s] in *.json is not recognized." % phase)

    solver = create_solver(opt)
    scale = opt['scale']
    model_name = opt['networks']['which_model'].upper()

    print('===> Start Train')
    print("==================================================")

    solver_log = solver.get_current_log()
    NUM_EPOCH = int(opt['solver']['num_epochs'])
    start_epoch = solver_log['epoch']
    print("Method: %s || Scale: %d || Epoch Range: (%d ~ %d)"
          % (model_name, scale, start_epoch, NUM_EPOCH))

    for epoch in range(start_epoch, NUM_EPOCH + 1):
        print('\n===> Training Epoch: [%d/%d]... Learning Rate: %f'
              % (epoch, NUM_EPOCH, solver.get_current_learning_rate()))

        # Initialization
        solver_log['epoch'] = epoch

        # Train model
        train_loss_list = []
        with tqdm(total=len(train_loader),
                  desc='Epoch: [%d/%d]' % (epoch, NUM_EPOCH), miniters=1) as t:
            for iter, batch in enumerate(train_loader):
                solver.feed_data(batch)
                iter_loss = solver.train_step()
                batch_size = batch['LR'].size(0)
                train_loss_list.append(iter_loss * batch_size)
                t.set_postfix_str("Batch Loss: %.4f" % iter_loss)
                t.update()

        solver_log['records']['train_loss'].append(sum(train_loss_list) / len(train_set))
        solver_log['records']['lr'].append(solver.get_current_learning_rate())

        print('\nEpoch: [%d/%d] Avg Train Loss: %.6f'
              % (epoch, NUM_EPOCH, sum(train_loss_list) / len(train_set)))

        print('===> Validating...')
        psnr_list = []
        ssim_list = []
        val_loss_list = []
        for iter, batch in enumerate(val_loader):
            solver.feed_data(batch)
            iter_loss = solver.test()
            val_loss_list.append(iter_loss)

            # calculate evaluation metrics
            visuals = solver.get_current_visual()
            psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
            psnr_list.append(psnr)
            ssim_list.append(ssim)

            # if opt["save_image"]:
            #     solver.save_current_visual(epoch, iter)

        solver_log['records']['val_loss'].append(sum(val_loss_list) / len(val_loss_list))
        solver_log['records']['psnr'].append(sum(psnr_list) / len(psnr_list))
        solver_log['records']['ssim'].append(sum(ssim_list) / len(ssim_list))

        # record the best epoch
        epoch_is_best = False
        if solver_log['best_pred'] < (sum(psnr_list) / len(psnr_list)):
            solver_log['best_pred'] = sum(psnr_list) / len(psnr_list)
            epoch_is_best = True
            solver_log['best_epoch'] = epoch

        print("[%s] PSNR: %.2f SSIM: %.4f Loss: %.6f Best PSNR: %.2f in Epoch: [%d]"
              % (val_set.name(), sum(psnr_list) / len(psnr_list),
                 sum(ssim_list) / len(ssim_list), sum(val_loss_list) / len(val_loss_list),
                 solver_log['best_pred'], solver_log['best_epoch']))

        solver.set_current_log(solver_log)
        solver.save_checkpoint(epoch, epoch_is_best)
        solver.save_current_log()

        # update lr
        solver.update_learning_rate(epoch)

    print('===> Finished !')
def getstats(self, sr, hr):
    sr = (sr * 255).clip(0, 255)
    hr = hr * 255
    psnr, ssim = util.calc_metrics(hr, sr, crop_border=self.upsize)
    return psnr, ssim
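# --- Illustrative sketch (not from this repo): why this getstats variant
# rescales both images to [0, 255] first. PSNR measures error against a fixed
# peak, so both inputs must share the same range; psnr() below is a simple
# stand-in, not this repo's util.calc_metrics.
import numpy as np

def psnr(a, b, peak=255.0):
    mse = np.mean((a.astype(np.float64) - b.astype(np.float64)) ** 2)
    return 10 * np.log10(peak ** 2 / mse)

hr = np.random.rand(32, 32, 3)                               # ground truth in [0, 1]
sr = np.clip(hr + 0.05 * np.random.randn(32, 32, 3), 0, 1)   # noisy estimate

print(psnr(sr * 255, hr * 255))   # matched [0, 255] ranges: a sensible score (~26 dB)
print(psnr(sr, hr * 255))         # mismatched ranges: a meaningless score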
def main():
    torch.backends.cudnn.benchmark = True
    args = option.add_args()
    opt = option.parse(args.opt,
                       nblocks=args.nblocks,
                       nlayers=args.nlayers,
                       iterations=args.iterations,
                       trained_model=args.trained_path,
                       lr_path=args.lr_path)

    # fix random seed
    # seed_torch(opt['solver']['manual_seed'])

    # create train and val dataloaders
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt)
            print('===> Train Dataset: %s Number of images: [%d]'
                  % (train_set.name(), len(train_set)))
            if train_loader is None:
                raise ValueError("[Error] The training data does not exist")
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('===> Val Dataset: %s Number of images: [%d]'
                  % (val_set.name(), len(val_set)))
        else:
            raise NotImplementedError(
                "[Error] Dataset phase [%s] in *.json is not recognized." % phase)

    solver = create_solver(opt)
    scale = opt['scale']
    model_name = opt['networks']['which_model'].upper()

    print('===> Start Train')
    print("==================================================")

    solver_log = solver.get_current_log()
    NUM_EPOCH = int(opt['solver']['num_epochs'])
    start_epoch = solver_log['epoch']
    print("Method: %s || Scale: %d || Epoch Range: (%d ~ %d)"
          % (model_name, scale, start_epoch, NUM_EPOCH))

    for epoch in range(start_epoch, NUM_EPOCH + 1):
        print('\n===> Training Epoch: [%d/%d]... Learning Rate: %f'
              % (epoch, NUM_EPOCH, solver.get_current_learning_rate()))

        # Initialization
        solver_log['epoch'] = epoch

        # Train model
        train_loss_list = []
        with tqdm(total=len(train_loader),
                  desc='Epoch: [%d/%d]' % (epoch, NUM_EPOCH), miniters=1) as t:
            for iter, batch in enumerate(train_loader):
                solver.feed_data(batch)
                iter_loss = solver.train_step()
                batch_size = batch['LR'].size(0)
                train_loss_list.append(iter_loss * batch_size)
                t.set_postfix_str("Batch Loss: %.4f" % iter_loss)
                t.update()

        solver_log['records']['train_loss'].append(sum(train_loss_list) / len(train_set))
        solver_log['records']['lr'].append(solver.get_current_learning_rate())

        print('\nEpoch: [%d/%d] Avg Train Loss: %.6f'
              % (epoch, NUM_EPOCH, sum(train_loss_list) / len(train_set)))

        print('===> Validating...')
        psnr_list = []
        ssim_list = []
        val_loss_list = []
        for iter, batch in enumerate(val_loader):
            solver.feed_data(batch)
            iter_loss = solver.test()
            val_loss_list.append(iter_loss)

            # calculate evaluation metrics
            visuals = solver.get_current_visual()
            psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
            psnr_list.append(psnr)
            ssim_list.append(ssim)

            if opt["save_image"]:
                solver.save_current_visual(epoch, iter)

        solver_log['records']['val_loss'].append(sum(val_loss_list) / len(val_loss_list))
        solver_log['records']['psnr'].append(sum(psnr_list) / len(psnr_list))
        solver_log['records']['ssim'].append(sum(ssim_list) / len(ssim_list))

        # record the best epoch
        epoch_is_best = False
        if solver_log['best_pred'] < (sum(psnr_list) / len(psnr_list)):
            solver_log['best_pred'] = sum(psnr_list) / len(psnr_list)
            epoch_is_best = True
            solver_log['best_epoch'] = epoch

        print("[%s] PSNR: %.2f SSIM: %.4f Loss: %.6f Best PSNR: %.2f in Epoch: [%d]"
              % (val_set.name(), sum(psnr_list) / len(psnr_list),
                 sum(ssim_list) / len(ssim_list), sum(val_loss_list) / len(val_loss_list),
                 solver_log['best_pred'], solver_log['best_epoch']))

        solver.set_current_log(solver_log)
        solver.save_checkpoint(epoch, epoch_is_best)
        solver.save_current_log()

        # update lr
        # solver.update_learning_rate()
        solver.scheduler.step()

    print('===> Finished !')
def SR(solver, opt, model_name):
    # build the test datasets (takes at most ~0.002 sec each)
    bm_names = []
    test_loaders = []
    for _, dataset_opt in sorted(opt['datasets'].items()):
        start = time.time()
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s] Number of images: [%d] elapsed time: %.4f sec'
              % (test_set.name(), len(test_set), time.time() - start))
        bm_names.append(test_set.name())

    # run SR once per test set
    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)
        sr_list = []
        path_list = []
        total_psnr = []
        total_ssim = []
        total_time = []
        scale = 4

        need_HR = test_loader.dataset.__class__.__name__.find('LRHR') >= 0

        for iter, batch in enumerate(test_loader):
            solver.feed_data(batch, need_HR=need_HR)

            # measure forward time
            t0 = time.time()
            solver.test()  # run super-resolution
            t1 = time.time()
            total_time.append(t1 - t0)

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'])

            # calculate PSNR/SSIM metrics in Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(os.path.basename(batch['HR_path'][0]).replace('HR', model_name))
                print("[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), psnr, ssim, (t1 - t0)))
            else:
                path_list.append(os.path.basename(batch['LR_path'][0]))
                print("[%d/%d] %s || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), (t1 - t0)))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            print("PSNR: %.2f SSIM: %.4f Speed: %.4f"
                  % (sum(total_psnr) / len(total_psnr),
                     sum(total_ssim) / len(total_ssim),
                     sum(total_time) / len(total_time)))
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----"
                  % (bm, sum(total_time) / len(total_time)))

        # save SR results for further evaluation on MATLAB
        if need_HR:
            save_img_path = os.path.join('./results/SR/' + degrad, model_name, bm, "x%d" % scale)
        else:
            save_img_path = os.path.join('./results/SR/' + bm, model_name, "x%d" % scale)
        if not os.path.exists(save_img_path):
            os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            s = time.time()
            # cv2.imwrite (with RGB -> BGR) was the fastest writer tried:
            # ~0.609 sec total, ~0.07 sec per image. Alternatives measured:
            #   save(...)                   5.3 sec
            #   Image.fromarray(...).save   2.4 sec
            #   imageio.imwrite             9.8 sec
            cv2.imwrite(os.path.join(save_img_path, name),
                        cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
            print("NAME: %s DOWNLOAD TIME:%s\n" % (name, time.time() - s))
        print("===> Total Saving SR images of [%s]... Save Path: [%s] Time: %s\n"
              % (bm, save_img_path, time.time() - s))

    print("==================================================")
    print("===> Finished !!")
def main():
    args = option.add_args()
    opt = option.parse(args.opt,
                       nblocks=args.nblocks,
                       nlayers=args.nlayers,
                       iterations=args.iterations,
                       trained_model=args.trained_model,
                       lr_path=args.lr_path)
    opt = option.dict_to_nonedict(opt)

    # initial configuration
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']:
        model_name += 'plus'

    # create test dataloaders
    bm_names = []
    test_loaders = []
    for _, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s] Number of images: [%d]'
              % (test_set.name(), len(test_set)))
        bm_names.append(test_set.name())

    # create solver (and load model)
    solver = create_solver(opt)

    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s" % (model_name, scale, degrad))

    # optionally save SR images via a background writer
    if opt['save_image']:
        para_save = Paralle_save_img()
        para_save.begin_background()

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)
        total_psnr = []
        total_ssim = []
        total_time = []

        need_HR = test_loader.dataset.__class__.__name__.find('LRHR') >= 0
        if need_HR:
            save_img_path = os.path.join('./results/SR/' + degrad, model_name, bm, "x%d" % scale)
        else:
            save_img_path = os.path.join('./results/SR/' + bm, model_name, "x%d" % scale)
        if not os.path.exists(save_img_path):
            os.makedirs(save_img_path)

        for iter, batch in enumerate(test_loader):
            solver.feed_data(batch, need_HR=need_HR)

            # measure forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append(t1 - t0)

            visuals = solver.get_current_visual(need_HR=need_HR)

            # calculate PSNR/SSIM metrics in Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                name = os.path.basename(batch['HR_path'][0]).replace(
                    '.', ('_x{}_' + model_name + '.').format(scale))
                print("[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), psnr, ssim, (t1 - t0)))
            else:
                print("[%d/%d] %s || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), (t1 - t0)))

            if opt['save_image']:
                name = os.path.basename(batch['LR_path'][0]).replace(
                    '.', ('_x{}_' + model_name + '.').format(scale))
                para_save.put_image_path(filename=os.path.join(save_img_path, name),
                                         img=visuals['SR'])

        total_psnr, total_ssim = np.array(total_psnr), np.array(total_ssim)
        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            print("PSNR: %.2f(+/-%.2f) SSIM: %.4f Speed: %.4f"
                  % (total_psnr.mean(), total_psnr.std(), total_ssim.mean(),
                     sum(total_time) / len(total_time)))
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----"
                  % (bm, sum(total_time) / len(total_time)))

    if opt['save_image']:
        para_save.end_background()

    print("==================================================")
    print("===> Finished !")
lrdata.sort()
hrdata.sort()

psnr_scores = []
ssim_scores = []

# RUN ON SET5 DATASET
for lrname, hrname in zip(lrdata, hrdata):
    img = imageio.imread(os.path.join(LRDIR, lrname))
    hr = imageio.imread(os.path.join(HRDIR, hrname))
    img = torch.FloatTensor(img).to(device)
    img = img.permute((2, 0, 1)).unsqueeze(0)

    # MAKE THE INFERENCE
    model.eval()
    with torch.no_grad():
        sr = model(img)
    sr = sr.squeeze(0).permute(1, 2, 0).data.cpu().numpy()

    psnr, ssim = util.calc_metrics(hr, sr, 4)
    psnr_scores.append(psnr)
    ssim_scores.append(ssim)
    print('psnr score: {:.4f} | {}'.format(psnr, lrname))

print('mean psnr and ssim: ', np.mean(psnr_scores), np.mean(ssim_scores))
checkpoint.done()
def main():
    # Test phase
    print('===> Start Test')
    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)
        sr_list = []
        path_list = []
        total_psnr = []
        total_ssim = []
        total_time = []

        need_HR = test_loader.dataset.__class__.__name__.find('LRHR') >= 0

        for iter, batch in enumerate(test_loader):
            solver.feed_data(batch, need_HR=need_HR)

            # measure forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append(t1 - t0)

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'])

            # calculate PSNR/SSIM metrics in Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(os.path.basename(batch['HR_path'][0]).replace('HR', model_name))
                print("[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), psnr, ssim, (t1 - t0)))
            else:
                path_list.append(os.path.basename(batch['LR_path'][0]))
                print("[%d/%d] %s || Timer: %.4f sec ."
                      % (iter + 1, len(test_loader),
                         os.path.basename(batch['LR_path'][0]), (t1 - t0)))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            print("PSNR: %.2f SSIM: %.4f Speed: %.4f"
                  % (sum(total_psnr) / len(total_psnr),
                     sum(total_ssim) / len(total_ssim),
                     sum(total_time) / len(total_time)))
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----"
                  % (bm, sum(total_time) / len(total_time)))

        # save SR results for further evaluation on MATLAB
        if need_HR:
            save_img_path = os.path.join('./results/SR/' + degrad, model_name, bm, "x%d" % scale)
        else:
            save_img_path = os.path.join('./results/SR/' + bm, model_name, "x%d" % scale)
        print("===> Saving SR images of [%s]... Save Path: [%s]\n" % (bm, save_img_path))
        if not os.path.exists(save_img_path):
            os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            imageio.imwrite(os.path.join(save_img_path, name), img)

    print("==================================================")
    print("===> Finished !")