def dostuff(self, direction, a, tree, subs, head, i):
    """Append a distance feature for subs[i] relative to head.

    The feature value is the quantized absolute distance (number of
    words in between); see utility.py for the quantization bins.
    """
    child = subs[i]
    if direction == Feature.LEFT:
        gap = head.span[0] - child.span[1]
    else:
        gap = child.span[0] - head.span[1]
    a.append(self.make(direction, tree, head, child, quantize(gap)))
def count(tree, sentence):
    """Build a Heavy feature for a non-terminal tree node.

    Returns None for terminals; otherwise combines the binned span
    width, the quantized distance from the node's right edge to the
    sentence end, the node label, and punctuation features for the
    final word inside the span and the word following it.
    """
    if tree.is_terminal():
        return None
    # FIX: original had a stray closing parenthesis:
    # `tree.binned_span_width())` — a syntax error.
    binned_len = tree.binned_span_width()
    distance = quantize(len(sentence) - tree.span[1])  ## will be moved into tree
    ## will be integrated into tree by passing a sentence, but not storing the sentence
    final_punc = make_punc(sentence[tree.span[1] - 1])
    # NOTE(review): this indexes one past the span's right edge; it
    # presumably relies on a sentinel token at sentence end — confirm
    # callers pad the sentence, otherwise this raises IndexError for
    # spans ending at the last word.
    follow_punc = make_punc(sentence[tree.span[1]])
    return Heavy(binned_len, distance, tree.label, final_punc, follow_punc)
def extract(self, tree, sentence):
    """Extract right-boundary features for `tree`, threading `waitings`.

    A node that is not sentence-final cannot know its right neighbor
    yet, so it is queued on `tree.waitings`; ancestors resolve those
    queued nodes once the right sibling's first (tag, word) is known.
    Returns the list of features produced at this node.
    """
    if tree.is_terminal():
        tree.waitings = []
        return []
    tree.distance = quantize(len(sentence) - tree.span[1])  ## will probably be moved into tree
    a = []
    # for all non-right-most children
    for i, sub in enumerate(tree.subs[:-1]):
        # get its right sibling's first (postag, word) pair
        right_pos_word = tree.subs[i+1].get_tag_word(0)
        # resolve every node that was waiting for its right context
        for waiting in sub.waitings:
            a.append(self.make(waiting, waiting.get_tag_word(-1), right_pos_word))
    if tree.distance == 0:
        # i am a right-most constituent
        # NOTE(review): `tree.waitings` is not assigned on this path;
        # a parent reading `tree.subs[-1].waitings` would hit an
        # AttributeError — confirm sentence-final nodes never have
        # their waitings read.
        a.append(self.make(tree, tree.get_tag_word(-1), ("_", "_")))
    else:
        ## there is still stuff to my right, so wait for my parents
        tree.waitings = tree.subs[-1].waitings + [tree]
    return a
def test(self):
    """Evaluate the model on the test loader for every scale.

    The metric is selected by `args.testfunc` ('PSNR', 'mse', or
    'accuracy'); for non-2D data (`self.dim != 2`) a negative RMSE is
    accumulated instead. Logs per-scale results and saves a checkpoint
    unless running test-only.
    """
    epoch = self.scheduler.last_epoch
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()
    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            # tqdm_test = tqdm(self.loader_test, ncols=80)
            # for idx_img, (lr, hr, filename) in enumerate(tqdm_test):
            for idx_img, (lr, hr, filename) in enumerate(self.loader_test):
                filename = filename[0]
                # skip metric evaluation when there is no ground truth
                # (hr is a dummy scalar) or data is not 2D images
                no_eval = (hr.nelement() == 1) or (self.dim != 2)
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]
                sr = self.model(lr, idx_scale)
                if self.dim == 2:
                    sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                if not no_eval:
                    # dispatch on the configured test metric
                    if self.args.testfunc == 'PSNR':
                        eval_results = utility.calc_psnr(
                            sr, hr, scale, self.args.rgb_range,
                            benchmark=self.loader_test.dataset.benchmark)
                    elif self.args.testfunc == 'mse':
                        # negated so that "higher is better" holds
                        eval_results = -torch.nn.functional.mse_loss(sr, hr)
                    elif self.args.testfunc == 'accuracy':
                        eval_results = 1 - torch.nn.functional.l1_loss(
                            (sr > 0).float(), hr)
                    else:
                        print('Unknown function for testing', self.args.testfunc)
                        eval_results = 0
                    eval_acc += eval_results
                if self.dim != 2:
                    # non-image data: accumulate negative RMSE on CPU
                    eval_acc += -np.sqrt(
                        np.mean((hr[0].data.cpu().numpy() -
                                 sr[0].data.cpu().numpy())**2))
                    #print('debug hr sr', hr, sr)
                    #print(hr-sr)
                    #(sr - hr).pow(2).mean())
                    #eval_acc += (hr-sr).data.cpu().pow(2).mean()
                    #np.linalg.norm(hr[0].data.cpu().numpy()-sr[0].data.cpu().numpy())
                save_list.extend([lr, hr])
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\t{}: {:.4f} (Best: {:.4f} @epoch {})'.format(
                    self.args.data_test, scale, self.args.testfunc,
                    self.ckp.log[-1, idx_scale], best[0][idx_scale],
                    best[1][idx_scale] + 1))
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    """Evaluate a two-stage (pyramid + recompose) SR model.

    Stage 1 produces three pyramid outputs; they are upsampled to a
    common size, recomposed into the final SR image `phr`, and PSNR is
    computed between `phr` and `hr`. Intermediate outputs and masks
    m1..m3 are quantized and saved alongside the result.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()
    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, lrr, hq, filename, _) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr, lrr, hq = self.prepare(lr, hr, lrr, hq)
                else:
                    lr, = self.prepare(lr)
                # normalize all inputs to [0, 1]
                lr = lr / 255.0
                hr = hr / 255.0
                hq = hq / 255.0
                lrr = lrr / 255.0
                [b, c, h, w] = hr.shape
                # stage 1: pyramid prediction at three resolutions
                phr1, phr2, phr3 = self.model(lr, 3)
                Img_up = nn.Upsample(scale_factor=2, mode='bilinear')
                Img_up_4x = nn.Upsample(scale_factor=4, mode='bilinear')
                # bring every pyramid level to the target resolution
                phr1_2 = Img_up_4x(phr3)
                phr2_2 = Img_up(phr2)
                phr3_2 = phr1
                # stage 2: fuse the aligned predictions
                input_step2 = [lr, phr1_2, phr2_2, phr3_2]
                phr, m1, m2, m3 = self.recompose(input_step2, 3)
                phr3 = utility.quantize(phr3_2 * 255, self.args.rgb_range)
                lr = utility.quantize(lr * 255, self.args.rgb_range)
                hr = utility.quantize(hr * 255, self.args.rgb_range)
                # NOTE(review): `lr` was already rescaled to [0, 255] on
                # the previous line, so this multiplies by 255 twice —
                # confirm whether plr_nf is intentionally over-scaled.
                plr_nf = utility.quantize(lr * 255, self.args.rgb_range)
                phr = utility.quantize(phr * 255, self.args.rgb_range)
                phr1_2 = utility.quantize(phr1_2 * 255, self.args.rgb_range)
                phr2_2 = utility.quantize(phr2_2 * 255, self.args.rgb_range)
                phr3_2 = utility.quantize(phr3_2 * 255, self.args.rgb_range)
                # masks are in [0, 2]; halve before scaling to rgb_range
                m1 = utility.quantize(m1 / 2 * 255, self.args.rgb_range)
                m2 = utility.quantize(m2 / 2 * 255, self.args.rgb_range)
                m3 = utility.quantize(m3 / 2 * 255, self.args.rgb_range)
                save_list = [
                    hr, lr, phr3, plr_nf, phr, phr1_2, phr2_2, phr3_2,
                    m1, m2, m3
                ]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        phr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale, epoch)
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test, scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale], best[1][idx_scale] + 1))
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    """Evaluate an arbitrary-scale SR model on (scale, scale2) pairs.

    Supports asymmetric scaling: `scale` (height) and `scale2` (width)
    may differ. Prints average PSNR/SSIM per scale pair; nothing is
    logged to the checkpoint here.
    """
    self.model.eval()
    with torch.no_grad():
        for idx_scale, _ in enumerate(self.scale):
            self.loader_test.dataset.set_scale(idx_scale)
            scale = self.args.scale[idx_scale]
            scale2 = self.args.scale2[idx_scale]
            eval_psnr = 0
            eval_ssim = 0
            for idx_img, (lr, hr, filename, _) in enumerate(self.loader_test):
                filename = filename[0]
                # prepare LR & HR images
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)
                # crop so dimensions divide evenly by both scale factors
                lr, hr = self.crop_border(lr, hr, scale, scale2)
                # inference
                self.model.get_model().set_scale(scale, scale2)
                sr = self.model(lr)
                # evaluation
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                if not no_eval:
                    eval_psnr += utility.calc_psnr(
                        sr, hr, [scale, scale2], self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                    eval_ssim += utility.calc_ssim(
                        sr, hr, [scale, scale2],
                        benchmark=self.loader_test.dataset.benchmark)
                # save SR results
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)
            if scale == scale2:
                print('[{} x{}]\tPSNR: {:.3f} SSIM: {:.4f}'.format(
                    self.args.data_test, scale,
                    eval_psnr / len(self.loader_test),
                    eval_ssim / len(self.loader_test),
                ))
            else:
                print('[{} x{}/x{}]\tPSNR: {:.3f} SSIM: {:.4f}'.format(
                    self.args.data_test, scale, scale2,
                    eval_psnr / len(self.loader_test),
                    eval_ssim / len(self.loader_test),
                ))
def test(self):
    """Evaluate with optional TorchScript JIT, profiling, and timing.

    Measures per-image latency after `args.warmup` images, prints the
    autograd profiler table for each forward pass, logs PSNR per scale,
    and saves a checkpoint unless running test-only.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()
    if self.args.enable_jit:
        # disable profiling-guided JIT so scripting takes effect immediately
        torch._C._jit_set_profiling_mode(False)
        torch._C._jit_set_profiling_executor(False)
        jit_model = torch.jit.script(self.model)
    timer_test = utility.timer()
    time_d = 0
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            idx = 0
            for idx_img, (lr, hr, filename) in enumerate(tqdm_test):
                # only time images after the warmup phase
                if idx >= self.args.warmup:
                    now = time.time()
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]
                if self.sycl:
                    lr = lr.to('dpcpp')
                    hr = hr.to('dpcpp')
                with torch.autograd.profiler.profile() as prof:
                    if self.args.enable_jit:
                        sr = jit_model(lr, idx_scale)
                    else:
                        sr = self.model(lr, idx_scale)
                print(prof.key_averages().table(sort_by="cpu_time_total"))
                if idx >= self.args.warmup:
                    per_time = time.time() - now
                    time_d += per_time
                idx += 1
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                    save_list.extend([lr, hr])
                if self.args.save_results:
                    #self.ckp.save_results(filename, save_list, scale)
                    self.ckp.save_results_nopostfix(
                        filename, save_list, scale)
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test, scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale], best[1][idx_scale] + 1))
    # FIX: `refresh=True` was previously passed inside str.format(...),
    # where it was silently ignored; it belongs to write_log.
    self.ckp.write_log(
        'Total time: {:.2f}s, ave time: {:.2f}s\n'.format(
            time_d, time_d / (len(self.loader_test) - self.args.warmup)),
        refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self, test_only=False, starttime=0):
    """Evaluate the model; optionally report timing / ETA.

    Args:
        test_only: when True, print accumulated total/average forward
            time instead of the ETA estimate.
        starttime: training start time (datetime); when nonzero and not
            test_only, print elapsed time and a projected finish time.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('Evaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)), False)
    self.model.eval()

    # We can use custom forward function
    def _test_forward(x, scale):
        if self.args.self_ensemble:
            # geometric self-ensemble (8 transformed forward passes)
            return utility.x8_forward(x, self.model, self.args.precision)
        elif self.args.chop_forward:
            # tile the input to bound memory use
            return utility.chop_forward(x, self.model, scale)
        else:
            return self.model(x)

    set_name = self.args.data_test
    for idx_scale, scale in enumerate(self.scale):
        eval_acc = 0
        self._scale_change(idx_scale, self.loader_test)
        for idx_img, ((lr, _), hr, _) in enumerate(self.loader_test):
            # with no ground truth, hr carries the filename instead
            no_eval = isinstance(hr[0], torch._six.string_classes)
            if no_eval:
                lr = self.prepare([lr])[0]
                filename = hr[0]
            else:
                lr, hr = self.prepare([lr, hr])
                filename = idx_img + 1
            rgb_range = self.args.rgb_range
            timer_test = utility.timer()
            sr = _test_forward(lr, scale)
            # accumulate (count, total seconds) of forward passes
            self.test_time[1] += timer_test.toc()
            self.test_time[0] += 1
            sr = utility.quantize(sr, rgb_range)
            if no_eval:
                save_list = [sr]
            else:
                eval_acc += utility.calc_PSNR(sr, hr.div(rgb_range),
                                              set_name, scale)
                save_list = [sr, lr.div(rgb_range), hr.div(rgb_range)]
            if self.args.save_results:
                self.ckp.save_results(filename, save_list, scale)
        self.ckp.log_test[-1, idx_scale] = eval_acc / len(self.loader_test)
        best = self.ckp.log_test.max(0)
        performance = 'PSNR: {:.3f}'.format(self.ckp.log_test[-1, idx_scale])
        self.ckp.write_log(
            '[{} x{}]\t{} (Best: {:.3f} from epoch {})'.format(
                set_name, scale, performance,
                best[0][idx_scale], best[1][idx_scale] + 1))
    is_best = (best[1][0] + 1 == epoch)
    if test_only:
        self.ckp.write_log(
            'Total time: {:.3f}s\r\nAvg. time: {:.3f}s\n'.format(
                self.test_time[1],
                self.test_time[1] / self.test_time[0]),
            refresh=True)
    elif starttime != 0:
        # estimate remaining wall-clock time from per-epoch average
        now = datetime.datetime.now()
        elapsed = now - starttime
        est = now + (elapsed / epoch) * (self.args.epochs - epoch)
        self.ckp.write_log("Elapsed: {}\n".format(str(elapsed)))
        print('Will finish: {}\n'.format(
            est.strftime('%d-%m-%Y-%H:%M:%S')))
    self.ckp.save(self, epoch, is_best=is_best)
def test(self):
    """Evaluate on every test dataset and scale; supports multi-exit models.

    With `args.multi_exit` the model returns one SR image per exit;
    PSNR is tracked per exit in `eval_accs`, and only the last exit's
    PSNR is accumulated in the checkpoint log.
    """
    torch.set_grad_enabled(False)
    epoch = self.optimizer.get_last_epoch() + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
    self.model.eval()
    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            # eval_acc = 0
            eval_accs = []
            for lr, hr, filename, _ in tqdm(d, ncols=80):
                lr, hr = self.prepare(lr, hr)
                if self.args.multi_exit:
                    srs = self.model(lr, idx_scale)
                    sr_num = len(srs)
                    for i in range(sr_num):
                        sr_filename = filename[0] + '_' + str(i + 1)
                        sr = utility.quantize(srs[i], self.args.rgb_range)
                        save_list = [sr]
                        # lazily grow one accumulator per exit
                        if len(eval_accs) <= i:
                            eval_accs.append(0)
                        eval_accs[i] += utility.calc_psnr(
                            sr, hr, scale, self.args.rgb_range,
                            dataset=d, force_y=self.args.force_y)
                        # only the final exit feeds the checkpoint log
                        if i == sr_num - 1:
                            self.ckp.log[
                                -1, idx_data, idx_scale] += utility.calc_psnr(
                                    sr, hr, scale, self.args.rgb_range,
                                    dataset=d, force_y=self.args.force_y)
                        save_list.extend([lr, hr])
                        if self.args.save_results:
                            self.ckp.save_results(d, sr_filename,
                                                  save_list, scale)
                else:
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)
                    save_list = [sr]
                    # self.ckp.log[-1, idx_data, idx_scale]
                    self.ckp.log[
                        -1, idx_data, idx_scale] += utility.calc_psnr(
                            sr, hr, scale, self.args.rgb_range,
                            dataset=d, force_y=self.args.force_y)
                    if self.args.save_gt:
                        save_list.extend([lr, hr])
                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0],
                                              save_list, scale)
            # if self.args.multi_exit:
            #     max_eval_acc = 0
            #     for i in range(len(eval_accs)):
            #         if eval_accs[i] > max_eval_acc:
            #             max_eval_acc = eval_accs[i]
            #     eval_acc = max_eval_acc
            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            # self.ckp.log[-1, idx_data, idx_scale] = eval_acc / len(d)
            best = self.ckp.log.max(0)
            if self.args.multi_exit:
                output_str = ""
                for i in range(len(eval_accs)):
                    # NOTE(review): divides by the number of test
                    # loaders, not by len(d) (images in this dataset)
                    # as the checkpoint log does — confirm intended.
                    eval_accs[i] = eval_accs[i] / len(self.loader_test)
                    output_str += (
                        "PSNR " + str(i) + ": %.3f " % eval_accs[i])
                self.ckp.write_log(
                    '[{} x{}]\t{} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name, scale, output_str,
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1))
            else:
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name, scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1))
    if not self.args.test_only:
        # per-scale best checkpoints
        for idx_scale, scale in enumerate(self.scale):
            self.ckp.save_scale(
                self, epoch, scale,
                is_best=(best[1][idx_data, idx_scale] + 1 == epoch))
    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')
    if self.args.save_results:
        self.ckp.end_background()
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    torch.set_grad_enabled(True)
def test(self):
    """Evaluate a meta-upscale model that takes a scale coordinate map.

    For each image, builds the (coord map, mask) pair via
    `input_matrix_wpn`, runs the model, and reconstructs the SR image
    by masked selection. Logs PSNR/SSIM per scale.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()
    timer_test = utility.timer()
    device = torch.device('cpu' if self.args.cpu else 'cuda')
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            eval_acc_ssim = 0
            self.loader_test.dataset.set_scale(idx_scale)
            #tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, _) in enumerate(self.loader_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)
                N, C, H, W = lr.size()
                scale = self.args.scale[idx_scale]
                outH, outW = int(H * scale), int(W * scale)
                #_,_,outH,outW = hr.size()
                #timer_test.tic()
                # per-pixel scale coordinates + validity mask for the
                # meta-upscale head
                scale_coord_map, mask = self.input_matrix_wpn(
                    H, W, self.args.scale[idx_scale])
                #position, mask = self.pos_matrix(H,W,self.args.scale[idx_scale])
                #print(timer_test.toc())
                if self.args.n_GPUs > 1 and not self.args.cpu:
                    # replicate the coord map so each GPU gets a copy
                    scale_coord_map = torch.cat(
                        [scale_coord_map] * self.args.n_GPUs, 0)
                else:
                    scale_coord_map = scale_coord_map.to(device)
                timer_test.tic()
                sr = self.model(lr, idx_scale, scale_coord_map)
                timer_test.hold()
                # keep only valid output pixels and reshape to the
                # target resolution
                re_sr = torch.masked_select(sr, mask.to(device))
                sr = re_sr.contiguous().view(N, C, outH, outW)
                sr = utility.quantize(sr, self.args.rgb_range)
                #timer_test.hold()
                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                    eval_acc_ssim += utility.calc_ssim(
                        sr, hr, scale,
                        benchmark=self.loader_test.dataset.benchmark)
                    save_list.extend([lr, hr])
                if self.args.save_results:
                    # NOTE(review): dead assignment, likely a leftover
                    # debugger breakpoint anchor
                    a = 1
                    self.ckp.save_results(filename, save_list, scale)
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            # print(timer_test.acc/100)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} SSIM: {:.4f} (Best: {:.3f} @epoch {})'
                .format(self.args.data_test, scale,
                        self.ckp.log[-1, idx_scale],
                        eval_acc_ssim / len(self.loader_test),
                        best[0][idx_scale], best[1][idx_scale] + 1))
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    """Evaluate the model on RAW-domain data and save developed PNGs.

    Times each forward pass with timeit, then develops hr/lr/sr into
    display RGB using the DNG's white balance and color matrix (via
    rawpy), applies the HLG transfer curve, and writes PNGs. Logs PSNR
    per scale and saves a checkpoint unless running test-only.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    scale_list = self.scale  # e.g. [2, 3, 4, 8]
    self.ckp.add_log(torch.zeros(1, len(scale_list)))
    self.model.eval()
    no_eval = 0
    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(scale_list):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=120)
            times = []
            for idx_img, (lr, hr, filename) in enumerate(tqdm_test):
                # fixed seed so any stochastic preprocessing is repeatable
                np.random.seed(seed=0)
                filename = filename[0]
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]

                # FIX: the original closure body was
                # `return sr = self.model(lr, idx_scale)`, which is a
                # syntax error; time a plain forward pass instead.
                def work():
                    return self.model(lr, idx_scale)

                times.append(timeit.timeit(work, number=1))
                # timeit discards work()'s result, so run the model
                # again to obtain sr
                sr = self.model(lr, idx_scale)

                fn = filename.split("/")[-1]
                dng_filename = filename + "/payload_N000.dng"
                with rawpy.imread(dng_filename) as raw:
                    for img_type, array in [("hr", hr), ("lr", lr),
                                            ("sr", sr)]:
                        # CHW tensor -> HWC numpy image
                        rgb = np.moveaxis(
                            np.squeeze(array.cpu().numpy()), 0, 2)
                        # apply camera white balance, then clip
                        wb = np.diagflat(
                            raw.camera_whitebalance.copy()[:-1])
                        cam2rgb = raw.color_matrix.copy()[:, :-1]
                        rgb = rgb @ wb if img_type != "lr" else np.clip(
                            rgb @ wb, 0.0, 1.0)
                        rgb = np.clip(rgb, 0, rgb[:, :, 1].max())
                        if img_type == "sr":
                            # slightly tighter ceiling for SR highlights
                            rgb = np.clip(rgb, 0,
                                          rgb[:, :, 1].max() - (0.1))
                        img = rgb @ cam2rgb.T
                        img[img < 0] = 0
                        # HLG transfer curve, then 8-bit
                        img = common.hlg(img)
                        img = np.clip(255 * img, 0, 255).astype(np.uint8)
                        imageio.imsave(
                            "~/output/" + fn + "_" + str(idx_img) + "_" +
                            img_type + ".png", img)

                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                eval_acc += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range,
                    benchmark=self.loader_test.dataset.benchmark)
                save_list.extend([lr, hr])
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, idx_img,
                                          scale)
            print("Average time:", sum(times) / len(times))
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test, scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale], best[1][idx_scale] + 1))
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    """Evaluate a recurrent video-denoising model, tracking per-frame PSNR.

    The checkpoint log has shape [epoch, n_frames, n_datasets]; the
    recurrent hidden state is reset per sequence and PSNR is
    accumulated per frame index across sequences.
    """
    torch.set_grad_enabled(False)
    epoch = self.optimizer.get_last_epoch()
    self.ckp.write_log('\nEvaluation:')
    ## Add Log to loss_log matrix
    ## with shape [idx_epoch, number_dataset]
    self.ckp.add_log(
        torch.zeros(1, self.args.n_frames, len(self.loader_test)))
    self.model.eval()
    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for nseqs, tseqs, pathname in tqdm(d, ncols=80):
            # short display name from the tail of each path
            filename = [fname[-10:] for fname in pathname]
            nseqs, tseqs = self.prepare(nseqs, tseqs)
            # reset the recurrent state before each sequence
            self.model.model.init_hidden()
            #save_list = []
            save_list = {}
            for idx_frame, (nseq, tseq) in enumerate(zip(nseqs, tseqs)):
                ## fakeTarget for t'th denoised frame
                ## fakeNoise for (t-1)'th noised frame alignmented
                ## after optical-flow
                fakeTarget = self.model(nseq)
                fakeTarget = utility.quantize(fakeTarget,
                                              self.args.rgb_range)
                save_list['Est'] = fakeTarget
                self.ckp.log[-1, idx_frame, idx_data] += utility.calc_psnr(
                    fakeTarget, tseq, self.args.rgb_range, dataset=d)
                if self.args.save_gt:
                    save_list['Noise'] = nseq
                    save_list['Target'] = tseq
                    #save_list.extend([nseq, tseq])
                if self.args.save_results:
                    self.ckp.save_results(d, filename[0], save_list,
                                          idx_frame)
        # average over sequences in this dataset
        self.ckp.log[-1, :, idx_data] /= len(d)
        self.ckp.write_log('{}\'th epoch, PSNR via frame: {}'.format(
            epoch, self.ckp.log[-1, :, idx_data]))
        # best over epochs, then best over frame indices
        epoch_best, epoch_idx = self.ckp.log.max(0)
        best, frame_idx = epoch_best.max(0)
        best_frame_idx = frame_idx[idx_data]
        best_epoch_idx = epoch_idx[best_frame_idx, idx_data]
        # NOTE(review): `best[0]` always reports the first dataset's
        # best value rather than `best[idx_data]` — confirm intended.
        self.ckp.write_log(
            '[{}]\t PSNR: {:.3f} (Best: {:.3f} @frame {} @epoch {})'.
            format(d.dataset.name,
                   self.ckp.log[-1, :, idx_data].mean(),
                   best[0], best_frame_idx + 1, best_epoch_idx + 1))
    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')
    if self.args.save_results:
        self.ckp.end_background()
    ## plot PNSR via frame index.s
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best_epoch_idx + 1 == epoch))
    self.ckp.plot_psnr(self.args.n_frames, dimension=0, name='idx_frame')
    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    torch.set_grad_enabled(True)
def train(self):
    """Run one training epoch of the two-stage FlatCam reconstruction.

    Per batch: simulate a FlatCam measurement from the HR target (when
    `args.is_fcSim`), reconstruct an initial estimate with
    `model_init`, refine it with `model`, and optimize the loss against
    the HR target (with optional gradient clipping).
    """
    self.loss.step()
    epoch = self.optimizer.get_last_epoch() + 1
    if self.args.resume > 0:
        epoch = self.args.resume + 1
    lr = self.optimizer.get_lr()
    self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(
        epoch, Decimal(lr)))
    self.loss.start_log()
    self.model.train()
    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        # only the HR image is used; the measurement is simulated below
        _, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()
        self.optimizer.zero_grad()
        # Initial reconstruction input
        img = utility.quantize(hr, self.args.rgb_range)
        if self.args.is_fcSim:
            # simulate the FlatCam sensor measurement from HR
            img = common.flatcamSamp(img / self.args.rgb_range)
            img = common.apply_noise(img, self.args.sigma)
            img = common.Raw2Bayer(img)
            img = common.make_separable(img)
        sr0 = self.model_init(img, idx_scale)
        # Enhance reconstruction
        sr = self.model(sr0, idx_scale)
        loss = self.loss(sr, hr)
        if self.args.model == 'kcsres_mwcnn2':
            # FIX: was `self.loss(sr_init, hr)`, but `sr_init` is never
            # defined (NameError); the initial reconstruction is `sr0`.
            loss = loss + self.loss(sr0, hr)
        loss.backward()
        if self.args.gclip > 0:
            utils.clip_grad_value_(self.model.parameters(),
                                   self.args.gclip)
        self.optimizer.step()
        timer_model.hold()
        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))
        timer_data.tic()
    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
    self.optimizer.schedule()
def test(self):
    """Evaluate the two-stage FlatCam pipeline; report refined and initial PSNR.

    Crops HR to a square, simulates the FlatCam measurement (when
    `args.is_fcSim`), reconstructs with `model_init` then `model`, and
    logs PSNR for both stages. In test-only mode the refined, initial,
    and original images are saved as PNGs under `Results_DL/`.
    """
    torch.set_grad_enabled(False)
    epoch = self.optimizer.get_last_epoch()
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
    self.model.eval()
    timer_test = utility.timer()
    save_folder = 'Results_DL/' + self.args.save + '/' + \
        self.args.data_test[0] + '/'
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    # if self.args.save_results:
    self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            count = 0
            self.init_psnr = 0
            for lr, hr, filename, _ in tqdm(d, ncols=80):
                # only HR is used; the measurement is simulated from it
                _, hr = self.prepare(lr, hr)
                # Prepare data for test_only
                #if not self.args.is_fcSim:
                #if not self.args.test_only:
                # crop to the largest centered-at-origin square
                _, _, h, w = hr.size()
                idx = min(h, w)
                hr = hr[:, :, 0:idx, 0:idx]  # squazsied
                #if not idx == 256:
                #    hr = F.interpolate(hr, [256, 256])
                img = utility.quantize(hr, self.args.rgb_range)
                if self.args.is_fcSim:
                    # simulate the FlatCam sensor measurement from HR
                    img = common.flatcamSamp(img / self.args.rgb_range)
                    img = common.apply_noise(img, self.args.sigma)
                    img = common.Raw2Bayer(img)
                    img = common.make_separable(img)
                    #img = sim_fc_bayerNorm
                #else:  # THis is real data
                #    img = img
                sr0 = self.model_init(img, idx_scale)
                sr = self.model(sr0, idx_scale)
                count = count + 1
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range, dataset=d)
                # PSNR of the stage-1 (initial) reconstruction
                sr0 = utility.quantize(sr0, self.args.rgb_range)
                self.init_psnr += utility.calc_psnr(sr0, hr, scale,
                                                    self.args.rgb_range,
                                                    dataset=d)
                #if self.args.test_only:
                #    plt.imsave(save_folder + filename[0] + '.png',
                #               torch.squeeze(sr).permute(1, 2, 0).detach().cpu().numpy() /self.args.rgb_range )
                if self.args.test_only:
                    sr0 = torch.squeeze(sr0).permute(
                        1, 2, 0).detach().cpu().numpy()
                    sr = torch.squeeze(sr).permute(
                        1, 2, 0).detach().cpu().numpy()
                    hr = torch.squeeze(hr).permute(
                        1, 2, 0).detach().cpu().numpy()
                    # Save Results
                    plt.imsave(save_folder + filename[0] + '.png',
                               sr / self.args.rgb_range)
                    plt.imsave(
                        save_folder + '__init_' + filename[0] + '.png',
                        sr0 / self.args.rgb_range)
                    plt.imsave(
                        save_folder + '__Org_' + filename[0] + '.png',
                        hr / self.args.rgb_range)
                if self.args.save_gt:
                    save_list.extend([lr, hr])
                #print(cur_psnr, init_psnr)
                if self.args.save_results:
                    self.ckp.save_results(d, filename[0], save_list, scale)
            self.init_psnr = self.init_psnr / count
            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f}, \t init {:.3f} (Best: {:.3f} @epoch {})'
                .format(d.dataset.name, scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        self.init_psnr,
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1))
    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')
    # if self.args.save_results:
    self.ckp.end_background()
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    torch.set_grad_enabled(True)
def binned_len(self):
    """Return the quantized span width, memoized on the instance."""
    cached = self._bin_len
    if cached is None:
        cached = quantize(self.span_width())
        self._bin_len = cached
    return cached
def test(self, is_teacher=False):
    """Evaluate the teacher or student model of a quantization-KD setup.

    Args:
        is_teacher: evaluate `self.t_model` when True, else the
            (quantized) student `self.s_model`.
    """
    torch.set_grad_enabled(False)
    epoch = self.epoch
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
    if is_teacher:
        model = self.t_model
    else:
        model = self.s_model
    model.eval()
    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            i = 0
            for lr, hr, filename in tqdm(d, ncols=80):
                i += 1
                lr, hr = self.prepare(lr, hr)
                # model returns (SR image, intermediate residual)
                sr, s_res = model(lr)
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                cur_psnr = utility.calc_psnr(sr, hr, scale,
                                             self.args.rgb_range,
                                             dataset=d)
                self.ckp.log[-1, idx_data, idx_scale] += cur_psnr
                if self.args.save_gt:
                    save_list.extend([lr, hr])
                if self.args.save_results:
                    # NOTE(review): bare `args` (not `self.args`) —
                    # works only if a module-level `args` exists;
                    # confirm.
                    save_name = f'{args.k_bits}bit_{filename[0]}'
                    self.ckp.save_results(d, save_name, save_list, scale)
            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}] PSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    d.dataset.name, scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + 1))
            self.writer_train.add_scalar(
                f'psnr', self.ckp.log[-1, idx_data, idx_scale], self.epoch)
    if self.args.save_results:
        self.ckp.end_background()
    if not self.args.test_only:
        is_best = (best[1][0, 0] + 1 == epoch)
        self.ckp.plot_psnr(epoch)
        # checkpoint always stores the student model's state
        # NOTE(review): `self.sheduler` — likely a misspelling of
        # `scheduler`; verify against the attribute set in __init__.
        state = {
            'epoch': epoch,
            'state_dict': self.s_model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.sheduler.state_dict()
        }
        util.save_checkpoint(state, is_best,
                             checkpoint=self.ckp.dir + '/model')
    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    torch.set_grad_enabled(True)
def test(self):
    """Evaluate blind SR with estimated degradation maps.

    For each LR image, estimates the noise level (`model_NLEst`) and
    blur-kernel PCA coefficients (`model_KMEst`), assembles a per-pixel
    degradation map (JPEG quality, noise, scale, kernel), upsamples it
    and the LR input to HR size, and runs the restoration model.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    scale_list = self.args.scale
    self.ckp.add_log(torch.zeros(1, len(scale_list)))
    self.model.eval()
    no_eval = 0
    self.model_NLEst.eval()
    self.model_KMEst.eval()
    matrix = matrix_init()
    # PCA basis for blur-kernel covariance -> coefficient projection
    V_pca_ = sio.loadmat('data/V.mat')
    V_pca_ = V_pca_['V_pca']
    V_pca = torch.from_numpy(V_pca_).float().cuda()
    # V_pca = V_pca.t()
    V_pca = V_pca.contiguous().view(15, 225, 1, 1)
    timer_test = utility.timer()
    with torch.no_grad():
        best_psnr = 0
        for idx_scale, scale in enumerate(scale_list):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=120)
            for idx_img, (lr_, hr_, filename) in enumerate(tqdm_test):
                filename = filename[0]
                quality_factor = 90
                no_eval = (hr_.nelement() == 1)
                if no_eval:
                    [lr_] = self.prepare([lr_])
                else:
                    lr_, hr_ = self.prepare([lr_, hr_])
                _, _, hei, wid = lr_.data.size()
                hei, wid = lr_.shape[2:]
                # encode the JPEG quality factor as a constant map
                quality_factor = (105.0 - quality_factor) / 255.0 * \
                    torch.ones([1, 1, hei, wid]).float().cuda()
                # encode the scale factor as a constant map
                sf = scale / 16.0
                scale_factor = torch.ones(1, 1, hei, wid).float().cuda() * sf
                ## Estimating noise level
                sigma_est = self.model_NLEst(lr_, quality_factor, 0)
                ## Estimating kernel
                ker_est = self.model_KMEst(
                    lr_,
                    torch.cat((scale_factor, quality_factor, sigma_est), 1),
                    0)
                ker_est = ker_est * (scale ** 2)
                ## convert cov matrix to PCA coefficients
                ker_est = cov2pca(matrix.cuda(), V_pca, ker_est)
                hei, wid = hr_.shape[2:]
                # FIX: was `sigma`, an undefined name (NameError) — the
                # estimated noise map is `sigma_est`.
                deg_map = torch.cat(
                    (quality_factor, sigma_est, scale_factor, ker_est), 1)
                deg_map = F.interpolate(deg_map, [hei, wid], mode='bicubic')
                lr_ = F.interpolate(lr_, [hei, wid], mode='bicubic')
                sr = self.model(lr_, deg_map, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                if no_eval:
                    eval_acc += 0
                else:
                    eval_acc += utility.calc_psnr(
                        sr, hr_, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, idx_img,
                                          scale)
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            # track whether this scale's PSNR beats the running best
            if best_psnr < self.ckp.log[-1, idx_scale]:
                is_best = True
                best_psnr = self.ckp.log[-1, idx_scale]
            else:
                is_best = False
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test, scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale], best[1][idx_scale] + 1))
    mean_st = self.ckp.log.mean(1)
    best_mean = mean_st.max(0)
    # print(best_mean)
    # print(best_mean[1][0] + 1 == epoch)
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best)
# Quick single-image test: load a trained ArbRCAN checkpoint and
# super-resolve one LR image to an arbitrary target size.
# FIX: the original fragment began with a dangling `else:` — the `if`
# head was lost; reconstructed as a CUDA-availability check.
# NOTE(review): confirm the original condition (it may also have
# honored an `args.cpu` flag).
if torch.cuda.is_available():
    device = 'cuda:0'
else:
    device = 'cpu'

model = ArbRCAN(args).to(device)
ckpt = torch.load(
    'experiment/ArbRCAN/model/model_' + str(args.resume) + '.pt',
    map_location=device)
model.load_state_dict(ckpt)
model.eval()

# load lr image as a 1xCxHxW float tensor
lr = imageio.imread(args.dir_img)
lr = np.array(lr)
lr = torch.Tensor(lr).permute(2, 0, 1).contiguous().unsqueeze(0).to(device)

# model is trained on scale factors in range [1, 4]
# one can also try out-of-distribution scale factors but the results
# may be not very promising
assert args.sr_size[0] / lr.size(2) > 1 and args.sr_size[0] / lr.size(2) <= 4
assert args.sr_size[1] / lr.size(3) > 1 and args.sr_size[1] / lr.size(3) <= 4

with torch.no_grad():
    # asymmetric scales: height and width may differ
    scale = args.sr_size[0] / lr.size(2)
    scale2 = args.sr_size[1] / lr.size(3)
    model.set_scale(scale, scale2)
    sr = model(lr)
    sr = utility.quantize(sr, args.rgb_range)

sr = sr.data.mul(255 / args.rgb_range)
sr = sr[0, ...].permute(1, 2, 0).cpu().numpy()
filename = 'experiment/quick_test/results/{}x{}'.format(
    int(args.sr_size[0]), int(args.sr_size[1]))
misc.imsave('{}.png'.format(filename), sr)
def test(self):
    """Evaluate the model on the test set, measuring PSNR/SSIM and pure
    inference time.

    The loader is traversed twice per scale: a first warm-up pass (after
    splitting kernels via each module's _prepare()) and a second timed pass
    whose per-image forward time is accumulated in timer_test between
    tic()/hold() calls.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()
    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            self.loader_test.dataset.set_scale(idx_scale)
            eval_acc = 0
            eval_acc_ssim = 0
            # Kernel split
            # Note that, this part of code does not need to be executed at each run.
            # After training, one can run this part of code once and save the splitted kernels.
            for m in self.model.modules():
                if hasattr(m, '_prepare'):
                    m._prepare()
            # First pass: warm-up traversal, results discarded.
            for idx_img, (lr, hr, filename, _) in enumerate(self.loader_test):
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)
                lr, hr = self.crop_border(lr, hr, scale)
                sr = self.model(lr, idx_scale)
            # run a second time to record inference time
            for idx_img, (lr, hr, filename, _) in enumerate(self.loader_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)
                lr, hr = self.crop_border(lr, hr, scale)
                # Only the forward call is timed.
                timer_test.tic()
                sr = self.model(lr, idx_scale)
                timer_test.hold()
                sr = utility.quantize(sr, self.args.rgb_range)
                hr = utility.quantize(hr, self.args.rgb_range)
                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                    eval_acc_ssim += utility.calc_ssim(
                        sr, hr, scale,
                        benchmark=self.loader_test.dataset.benchmark)
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}] {:.4f}s\tPSNR: {:.3f} SSIM: {:.4f} (Best: {:.3f} @epoch {})'
                .format(self.args.data_test, scale,
                        timer_test.release() / len(self.loader_test),
                        eval_acc / len(self.loader_test),
                        eval_acc_ssim / len(self.loader_test),
                        best[0][idx_scale], best[1][idx_scale] + 1))
    # self.ckp.write_log(
    #     'Total time: {:.2f}s\n'.format(timer_test.release()/len(self.loader_test)), refresh=True
    # )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    """Run evaluation over every test set / scale pair, log PSNR, and
    checkpoint the model when not in test-only mode.

    Gradients are disabled for the duration of the call and re-enabled on
    exit; the total parameter count is printed once as a sanity check.
    """
    torch.set_grad_enabled(False)
    # ImageNet-style (de)normalization transforms; currently unused in the
    # forward path (the corresponding calls are disabled below).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    unnormalize = transforms.Normalize(mean=[-2.118, -2.036, -1.804],
                                       std=[4.367, 4.464, 4.444])
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
    self.model.eval()
    self.args.Noisy = False
    # Report model size once per evaluation run.
    num_params = sum(param.numel() for param in self.model.parameters())
    print('Total number of parameters: %d' % num_params)
    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, test_loader in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            test_loader.dataset.set_scale(idx_scale)
            for lr, hr, filename, _ in tqdm(test_loader, ncols=80):
                lr, hr = self.prepare(lr, hr)
                # lr = normalize(lr)
                sr = self.model(lr, idx_scale)
                # sr = unnormalize(sr)
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range, dataset=test_loader)
                if self.args.save_gt:
                    save_list.extend([lr, hr])
                if self.args.save_results:
                    self.ckp.save_results(
                        test_loader, filename[0], save_list, scale)
            # Average accumulated PSNR over the dataset and report.
            self.ckp.log[-1, idx_data, idx_scale] /= len(test_loader)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    test_loader.dataset.name,
                    scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + 1))
    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')
    if self.args.save_results:
        self.ckp.end_background()
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    torch.set_grad_enabled(True)
def test(self):
    """Evaluate the 4-iteration dehazing model on the test set.

    The model returns (A, t, J) triples for four refinement iterations;
    PSNR is computed for each iteration's dehazed output J against the
    latent (haze-free) image, but only iter1's PSNR enters the checkpoint
    log / best-epoch bookkeeping.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, 1))
    self.model.eval()
    # pdb.set_trace()
    # Buffer filled by the (currently unused) forward hook below.
    self.feature_map_visualization = []

    def module_forward_hook(module, input, output):
        # Capture the hooked module's input/output activations as numpy
        # arrays for offline visualization.
        input_numpy = input[0].squeeze().cpu().numpy()
        output_numpy = output[0].squeeze().cpu().numpy()
        self.feature_map_visualization.append(input_numpy)
        self.feature_map_visualization.append(output_numpy)

    timer_test = utility.timer()
    with torch.no_grad():
        eval_acc = 0
        eval_acc_iter2 = 0
        eval_acc_iter3 = 0
        eval_acc_iter4 = 0
        tqdm_test = tqdm(self.loader_test, ncols=80)
        for idx_img, (haze, latent, filename, _) in enumerate(tqdm_test):
            filename = filename[0]
            # A 1-element latent tensor marks "no ground truth available".
            no_eval = (latent.nelement() == 1)
            if not no_eval:
                haze, latent = self.prepare([haze, latent])
            else:
                haze = self.prepare([haze])[0]
            # print(self.model)
            if not self.args.test_only:
                A_iter1, t_iter1, J_iter1, A_iter2, t_iter2, J_iter2, A_iter3, t_iter3, J_iter3, A_iter4, t_iter4, J_iter4 = self.model(
                    haze)
            else:
                # handle = self.model.model.resnet50[0].layer4[2].relu.register_forward_hook(module_forward_hook)
                A_iter1, t_iter1, J_iter1, A_iter2, t_iter2, J_iter2, A_iter3, t_iter3, J_iter3, A_iter4, t_iter4, J_iter4 = self.model(
                    haze)
                # handle.remove()
            # Clamp every intermediate output to the valid RGB range.
            A_iter1 = utility.quantize(A_iter1, self.args.rgb_range)
            t_iter1 = utility.quantize(t_iter1, self.args.rgb_range)
            J_iter1 = utility.quantize(J_iter1, self.args.rgb_range)
            A_iter2 = utility.quantize(A_iter2, self.args.rgb_range)
            t_iter2 = utility.quantize(t_iter2, self.args.rgb_range)
            J_iter2 = utility.quantize(J_iter2, self.args.rgb_range)
            A_iter3 = utility.quantize(A_iter3, self.args.rgb_range)
            t_iter3 = utility.quantize(t_iter3, self.args.rgb_range)
            J_iter3 = utility.quantize(J_iter3, self.args.rgb_range)
            A_iter4 = utility.quantize(A_iter4, self.args.rgb_range)
            t_iter4 = utility.quantize(t_iter4, self.args.rgb_range)
            J_iter4 = utility.quantize(J_iter4, self.args.rgb_range)
            save_list = [haze]
            if not no_eval:
                # NOTE(review): unlike the sibling test() loops, calc_psnr is
                # called here without a scale argument — presumably this
                # repo's utility.calc_psnr has a different signature; verify.
                eval_acc += utility.calc_psnr(
                    J_iter1, latent, self.args.rgb_range,
                    benchmark=self.loader_test.dataset.benchmark)
                eval_acc_iter2 += utility.calc_psnr(
                    J_iter2, latent, self.args.rgb_range,
                    benchmark=self.loader_test.dataset.benchmark)
                eval_acc_iter3 += utility.calc_psnr(
                    J_iter3, latent, self.args.rgb_range,
                    benchmark=self.loader_test.dataset.benchmark)
                eval_acc_iter4 += utility.calc_psnr(
                    J_iter4, latent, self.args.rgb_range,
                    benchmark=self.loader_test.dataset.benchmark)
                save_list.extend([latent, A_iter1, t_iter1, J_iter1, \
                                  A_iter2, t_iter2, J_iter2, A_iter3, t_iter3, J_iter3, A_iter4, t_iter4, J_iter4])
            if self.args.save_results:
                self.ckp.save_results(filename, save_list)
        self.ckp.log[-1, 0] = eval_acc / len(self.loader_test)
        best = self.ckp.log.max(0)
        self.ckp.write_log(
            '[{}]\tPSNR of iter1: {:.3f}, PSNR of iter2: {:.3f}, PSNR of iter3: {:.3f}, PSNR of iter4: {:.3f} (Best: {:.3f} @epoch {})'
            .format(self.args.data_test, self.ckp.log[-1, 0],
                    eval_acc_iter2 / len(self.loader_test),
                    eval_acc_iter3 / len(self.loader_test),
                    eval_acc_iter4 / len(self.loader_test),
                    best[0][0], best[1][0] + 1))
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    """Evaluate with pose-map conditioned SR, logging both PSNR and SSIM.

    Large outputs (h*w*scale^2 above min_size) go through forward_chop to
    bound memory; otherwise the model is fed position/interpolation maps
    sized to the HR target.  Log layout: [epoch, metric(0=PSNR, 1=SSIM),
    dataset, scale].
    """
    # torch.set_grad_enabled(False)
    with torch.no_grad():
        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(
            torch.zeros(1, 2, len(self.loader_test), len(self.args.test_scale)))
        self.model.eval()
        # self.model = self.model.to('cpu')
        timer_test = utility.timer()
        if self.args.save_results:
            self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.args.test_scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr = lr.to(self.device)
                    b, c, h, w = lr.size()
                    # Chop large inputs so the upscaled activations fit in
                    # memory.
                    min_size = 360000
                    if ((h * w * scale * scale) > min_size):
                        sr = self.forward_chop(lr, scale, min_size=min_size)
                    else:
                        poseMap, interMapY, interMapX = pose_map(
                            lr.shape[2:4], output_size=hr.shape[2:4])
                        sr = self.model(lr, poseMap, interMapY, interMapX)
                        # poseMapL, poseMapH, interMapY, interMapX = pose_map(lr.shape[2:4], output_size=hr.shape[2:4])
                        # sr = self.model(lr, poseMapL, poseMapH, interMapY, interMapX)
                    sr = sr.data.cpu()
                    sr = utility.quantize(sr, self.args.rgb_range)
                    save_list = [sr]
                    self.ckp.log[-1, 0, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d)
                    self.ckp.log[-1, 1, idx_data, idx_scale] += utility.calc_ssim(
                        sr, hr, scale, self.args.rgb_range, dataset=d)
                    if self.args.save_gt:
                        save_list.extend([lr, hr])
                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)
                    # Free per-image tensors eagerly to keep GPU memory flat.
                    del sr
                    del hr
                    del lr
                    del save_list
                    # fname = self.ckp.get_path(
                    #     'results-{}'.format(d.dataset.name),
                    #     '{}_x{}_FL_'.format(filename[0], scale)
                    # )
                    # self.saveFeature(fl.data.cpu().numpy()[0], fname)
                    # fname = self.ckp.get_path(
                    #     'results-{}'.format(d.dataset.name),
                    #     '{}_x{}_FH_'.format(filename[0], scale)
                    # )
                    # self.saveFeature(fh.data.cpu().numpy()[0], fname)
                self.ckp.log[-1, 0, idx_data, idx_scale] /= len(d)
                self.ckp.log[-1, 1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} SSIM: {:.4f} (Best PSNR: {:.3f} @epoch {} Best SSIM: {:.4f} @epoch {})'
                    .format(d.dataset.name, scale,
                            self.ckp.log[-1, 0, idx_data, idx_scale],
                            self.ckp.log[-1, 1, idx_data, idx_scale],
                            best[0][0, idx_data, idx_scale],
                            best[1][0, idx_data, idx_scale] + 1,
                            best[0][1, idx_data, idx_scale],
                            best[1][1, idx_data, idx_scale] + 1))
                torch.cuda.empty_cache()
        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')
        if self.args.save_results:
            self.ckp.end_background()
        if not self.args.test_only:
            # self.ckp.save(self, epoch, is_best=(best[1][0, 0, 0] + 1 == epoch))
            # NOTE(review): is_best is a 0-dim tensor from torch.sum(...)
            # counting how many (dataset, scale) PSNR cells peaked this
            # epoch — truthy when any did; confirm this is intended.
            self.ckp.save(self, epoch,
                          is_best=(torch.sum(best[1][0, :, :] + 1 == epoch)))
        self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                           refresh=True)
def test(self):
    """Evaluate self.model on the test loader for every scale in self.scale.

    PSNR is accumulated over the whole loader per scale, logged via the
    checkpoint object, and the model is saved (flagging is_best when the
    best logged epoch is the current one) unless running test-only.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    scale_list = self.scale  # e.g. [2, 3, 4, 8]
    self.ckp.add_log(torch.zeros(1, len(scale_list)))
    self.model.eval()
    # NOTE(review): no_eval is constant 0 here, so HR is always prepared and
    # PSNR always computed; kept for parity with the sibling test() loops.
    no_eval = 0
    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(scale_list):
            eval_acc = 0
            # BUGFIX: the original read `self.loader_test.dataset.set_sc ale(...)`
            # (a stray space inside the identifier), which is a SyntaxError.
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=120)
            for idx_img, (lr, hr, filename) in enumerate(tqdm_test):
                filename = filename[0]
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                eval_acc += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range,
                    benchmark=self.loader_test.dataset.benchmark)
                save_list.extend([lr, hr])
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, idx_img, scale)
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test, scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale], best[1][idx_scale] + 1))
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    """Evaluate self.model, log PSNR per dataset/scale, and push the first
    two SR/HR image pairs plus the PSNR scalar to the TensorBoard-style
    logger (self.logger).
    """
    torch.set_grad_enabled(False)
    epoch = self.optimizer.get_last_epoch()
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
    self.model.eval()
    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            # NOTE(review): these lists retain every SR/HR tensor for the
            # whole dataset although only the first two entries are logged —
            # consider capping collection to bound memory.
            log_sr = []
            log_hr = []
            for lr, hr, filename in tqdm(d, ncols=80):
                lr, hr = self.prepare(lr, hr)
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)
                log_hr.append(hr)
                log_sr.append(sr)
                save_list = [sr]
                self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range, dataset=d)
                if self.args.save_gt:
                    save_list.extend([lr, hr])
                if self.args.save_results:
                    self.ckp.save_results(d, filename[0], save_list, scale)
            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    d.dataset.name, scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + 1))
            # Log at most the first two SR/HR pairs as CHW uint8 images.
            for idx, s in enumerate(log_sr):
                if idx > 1:
                    break
                im_sr = s[0].cpu().numpy().astype('uint8')
                im_hr = log_hr[idx][0].cpu().numpy().astype('uint8')
                self.logger.add_image(f"test/sr/{d.dataset.name}", im_sr, epoch)
                self.logger.add_image(f"test/hr/{d.dataset.name}", im_hr, epoch)
            self.logger.add_scalar(f"test/psnr/{d.dataset.name}",
                                   self.ckp.log[-1, idx_data, idx_scale],
                                   epoch)
            self.logger.flush()
    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')
    if self.args.save_results:
        self.ckp.end_background()
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    torch.set_grad_enabled(True)
def test(self):
    """Evaluate one or two chained models (model1, optionally model2) and
    archive HR/LR/SR tensors to .npz files and per-epoch HDF5 datasets.

    When args.nmodels == 2 and args.Test_feed_model1_out is set, model1's
    quantized SR output is fed to model2 as its LR input.  MWCNN outputs
    are cropped by one pixel on each spatial edge before metrics.
    """
    epoch = self.scheduler1.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model1.eval()
    if (self.args.nmodels == 2):
        self.ckp2.write_log('\nEvaluation:')
        self.ckp2.add_log(torch.zeros(1, len(self.scale)))
        self.model2.eval()
    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            if (self.args.nmodels == 2):
                eval_acc2 = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            n_test_data = len(self.loader_test.dataset)
            print("n_test_data", n_test_data)
            # Staging arrays for the whole test set; each batch is assumed
            # to hold 250 images of 3x111x111 — TODO confirm against loader.
            Test_pred_mat_HR = np.zeros((n_test_data, 3, 111, 111))
            Test_pred_mat_LR = np.zeros((n_test_data, 3, 111, 111))
            Test_pred_mat_SR = np.zeros((n_test_data, 3, 111, 111))
            Test_pred_mat_Limg = np.zeros((n_test_data, 3, 111, 111))
            if (self.args.nmodels == 2):
                Test_pred_mat_HR_2 = np.zeros((n_test_data, 3, 111, 111))
                Test_pred_mat_LR_2 = np.zeros((n_test_data, 3, 111, 111))
                Test_pred_mat_SR_2 = np.zeros((n_test_data, 3, 111, 111))
                #Test_pred_mat_Limg_2 = np.zeros((n_test_data,3,111,111))
            k = 0  # batch index into the staging arrays
            for idx_img, (lr, hr, lr2, hr2, filename, _) in enumerate(tqdm_test):
                filename = filename[0]
                #no_eval = (hr.item() == -1)
                no_eval = False
                # print ("before prep", lr.dtype,hr.dtype)
                # print ("before prep", lr.max(),lr.min(),hr.max(),hr.min())
                if not no_eval:
                    if (self.args.model == 'MWCNN'):
                        lr, hr = self.prepare_test([lr, hr])
                    else:
                        lr, hr = self.prepare([lr, hr])
                else:
                    if (self.args.model == 'MWCNN'):
                        lr = self.prepare_test([lr])[0]
                    else:
                        lr = self.prepare([lr])[0]
                # print ("After prep", lr.dtype,hr.dtype)
                # print ("After prep", lr.max(),lr.min(),hr.max(),hr.min())
                sr1 = self.model1(lr, idx_scale)
                if (self.args.model == 'MWCNN'):
                    # MWCNN pads by one pixel; crop references to match.
                    hr = hr[:, :, 1:, 1:]
                    lr = lr[:, :, 1:, 1:]
                if (self.args.nmodels == 2):
                    if (self.args.Test_feed_model1_out):
                        ###### the SR from Model 1 id used as LR for model 2
                        if not no_eval:
                            #lr2, hr2 = self.prepare([sr1, hr2])
                            #lr2, hr2 = self.prepare([lr2, hr2])
                            lr2 = sr1.to(torch.device('cuda'))
                            lr2 = utility.quantize(lr2, self.args.rgb_range)
                            hr2 = hr2.to(torch.device('cuda'))
                        else:
                            if (self.args.model == 'MWCNN'):
                                lr2 = self.prepare_test([sr1])[0]
                            else:
                                lr2 = self.prepare([lr2])[0]
                    else:
                        if not no_eval:
                            if (self.args.model == 'MWCNN'):
                                lr2, hr2 = self.prepare_test([lr2, hr2])
                            else:
                                lr2, hr2 = self.prepare([lr2, hr2])
                        else:
                            if (self.args.model == 'MWCNN'):
                                lr2 = self.prepare_test([lr2])[0]
                            else:
                                lr2 = self.prepare([lr2])[0]
                    #lr2 = sr1
                    #sr1_prep = self.prepare([sr1])[0]
                    sr2 = self.model2(lr2, idx_scale)
                    if (self.args.model == 'MWCNN'):
                        #hr2 = hr[:,:,1:,1:]
                        lr2 = lr2[:, :, 1:, 1:]
                        sr2 = sr2[:, :, 1:, 1:]
                # print ("After eval", sr.dtype)
                # print ("After eval", sr.max(),sr.min())
                if (self.args.model == 'MWCNN'):
                    sr1 = sr1[:, :, 1:, 1:]
                sr1 = utility.quantize(sr1, self.args.rgb_range)
                Test_pred_mat_HR[k * 250:(k + 1) * 250, :, :, :] = hr
                Test_pred_mat_LR[k * 250:(k + 1) * 250, :, :, :] = lr
                Test_pred_mat_SR[k * 250:(k + 1) * 250, :, :, :] = sr1
                # NOTE(review): the index below has a stray trailing colon
                # (3 index fields on a 4-D array) — numpy tolerates it, but
                # it was likely meant to match the rows above.
                Test_pred_mat_Limg[k * 250:(k + 1) * 250:, :, :] = hr2
                save_list = [sr1]
                if (self.args.nmodels == 2):
                    sr2 = utility.quantize(sr2, self.args.rgb_range)
                    # print ("After quantize", sr.dtype)
                    # print ("After quantize", sr.max(),sr.min())
                    Test_pred_mat_HR_2[k * 250:(k + 1) * 250, :, :, :] = hr2
                    Test_pred_mat_LR_2[k * 250:(k + 1) * 250, :, :, :] = lr2
                    Test_pred_mat_SR_2[k * 250:(k + 1) * 250, :, :, :] = sr2
                    #Test_pred_mat_Limg_2[k,:,:,:] = hr2
                    #print ("TEST-Data shape-LR",lr.size())
                    #print ("TEST-Data shape-HR",hr.size())
                    #print ("TEST-Data shape-SR",sr.size())
                    save_list2 = [sr2]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr1, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                    save_list.extend([lr, hr])
                    if (self.args.nmodels == 2):
                        eval_acc2 += utility.calc_psnr(
                            sr2, hr2, scale, self.args.rgb_range,
                            benchmark=self.loader_test.dataset.benchmark)
                        #print ("eval_acc",eval_acc,"eval_acc2",eval_acc2)
                        save_list2.extend([lr2, hr2])
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)
                    if (self.args.nmodels == 2):
                        self.ckp2.save_results(filename, save_list2, scale)
                k = k + 1
            # Persist the staged tensors for model1.
            dir_save = '/gpfs/jlse-fs0/users/sand33p/stronglensing/Image_Enhancement/EDSR_MWCNN/experiment/' + self.args.save
            filename_save_HR = '{}/results/Array_HR.npz'.format(dir_save)
            filename_save_LR = '{}/results/Array_LR.npz'.format(dir_save)
            filename_save_SR = '{}/results/Array_SR.npz'.format(dir_save)
            filename_save_Limg = '{}/results/Array_Limg.npz'.format(
                dir_save)
            np.savez_compressed(filename_save_HR, X=Test_pred_mat_HR)
            np.savez_compressed(filename_save_LR, X=Test_pred_mat_LR)
            np.savez_compressed(filename_save_SR, X=Test_pred_mat_SR)
            np.savez_compressed(filename_save_Limg, X=Test_pred_mat_Limg)
            if not self.args.test_only:
                print("self.it_ckp", self.it_ckp)
                # Append this epoch's arrays into preallocated HDF5 slots.
                with h5py.File(self.HDF5_file_model1_loc, 'a') as hf:
                    hf["Array_HR"][
                        self.it_ckp, :, :, :, :] = Test_pred_mat_HR
                    hf["Array_LR"][
                        self.it_ckp, :, :, :, :] = Test_pred_mat_LR
                    hf["Array_SR"][
                        self.it_ckp, :, :, :, :] = Test_pred_mat_SR
                    hf["Array_Limg"][
                        self.it_ckp, :, :, :, :] = Test_pred_mat_Limg
                    # NOTE(review): redundant — the `with` block closes hf.
                    hf.close()
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR-ckp: {:.3f} (Best: {:.3f} @epoch {})'.
                format(self.args.data_test, scale,
                       self.ckp.log[-1, idx_scale], best[0][idx_scale],
                       best[1][idx_scale] + 1))
            if (self.args.nmodels == 2):
                dir_save = '/gpfs/jlse-fs0/users/sand33p/stronglensing/Image_Enhancement/EDSR_MWCNN/experiment/' + self.args.save
                filename_save_HR_2 = '{}/results/Array_HR_2.npz'.format(
                    dir_save)
                filename_save_LR_2 = '{}/results/Array_LR_2.npz'.format(
                    dir_save)
                filename_save_SR_2 = '{}/results/Array_SR_2.npz'.format(
                    dir_save)
                #filename_save_Limg_2 = '{}/results/Array_Limg_2.npz'.format(dir_save)
                np.savez_compressed(filename_save_HR_2, X=Test_pred_mat_HR_2)
                np.savez_compressed(filename_save_LR_2, X=Test_pred_mat_LR_2)
                np.savez_compressed(filename_save_SR_2, X=Test_pred_mat_SR_2)
                if not self.args.test_only:
                    with h5py.File(self.HDF5_file_model2_loc, 'a') as hf2:
                        hf2["Array_HR"][
                            self.it_ckp, :, :, :, :] = Test_pred_mat_HR_2
                        hf2["Array_LR"][
                            self.it_ckp, :, :, :, :] = Test_pred_mat_LR_2
                        hf2["Array_SR"][
                            self.it_ckp, :, :, :, :] = Test_pred_mat_SR_2
                        # NOTE(review): redundant — the `with` block closes hf2.
                        hf2.close()
                self.ckp2.log[-1, idx_scale] = eval_acc2 / len(
                    self.loader_test)
                best2 = self.ckp2.log.max(0)
                self.ckp2.write_log(
                    '[{} x{}]\tPSNR-ckp2: {:.3f} (Best: {:.3f} @epoch {})'.
                    format(self.args.data_test, scale,
                           self.ckp2.log[-1, idx_scale], best2[0][idx_scale],
                           best2[1][idx_scale] + 1))
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    if (self.args.nmodels == 2):
        self.ckp2.write_log('Total time: {:.2f}s\n'.format(
            timer_test.toc()),
                            refresh=True)
    if not self.args.test_only:
        #save(self, trainer, epoch, is_best=False,model=trainer.model1,loss=trainer.loss1,optimizer=trainer.optimizer1,model_name='model1'):
        self.ckp.save(self,
                      epoch,
                      is_best=(best[1][0] + 1 == epoch),
                      model=self.model1,
                      loss=self.loss1,
                      optimizer=self.optimizer1,
                      model_name='model1')
        if ((self.args.nmodels == 2) & (self.args.numloss == 2)):
            # Model2 is saved with its own optimizer only when two optimizers
            # are in use; otherwise it shares optimizer1's state.
            if (self.use_two_opt):
                self.ckp2.save(self,
                               epoch,
                               is_best=(best2[1][0] + 1 == epoch),
                               model=self.model2,
                               loss=self.loss2,
                               optimizer=self.optimizer2,
                               model_name='model2')
            else:
                self.ckp2.save(self,
                               epoch,
                               is_best=(best2[1][0] + 1 == epoch),
                               model=self.model2,
                               loss=self.loss2,
                               optimizer=self.optimizer1,
                               model_name='model2')
        #elif (self.args.nmodels == 2):
        #    self.ckp2.save(self, epoch, is_best=(best2[1][0] + 1 == epoch),model=self.model2,loss=self.loss1,optimizer=self.optimizer1,model_name='model2')
    if not self.args.test_only:
        self.it_ckp = self.it_ckp + 1
def test(self):
    """Evaluate the multi-stage SR model (coarse output plus two refinement
    outputs) on the test set for every scale.

    Only the coarse-output PSNR drives the logged best-epoch bookkeeping;
    refine1/refine2 PSNRs are reported alongside for comparison.
    """
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()
    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            psnr_main = 0
            psnr_refine1 = 0
            psnr_refine2 = 0
            self.loader_test.dataset.set_scale(idx_scale)
            progress = tqdm(self.loader_test, ncols=80)
            for _, (lr, hr, filename, _unused) in enumerate(progress):
                filename = filename[0]
                # A 1-element HR tensor means no ground truth: skip metrics.
                skip_metrics = (hr.nelement() == 1)
                if skip_metrics:
                    lr = self.prepare([lr])[0]
                else:
                    lr, hr = self.prepare([lr, hr])
                outputs = self.model(lr, idx_scale)
                sr, sr_refine1, sr_refine2 = (
                    utility.quantize(o, self.args.rgb_range) for o in outputs)
                save_list = [sr]
                if not skip_metrics:
                    bench = self.loader_test.dataset.benchmark
                    psnr_main += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, benchmark=bench)
                    psnr_refine1 += utility.calc_psnr(
                        sr_refine1, hr, scale, self.args.rgb_range,
                        benchmark=bench)
                    psnr_refine2 += utility.calc_psnr(
                        sr_refine2, hr, scale, self.args.rgb_range,
                        benchmark=bench)
                    save_list.extend([sr_refine1, sr_refine2, lr, hr])
                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)
            n_images = len(self.loader_test)
            self.ckp.log[-1, idx_scale] = psnr_main / n_images
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f}, PSNR of refine1: {:.3f}, PSNR of refine2: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    psnr_refine1 / n_images,
                    psnr_refine2 / n_images,
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )
    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    """Evaluate the model, optionally routing inference through an nGraph
    (ONNX -> CPU) or TensorRT backend selected via args.ngraph/args.tensorrt.

    Each batch's input dimensions are published through the module-level
    globals dim0/dim2/dim3.  Returns the PSNR of the last evaluated
    (dataset, scale) pair as a numpy value.
    """
    import onnx
    from ngraph_onnx.onnx_importer.importer import import_onnx_model
    import ngraph as ng
    global dim0, dim2, dim3
    torch.set_grad_enabled(False)
    epoch = self.optimizer.get_last_epoch() + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(
        torch.zeros(1, len(self.loader_test), len(self.scale))
    )
    self.model.eval()
    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    # print(self.loader_test)
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            print('idx_scale={}'.format(idx_scale))
            # print("len: {}".format(len(d)))
            # for lr, hr, filename, _ in tqdm(d, ncols=80):
            for batch, (lr, hr, filename, _) in enumerate(d):
                print('{} '.format(batch), end='', flush=True)
                lr, hr = self.prepare(lr, hr)
                print('test lr.size: {}'.format(lr.size()))
                # Publish the current input shape via the globals above.
                dim0 = lr.size()[0]
                dim2 = lr.size()[2]
                dim3 = lr.size()[3]
                showbug = False  # flip on to trace the stages below
                if showbug: print('stage1', flush=True)
                if self.args.ngraph:
                    # Export the stored PyTorch model to ONNX, import it into
                    # nGraph and run inference on CPU.
                    pytorch_model_name = self.args.ngraph
                    pytorch_edsr_model = torch.load(pytorch_model_name).cuda()
                    if showbug: print('stage2-1', flush=True)
                    # print(lr.size())
                    # dummy_input = torch.randn_like(lr, device='cuda')
                    if showbug: print('stage2-2', flush=True)
                    edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)
                    # print('Export to onnx model {}'.format(edsr_onnx_filename))
                    torch.onnx.export(pytorch_edsr_model,
                                      lr.to(torch.device('cuda')),
                                      edsr_onnx_filename,
                                      export_params=True,
                                      verbose=False,
                                      training=False)
                    if showbug: print('stage2-3', flush=True)
                    edsr_onnx_model = onnx.load(edsr_onnx_filename)
                    # print(onnx.helper.printable_graph(edsr_onnx_model.graph))
                    if showbug: print('stage2-4', flush=True)
                    ng_models = import_onnx_model(edsr_onnx_model)
                    # print('Convert to nGreph Model')
                    ng_model = ng_models[0]
                    if showbug: print('stage2-5', flush=True)
                    runtime = ng.runtime(backend_name='CPU')
                    if showbug: print('stage2-6', flush=True)
                    edsr_ng_model = runtime.computation(ng_model['output'],
                                                        *ng_model['inputs'])
                    if showbug: print('stage2-7', flush=True)
                    sr = edsr_ng_model(lr, idx_scale)
                    if showbug: print('stage2-8', flush=True)
                    sr = torch.from_numpy(sr)
                    if showbug: print('stage2-9', flush=True)
                elif self.args.tensorrt:
                    pytorch_model_name = self.args.tensorrt
                    pytorch_edsr_model = torch.load(pytorch_model_name)
                    # lr_np = lr.numpy().astype(np.float32)
                    dummy_input = torch.randn_like(lr, device='cuda')
                    edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)
                    print('Export to onnx model {}'.format(edsr_onnx_filename))
                    torch.onnx.export(pytorch_edsr_model, dummy_input,
                                      edsr_onnx_filename, export_params=True,
                                      verbose=False, training=False)
                    import os
                    import onnx
                    edsr_onnx_model = onnx.load(edsr_onnx_filename)
                    # print(onnx.helper.printable_graph(edsr_onnx_model.graph))
                    import tensorrt
                    import onnx_tensorrt.backend as backend
                    import numpy as np
                    tensorrt_engine = backend.prepare(edsr_onnx_model,
                                                      device='CUDA:0')
                    # lr_np = lr_np.to(torch.device("cuda:0"))
                    # lr.numpy().astype(np.float32)
                    sr = tensorrt_engine.run(lr.numpy().astype(np.float32))[0]
                    sr = torch.from_numpy(sr)
                    print('complete one')
                    # NOTE(review): the entire export/prepare/run sequence is
                    # repeated verbatim below ('complete two') — apparently a
                    # copy-paste duplication; confirm whether the second pass
                    # is intentional (e.g. warm-cache timing) before removing.
                    pytorch_model_name = self.args.tensorrt
                    pytorch_edsr_model = torch.load(pytorch_model_name)
                    # lr_np = lr.numpy().astype(np.float32)
                    dummy_input = torch.randn_like(lr, device='cuda')
                    edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)
                    print('Export to onnx model {}'.format(edsr_onnx_filename))
                    torch.onnx.export(pytorch_edsr_model, dummy_input,
                                      edsr_onnx_filename, export_params=True,
                                      verbose=False, training=False)
                    import os
                    import onnx
                    edsr_onnx_model = onnx.load(edsr_onnx_filename)
                    # print(onnx.helper.printable_graph(edsr_onnx_model.graph))
                    import tensorrt
                    import onnx_tensorrt.backend as backend
                    import numpy as np
                    tensorrt_engine = backend.prepare(edsr_onnx_model,
                                                      device='CUDA:0')
                    # lr_np = lr_np.to(torch.device("cuda:0"))
                    # lr.numpy().astype(np.float32)
                    sr = tensorrt_engine.run(lr.numpy().astype(np.float32))[0]
                    sr = torch.from_numpy(sr)
                    print('complete two')
                else:
                    sr = self.model(lr, idx_scale)
                if showbug: print('stage3', flush=True)
                sr = utility.quantize(sr, self.args.rgb_range)
                if showbug: print('stage4', flush=True)
                save_list = [sr]
                if showbug: print('stage5', flush=True)
                self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range, dataset=d
                )
                if showbug: print('stage6', flush=True)
                if self.args.save_gt:
                    save_list.extend([lr, hr])
                if showbug: print('stage7', flush=True)
                if self.args.save_results:
                    self.ckp.save_results(d, filename[0], save_list, scale)
                if showbug: print('stage8', flush=True)
            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)
            # PSNR of the most recent (dataset, scale) pair; returned below.
            psnr = self.ckp.log[-1, idx_data, idx_scale].numpy()
            print('')
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    d.dataset.name,
                    scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + 1
                )
            )
    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')
    if self.args.save_results:
        self.ckp.end_background()
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    torch.set_grad_enabled(True)
    return psnr
def test(self):
    """Evaluate self.model across all test datasets and scales, logging PSNR
    (and a placeholder SSIM) per dataset/scale pair; per-file PSNR is echoed
    for filenames listed in self.save_name_list.
    """
    torch.set_grad_enabled(False)
    epoch = self.optimizer.get_last_epoch()
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
    self.model.eval()
    # print(self.args.save_results)
    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            # NOTE(review): ssim_list is never appended to, so the reported
            # SSIM is always 0.0 and the per-file log below prints PSNR only
            # despite its 'PSNR/SSIM' label — wire in an SSIM metric or drop
            # the placeholder.
            ssim_list = []
            for lr, hr, filename, _ in tqdm(d, ncols=80):
                lr, hr = self.prepare(lr, hr)
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                cur_psnr = utility.calc_psnr(sr, hr, scale,
                                             self.args.rgb_range, dataset=d)
                self.ckp.log[-1, idx_data, idx_scale] += cur_psnr
                if self.args.save_gt:
                    save_list.extend([lr, hr])
                if self.args.save_results:
                    # print(filename[0])
                    self.ckp.save_results(d, filename[0], save_list, scale)
                # Echo per-file metrics only for files the user asked about.
                if filename[0] in self.save_name_list:
                    self.ckp.write_log(
                        f'{filename[0]} PSNR/SSIM: {cur_psnr:.3f}')
            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            if len(ssim_list) > 0:
                ssim = sum(ssim_list) / len(ssim_list)
            else:
                ssim = 0.0
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR/SSIM: {:.3f}/{:.4f} (Best: {:.3f} @epoch {})'
                .format(d.dataset.name, scale,
                        self.ckp.log[-1, idx_data, idx_scale], ssim,
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1))
    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')
    if self.args.save_results:
        self.ckp.end_background()
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()),
                       refresh=True)
    torch.set_grad_enabled(True)