def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]

                scale_ = scale * self.args.px
                x, y = (lr.size(-2) // scale_) * scale_, (lr.size(-1) // scale_) * scale_
                sr = self.model(lr[:, :, :x, :y], idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)
                sr = torch.cat([sr, lr[:, :, x:, :y]], -2)
                sr = torch.cat([sr, lr[:, :, :, y:]], -1)

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
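# A minimal sketch of a timer helper compatible with the call sites used
# throughout these snippets (tic/toc/hold/release and the `acc` attribute).
# It is inferred from usage only; the actual utility.timer in these projects
# may be implemented differently.
import time

class timer():
    def __init__(self):
        self.acc = 0  # time accumulated by hold()
        self.tic()

    def tic(self):
        # Start (or restart) the stopwatch.
        self.t0 = time.time()

    def toc(self):
        # Seconds elapsed since the last tic().
        return time.time() - self.t0

    def hold(self):
        # Add the current interval to the accumulator.
        self.acc += self.toc()

    def release(self):
        # Return the accumulated time and reset the accumulator.
        ret = self.acc
        self.acc = 0
        return ret

    def reset(self):
        self.acc = 0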
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(
        epoch, Decimal(lr)))
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare([lr, hr])
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, self.args.scale[idx_scale])
        loss = self.loss(sr, hr)
        if loss.data[0] < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.data[0]))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
def train(self):
    self.optimizer.schedule()
    self.loss.step()
    epoch = self.optimizer.get_last_epoch() + 1
    lr = self.optimizer.get_lr()

    self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(
        epoch, Decimal(lr)))
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        sr = self.model(lr, idx_scale)
        loss = self.loss(sr, hr)
        loss.backward()
        if self.args.gclip > 0:
            utils.clip_grad_value_(self.model.parameters(), self.args.gclip)
        self.optimizer.step()

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
def main(fit_params):
    data_path = '../data'
    train_base = pd.read_csv(f'{data_path}/application_train.csv')
    test_base = pd.read_csv(f'{data_path}/application_test.csv')
    train_base.set_index(keys='SK_ID_CURR', drop=True, inplace=True)
    test_base.set_index(keys='SK_ID_CURR', drop=True, inplace=True)

    with timer('Creating variables in base set'):
        for df in [train_base, test_base]:
            df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
            df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
            df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']

    with timer('Aggregating bureau.csv'):
        bureau_df = agg_bureau()
        train_base = train_base.join(bureau_df, how='left')
        test_base = test_base.join(bureau_df, how='left')
        del bureau_df
        gc.collect()

    with timer('Aggregating previous_application.csv'):
        previous_application_df = agg_pre_application()
        train_base = train_base.join(previous_application_df, how='left')
        test_base = test_base.join(previous_application_df, how='left')
        del previous_application_df
        gc.collect()

    y = train_base['TARGET']
    del train_base['TARGET']
    y = LabelEncoder().fit_transform(y)

    header = 'Grid Searching Pipeline with parameter grids' if fit_params else 'Fitting and predicting'
    with timer(header):
        fit_pipeline(train_base, y, predict=True, x_score=test_base, fit_params=fit_params)
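# A minimal sketch of the `timer` context manager used in main() above,
# assuming it simply prints the block name and its elapsed wall-clock time.
# The real helper in this project may log through a different channel.
import time
from contextlib import contextmanager

@contextmanager
def timer(name):
    t0 = time.time()
    yield
    print('{} - done in {:.0f}s'.format(name, time.time() - t0))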
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    best = self.ckp.log.max(0)

    # evaluate the model every valid_interval epochs
    if epoch % self.args.valid_interval == 0:
        self.ckp.write_log('\nEvaluation:')
        self.model.eval()

        timer_test = utility.timer()
        with torch.no_grad():
            for idx_scale, scale in enumerate(self.scale):
                eval_acc = 0
                self.loader_test.dataset.set_scale(idx_scale)
                tqdm_test = tqdm(self.loader_test, ncols=80)
                for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
                    no_eval = (hr.nelement() == 1)
                    if not no_eval:
                        lr, hr = self.prepare([lr, hr])
                    else:
                        lr = self.prepare([lr])[0]

                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    if not no_eval:
                        eval_acc += utility.calc_psnr(
                            sr, hr, scale, self.args.rgb_range)

                    if self.args.save_results:
                        for idx_sr in range(len(sr)):
                            filename = filename[idx_sr]
                            # save_test_SR expects a 4-dim tensor [B, C, H, W] with batch size == 1
                            sr_save = sr[idx_sr].unsqueeze(dim=0)
                            self.ckp.save_test_SR(filename, sr_save, epoch, scale)

                self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        self.args.data_test,
                        scale,
                        self.ckp.log[-1, idx_scale],
                        best[0][idx_scale],
                        best[1][idx_scale] + 1))

        self.ckp.write_log('Total time: {:.2f}s\n'.format(
            timer_test.toc()), refresh=True)

    # save models
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
        self.ckp.plot_psnr(epoch)
def test(self):
    torch.set_grad_enabled(False)

    epoch = self.optimizer.get_last_epoch() + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            for lr, hr, filename, _ in tqdm(d, ncols=80):
                lr, hr = self.prepare(lr, hr)
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range, dataset=d)
                if self.args.save_gt:
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(d, filename[0], save_list, scale)

            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                "[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})".format(
                    d.dataset.name,
                    scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + 1,
                ))

    self.ckp.write_log("Forward: {:.2f}s\n".format(timer_test.toc()))
    self.ckp.write_log("Saving...")

    if self.args.save_results:
        self.ckp.end_background()

    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

    self.ckp.write_log("Total: {:.2f}s\n".format(timer_test.toc()), refresh=True)

    torch.set_grad_enabled(True)
def test(epoch):
    student.eval()
    with torch.no_grad():
        if args.save_results:
            student_ckp.begin_background()
        student_ckp.write_log('\nEvaluation:')
        student_ckp.add_log(torch.zeros(1, len(test_loader), len(args.scale)))
        timer_test = utility.timer()
        for idx_data, d in enumerate(test_loader):
            for idx_scale, scale in enumerate(args.scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename, _ in tqdm(d, ncols=80):
                    lr, hr = prepare(lr, hr)
                    fms, sr = student(lr)
                    sr = utility.quantize(sr, args.rgb_range)

                    save_list = [sr]
                    student_ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, args.rgb_range, dataset=d)
                    if args.save_gt:
                        save_list.extend([lr, hr])
                    if args.save_results:
                        student_ckp.save_results(d, filename[0], save_list, scale)

                student_ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = student_ckp.log.max(0)
                student_ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        student_ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1))

        student_ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        student_ckp.write_log('Saving...')
        if args.save_results:
            student_ckp.end_background()
        save(is_best=(best[1][0, 0] + 1 == epoch), epoch=epoch)
        student_ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
def test(self):
    epoch = self.scheduler.last_epoch
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, 1))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        scale = max(self.scale)
        for si, s in enumerate([scale]):
            eval_psnr = 0
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for _, (lr, hr, filename) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)

                sr = self.model(lr[0])
                if isinstance(sr, list):
                    sr = sr[-1]
                sr = utility.quantize(sr, self.opt.rgb_range)

                if not no_eval:
                    eval_psnr += utility.calc_psnr(
                        sr, hr, s, self.opt.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )

                # save test results
                if self.opt.save_results:
                    self.ckp.save_results_nopostfix(filename, sr, s)

            self.ckp.log[-1, si] = eval_psnr / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.2f} (Best: {:.2f} @epoch {})'.format(
                    self.opt.data_test,
                    s,
                    self.ckp.log[-1, si],
                    best[0][si],
                    best[1][si] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.opt.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    for idx_scale, scale in enumerate(self.scale):
        eval_acc = 0
        self.loader_test.dataset.set_scale(idx_scale)
        tqdm_test = tqdm(self.loader_test, ncols=80)
        for idx_img, (lr, hr, filename, _) in enumerate(tqdm_test):
            filename = filename[0]
            no_eval = isinstance(hr[0], int)
            if no_eval:
                lr = self.prepare([lr], volatile=True)[0]
            else:
                lr, hr = self.prepare([lr, hr], volatile=True)

            sr = self.model(lr, idx_scale)
            sr = utility.quantize(sr, self.args.rgb_range)

            save_list = [sr]
            if not no_eval:
                eval_acc += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range,
                    benchmark=self.loader_test.dataset.benchmark
                )
                save_list.extend([lr, hr])

            if self.args.save_results:
                if self.args.test_only:
                    self.ckp.save_results_test(filename, save_list, scale)
                else:
                    self.ckp.save_results(filename, save_list, scale)

        self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
        best = self.ckp.log.max(0)
        self.ckp.write_log(
            '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} from epoch {})'.format(
                self.args.data_test,
                scale,
                self.ckp.log[-1, idx_scale],
                best[0][idx_scale],
                best[1][idx_scale] + 1
            )
        )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    epoch = self.scheduler.last_epoch  # + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)

                B0, ListB, ListR = self.model(lr, idx_scale)
                # restored background at the last stage
                sr = utility.quantize(ListB[-1], self.args.rgb_range)

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark
                    )
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1
                )
            )

    self.ckp.write_log(
        'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    torch.set_grad_enabled(False)

    epoch = self.optimizer.get_last_epoch() + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1))
    self.model.eval()

    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, (lr, hr, filename) in enumerate(tqdm(self.loader_test)):
        lr, hr = self.prepare(lr, hr)
        sr = self.model(lr, self.scale)
        sr = utility.quantize(sr, self.args.rgb_range)

        save_list = [sr]
        self.ckp.log[-1] += utility.calc_psnr(
            sr, hr, self.scale, self.args.rgb_range, dataset=self.loader_test)
        if self.args.save_gt:
            save_list.extend([lr, hr])

        if self.args.save_results:
            self.ckp.save_results(self.loader_test, filename[0], save_list, self.scale)

    self.ckp.log[-1] /= len(self.loader_test)
    best = self.ckp.log.max(0)
    self.ckp.write_log(
        '[{} x{}]\tPSNR: {:.3f} @epoch{} (Best: {:.3f} @epoch {})'.format(
            self.loader_test.dataset.name,
            self.scale,
            self.ckp.log[-1],
            epoch,
            best[0],
            best[1] + 1))
    self.optimizer.schedule(self.ckp.log[-1])

    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')

    if self.args.save_results:
        self.ckp.end_background()

    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1] + 1 == epoch))

    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True)

    torch.set_grad_enabled(True)
def train(self):
    epoch, _ = self.start_epoch()
    self.model.begin(epoch, self.ckp)
    self.loss.start_log()

    timer_data, timer_model = utility.timer(), utility.timer()
    n_samples = 0
    for batch, (img, label) in enumerate(self.loader_train):
        img, label = self.prepare(img, label)
        n_samples += img.size(0)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()
        prediction = self.model(img)
        loss, _ = self.loss(prediction, label)
        loss.backward()
        self.optimizer.step()

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log(
                '{}/{} ({:.0f}%)\t'
                'NLL: {:.3f}\t'
                'Top1: {:.2f} / Top5: {:.2f}\t'
                'Time: {:.1f}+{:.1f}s'.format(
                    n_samples,
                    len(self.loader_train.dataset),
                    100.0 * n_samples / len(self.loader_train.dataset),
                    *(self.loss.log_train[-1, :] / n_samples),
                    timer_model.release(),
                    timer_data.release()))

        timer_data.tic()

    self.model.log(self.ckp)
    self.loss.end_log(len(self.loader_train.dataset))
def test(self):
    # test or validation
    epoch = self.epoch()
    self.ckp.write_log('\nEvaluation:')
    scale = 2
    self.ckp.add_log(torch.zeros(1))  # (torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        eval_acc = 0    # PSNR
        valid_loss = 0  # total loss, based on the training loss
        for im_idx, im_dict in enumerate(self.loader_test, 1):
            lr = im_dict['im_lr']
            hr = im_dict['im_hr']
            lr, hr = self.prepare([lr, hr])
            sr, sr_ = self.model(lr)
            sr = torch.clamp(sr, 0, 1)
            sr_ = torch.clamp(sr_, 0, 1)

            self.lr_valid = np.average(
                lr[0, :, :, :].permute(1, 2, 0).cpu().numpy(), axis=2)
            self.hr_valid = hr[0, :, :, :].permute(1, 2, 0).cpu().numpy()
            self.sr_valid = sr[0, :, :, :].permute(1, 2, 0).cpu().detach().numpy()

            # sr = utility.quantize(sr, self.args.rgb_range)
            save_list = [sr]
            # do some processing on sr, hr or modify find_psnr()
            eval_acc += errors.find_psnr(sr, hr)
            save_list.extend([lr, hr])

            loss = self.loss.valid_loss(sr, sr_, hr)
            valid_loss += loss.item()

            # save the sr images of the last epoch
            if self.args.save_results and epoch == self.args.epochs:
                self.ckp.save_results("image_{}_sr".format(im_idx), save_list, scale)

        self.ckp.log_accuracy[-1] = (valid_loss / len(self.loader_test))
        self.ckp.log[-1] = eval_acc / len(self.loader_test)
        best = self.ckp.log.max(0)
        self.ckp.write_log(
            '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                self.args.data_test,
                scale,
                self.ckp.log[-1],
                best[0].item(),
                epoch))

    # ckp.save stores the loss and the model; plot_loss is defined in the Checkpoint class
    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
    if not self.args.test_only:
        # self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
        self.ckp.save(self, epoch, is_best=False)
def __init__(self, opt, model, dataset):
    self.opt = opt
    self.model = model
    self.dataset = dataset
    self.device = torch.device("cuda" if opt.use_cuda else "cpu")
    self.timer = util.timer()
    self.epoch = 0

    # build one optimizer per scale
    self.optimizer_dict = {}
    for scale, _ in model.scale_dict.items():
        self.optimizer_dict[scale] = optim.Adam(
            model.networks[model.scale_dict[scale]].parameters(),
            lr=opt.lr,
            weight_decay=opt.weight_decay)

    self.loss_func = self._get_loss_func(opt.loss_type)

    # move the model to the target device
    self.model = self.model.to(self.device)
def test(self, image):
    self.model.to(self.device).eval()

    timer_test = utility.timer()
    with torch.no_grad():
        lr = image
        lr = self.prepare([lr]).to(self.device)
        sr = self.model(lr, 1)
        sr = utility.quantize(sr, 255)
        # save_list = [sr]
        sr = sr.squeeze(0)
        normalized = sr.data.mul(255 / 255)
        ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
        # misc.imsave('{}{}.png'.format("out", "_lol"), ndarr)
        return ndarr
def calc_map(args, model, log_manager, img_preprocessor, dataloader):
    map_calculator = evaluation.Map_calculator(args)
    sv_gt_batch_generator = utility.Sv_gt_batch_generator(args)
    timer_test = utility.timer()

    dataset_interval = 18 if args.fast_val else 1
    dataset_size = len(dataloader) // dataset_interval
    progbar = generic_utils.Progbar(dataset_size)

    for idx in range(0, len(dataloader), dataset_interval):
        images, labels, image_paths, extrins, rpn_results, ven_results = dataloader[idx]
        X = img_preprocessor.process_batch(images)
        all_dets = model.predict_batch(X, images, extrins, rpn_results, ven_results)
        gt_batch = sv_gt_batch_generator.get_gt_batch(labels)

        for cam_idx in range(args.num_valid_cam):
            dets = all_dets[cam_idx]
            gt = gt_batch[0][cam_idx]
            map_calculator.add_tp_fp(dets, gt)

        progbar.update(idx / dataset_interval + 1)

    all_aps = map_calculator.get_aps()
    # iou_avg = map_calculator.get_iou()
    log_manager.add(all_aps, 'ap')
    # log_manager.add(iou_avg, 'iou')
    log_manager.save()

    all_ap_dict = map_calculator.get_aps_dict()
    cur_map = map_calculator.get_map()

    log_manager.write_cur_time()
    log_manager.write_log('Evaluation:')
    log_manager.write_log('mAP\t%.2f' % (cur_map * 100))
    # log_manager.write_log('iou\t{:.3f}'.format(iou_avg))
    log_manager.write_log('Runtime(s)\t{:.2f}'.format(timer_test.toc()))
    for i, (cls, prob) in enumerate(all_ap_dict.items()):
        if prob < 0:
            continue
        log_manager.write_log('%s\t%.2f' % (cls, prob * 100))
    log_manager.write_log('\n')

    return cur_map
def test(self): torch.set_grad_enabled(False) self.ckp.write_log("\nEvaluation on video:") self.model.eval() timer_test = utility.timer() for idx_scale, scale in enumerate(self.scale): vidcap = cv2.VideoCapture(self.args.dir_demo) total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) vidwri = cv2.VideoWriter( self.ckp.get_path("{}_x{}.avi".format(self.filename, scale)), cv2.VideoWriter_fourcc(*"XVID"), vidcap.get(cv2.CAP_PROP_FPS), ( int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)), ), ) tqdm_test = tqdm(range(total_frames), ncols=80) for _ in tqdm_test: success, lr = vidcap.read() if not success: break lr, = common.set_channel(lr, n_channels=self.args.n_colors) lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range) lr, = self.prepare(lr.unsqueeze(0)) sr = self.model(lr, idx_scale) sr = utility.quantize(sr, self.args.rgb_range).squeeze(0) normalized = sr * 255 / self.args.rgb_range ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy() vidwri.write(ndarr) vidcap.release() vidwri.release() self.ckp.write_log("Total: {:.2f}s\n".format(timer_test.toc()), refresh=True) torch.set_grad_enabled(True)
def test(self):
    # epoch = self.scheduler.last_epoch + 1
    epoch = self.epoch
    self.ckp.write_log('\nEvaluation:')
    self.loss.start_log(train=False)
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for img, label in tqdm(self.loader_test, ncols=80):
            img, label = self.prepare(img, label)
            torch.cuda.synchronize()
            timer_test.tic()
            # a = time.time()
            # from IPython import embed; embed()
            prediction = self.model(img)
            torch.cuda.synchronize()
            timer_test.hold()
            # b = time.time() - a
            # print('The elapsed time is {}'.format(b))
            self.loss(prediction, label, train=False)

        if self.args.debug:
            self._analysis()

        mem = torch.cuda.max_memory_allocated() / 1024.0**2

    self.loss.end_log(len(self.loader_test.dataset), train=False)

    # Lower is better
    best = self.loss.log_test.min(0)
    for i, measure in enumerate(('Loss', 'Top1 error', 'Top5 error')):
        self.ckp.write_log(
            '{}: {:.3f} (Best: {:.3f} from epoch {})'.format(
                measure,
                self.loss.log_test[-1, i],
                best[0][i],
                best[1][i] + 1 if len(self.loss.log_test) == len(self.loss.log_train)
                else best[1][i]))

    total_time = timer_test.release()
    is_best = self.loss.log_test[-1, self.args.top] <= best[0][self.args.top]
    self.ckp.save(self, epoch, is_best=is_best)
    self.ckp.save_results(epoch, self.model)

    self.scheduler.step()
def test(self):
    epoch = self.scheduler.last_epoch + 1
    # self.ckp.write_log('\nEvaluation:')
    # self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            ker_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename, ker_y, _) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr, ker_y = self.prepare([lr, hr, ker_y])
                else:
                    lr = self.prepare([lr])[0]

                sr, ker = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                # ker_list = [ker, ker_y]
                # ker_acc += utility.calc_ker(ker, ker_y)
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_test_results(filename, save_list, scale)
def __init__(self, args, loader, my_model, my_loss, ckp):
    self.args = args
    self.scale = args.scale

    self.ckp = ckp
    self.loader_train = loader.loader_train
    self.loader_test = loader.loader_test
    self.model = my_model
    self.loss = my_loss
    self.optimizer = utility.make_optimizer(args, self.model)
    self.scheduler = utility.make_scheduler(args, self.optimizer)

    if self.args.load != '.':
        self.optimizer.load_state_dict(
            torch.load(os.path.join(ckp.dir, 'optimizer.pt')))
        for _ in range(len(ckp.log)):
            self.scheduler.step()

    self.error_last = 1e8

    self.global_timer = utility.timer()
    self.global_timer.tic()
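# A hedged sketch of how a trainer built with the constructor above is usually
# driven from a main script: alternate train() and test() until the epoch
# budget is exhausted. `trainer` and `num_epochs` are illustrative names and
# not taken from this code; real projects often use a terminate() helper instead.
def run_training(trainer, num_epochs):
    for _ in range(num_epochs):
        trainer.train()  # one optimization pass over loader_train
        trainer.test()   # evaluation pass over loader_test (checkpointing happens inside)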
def test(self):
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, hr, filename) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr = lr.to('cuda', non_blocking=True)
                    hr = hr.to('cuda', non_blocking=True)
                else:
                    lr = lr.to('cuda', non_blocking=True)

                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                if sr.shape != hr.shape:
                    print('\n', filename, lr.shape, sr.shape, hr.shape)
                    # raise
                save_list.extend([lr, hr])
                if self.args.save_results:
                    # save the predicted images
                    self.ckp.save_results_nopostfix(filename, save_list, scale)
def test(self):
    torch.set_grad_enabled(False)

    self.ckp.write_log('\nEvaluation on test:')
    psnr_score = 0
    self.model.eval()

    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, (lr, filename) in enumerate(tqdm(self.loader_test)):
        lr, = self.prepare(lr)
        sr = self.model(lr, self.scale)
        sr = utility.quantize(sr, self.args.rgb_range)

        save_list = [sr]
        if self.args.save_results:
            self.ckp.save_results(self.loader_test, filename[0], save_list, self.scale)

    size = len(self.loader_test)
    if size > 0:
        psnr_score /= len(self.loader_test)
    else:
        self.ckp.write_log("no test data!!!")
        exit(-1)

    if self.args.save_results:
        self.ckp.end_background()

    self.ckp.write_log("PSNR:{:.3f}".format(psnr_score))
    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True)

    torch.set_grad_enabled(True)
def train(self):
    # self.scheduler.last_epoch = 10
    self.scheduler.step()
    self.loss.step()
    # print(torch.load(os.path.join(self.ckp.dir, 'model', 'model_93.pt')))
    # self.model.load_state_dict(
    #     torch.load(os.path.join(self.ckp.dir, 'model', 'model_93.pt'))
    # )
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log(
        '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
    )
    self.loss.start_log()
    self.model.train()
    huber_loss = torch.nn.SmoothL1Loss(reduce=True, size_average=True)

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()

        import random
        up_or_down = random.randint(0, 1)
        factor = random.randint(0, 3)
        sf = factor + 1
        # print(sf)

        [b, c, h, w] = lr.shape
        new_h = int(h / sf)
        new_w = int(w / sf)
        downer = torch.nn.Upsample(size=[new_h, new_w], mode='bilinear', align_corners=True)
        uper = torch.nn.Upsample(scale_factor=sf, mode='bilinear', align_corners=True)

        hr_rs = uper(downer(hr))
        hr_res, hr_res_feat = self.model(hr_rs, idx_scale)

        area_weight = sf * sf
        if up_or_down == 0:
            sr, sr_feat = self.model(lr, idx_scale)
            lr_up = uper(lr)
            hr_up = uper(hr)
            sr_up, sr_feat_up = self.model(lr_up, idx_scale)

            sr_edge_x = sr[:, :, 0:h - 1, :] - sr[:, :, 1:h, :]
            sr_edge_y = sr[:, :, :, 0:w - 1] - sr[:, :, :, 1:w]
            hr_edge_x = hr[:, :, 0:h - 1, :] - hr[:, :, 1:h, :]
            hr_edge_y = hr[:, :, :, 0:w - 1] - hr[:, :, :, 1:w]

            [b, c, h, w] = lr_up.shape
            new_h = int(h / sf)
            new_w = int(w / sf)
            downer = torch.nn.Upsample(size=[new_h, new_w], mode='bilinear', align_corners=True)
            sr_up_down = downer(sr_up)

            [b, c, h, w] = sr_up_down.shape
            sr_up_down_edge_x = sr_up_down[:, :, 0:h - 1, :] - sr_up_down[:, :, 1:h, :]
            sr_up_down_edge_y = sr_up_down[:, :, :, 0:w - 1] - sr_up_down[:, :, :, 1:w]

            loss = (self.loss(downer(sr_up), hr) + self.loss(sr, hr)
                    + 0.05 * self.loss(downer(sr_feat_up), sr_feat.detach()))
            loss += 0.05 * self.loss(hr_res, hr)
        else:
            downer = torch.nn.Upsample(size=[new_h, new_w], mode='bilinear', align_corners=True)
            lr_down = downer(lr)
            hr_down = downer(hr)
            sr_down, sr_feat_down = self.model(lr_down, idx_scale)
            sr, sr_feat = self.model(lr, idx_scale)

            [b, c, h, w] = hr.shape
            sr_edge_x = sr[:, :, 0:h - 1, :] - sr[:, :, 1:h, :]
            sr_edge_y = sr[:, :, :, 0:w - 1] - sr[:, :, :, 1:w]
            hr_edge_x = hr[:, :, 0:h - 1, :] - hr[:, :, 1:h, :]
            hr_edge_y = hr[:, :, :, 0:w - 1] - hr[:, :, :, 1:w]

            [b, c, h, w] = hr_down.shape
            sr_down_edge_x = sr_down[:, :, 0:h - 1, :] - sr_down[:, :, 1:h, :]
            sr_down_edge_y = sr_down[:, :, :, 0:w - 1] - sr_down[:, :, :, 1:w]
            hr_down_edge_x = hr_down[:, :, 0:h - 1, :] - hr_down[:, :, 1:h, :]
            hr_down_edge_y = hr_down[:, :, :, 0:w - 1] - hr_down[:, :, :, 1:w]

            loss = (self.loss(sr_down, hr_down.detach()) * area_weight + self.loss(sr, hr)
                    + 0.05 * self.loss(sr_feat_down, downer(sr_feat.detach())) * area_weight)

        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()
            ))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(
        epoch, Decimal(lr)))
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    criterion_ssim = pytorch_ssim.SSIM(window_size=11)
    criterion_mse = nn.MSELoss(size_average=True)
    # vgg_model = vgg_init('./pretrained/vgg16-397923af.pth')
    # vgg = vgg_v2(vgg_model)
    # vgg.eval()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()

        lr = lr / 255.0
        hr = hr / 255.0
        [b, c, h, w] = hr.shape
        phr1, phr2, phr4 = self.model(lr, 3)

        hr4 = hr[:, :, 0::4, 0::4]
        hr2 = hr[:, :, 0::2, 0::2]
        hr1 = hr

        rect_loss = criterion_ssim(phr1, hr1) + criterion_ssim(phr2, hr2) + criterion_ssim(phr4, hr4)
        full_loss = rect_loss

        if full_loss.item() < self.args.skip_threshold * self.error_last:
            full_loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, rect_loss.item()))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}=\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                full_loss.item(),
                rect_loss.item(),
                # percept_loss.item(),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()
        print(rect_loss.item())

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    device = torch.device('cpu' if self.args.cpu else 'cuda')
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            eval_acc_ssim = 0
            self.loader_test.dataset.set_scale(idx_scale)
            ### not commented out in san:
            # tqdm_test = tqdm(self.loader_test, ncols=80)
            ### different from san:
            for idx_img, (lr, hr, filename, _) in enumerate(self.loader_test):
                filename = filename[0]
                no_eval = (hr.nelement() == 1)
                if not no_eval:
                    lr, hr = self.prepare(lr, hr)
                else:
                    lr, = self.prepare(lr)

                N, C, H, W = lr.size()
                scale = self.args.scale[idx_scale]
                outH, outW = int(H * scale), int(W * scale)
                # _, _, outH, outW = hr.size()
                # timer_test.tic()

                scale_coord_map, mask = self.input_matrix_wpn_new(
                    H, W, self.args.scale[idx_scale])
                # position, mask = self.pos_matrix(H, W, self.args.scale[idx_scale])
                # print(timer_test.toc())
                if self.args.n_GPUs > 1 and not self.args.cpu:
                    scale_coord_map = torch.cat([scale_coord_map] * self.args.n_GPUs, 0)
                else:
                    scale_coord_map = scale_coord_map.to(device)

                timer_test.tic()
                sr = self.model(lr, idx_scale, scale_coord_map)
                timer_test.hold()
                re_sr = torch.masked_select(sr, mask.to(device))
                sr = re_sr.contiguous().view(N, C, outH, outW)
                sr = utility.quantize(sr, self.args.rgb_range)
                # timer_test.hold()

                save_list = [sr]
                # ???
                print("no_eval:")
                print(no_eval)
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                    eval_acc_ssim += utility.calc_ssim(
                        sr, hr, scale,
                        benchmark=self.loader_test.dataset.benchmark)
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    a = 1
                    self.ckp.save_results(filename, save_list, scale)
            ###
            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            # print(timer_test.acc / 100)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} SSIM: {:.4f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    eval_acc_ssim / len(self.loader_test),
                    best[0][idx_scale],
                    best[1][idx_scale] + 1))
            # ???
            print(
                '[{} x{}]\tPSNR: {:.3f} SSIM: {:.4f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    eval_acc_ssim / len(self.loader_test),
                    best[0][idx_scale],
                    best[1][idx_scale] + 1))
            print("psnr")
            print(eval_acc / len(self.loader_test))

    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(
        epoch, Decimal(lr)))
    self.loss.start_log()
    self.model.train()
    device = torch.device('cpu' if self.args.cpu else 'cuda')

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
        lr, hr = self.prepare(lr, hr)
        timer_data.hold()
        timer_model.tic()

        ###
        N, C, H, W = lr.size()
        _, _, outH, outW = hr.size()
        scale_coord_map, mask = self.input_matrix_wpn_new(
            H, W, self.args.scale[idx_scale])  ### get the position matrix and mask
        if self.args.n_GPUs > 1 and not self.args.cpu:
            scale_coord_map = torch.cat([scale_coord_map] * self.args.n_GPUs, 0)
        else:
            scale_coord_map = scale_coord_map.to(device)
        ###

        self.optimizer.zero_grad()
        ### slightly different here:
        sr = self.model(lr, idx_scale, scale_coord_map)
        ###
        ###
        re_sr = torch.masked_select(sr, mask.to(device))
        re_sr = re_sr.contiguous().view(N, C, outH, outW)
        ###
        loss = self.loss(re_sr, hr)
        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]

    if self.args.n_GPUs == 1:
        target = self.model
    else:
        target = self.model  # .module

    torch.save(
        target.state_dict(),
        os.path.join(self.ckp.dir, 'model', 'model_{}.pt'.format(epoch)))
def train(self):
    self.scheduler.step()
    self.loss.step()
    epoch = self.scheduler.last_epoch + 1
    lr = self.scheduler.get_lr()[0]

    self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(
        epoch, Decimal(lr)))
    self.loss.start_log()
    self.model.train()

    timer_data, timer_model = utility.timer(), utility.timer()
    for batch, item in tqdm(enumerate(self.loader_train)):
        lrs, hrs, _ = item
        idx_scale = 3
        timer_data.hold()
        timer_model.tic()

        self.optimizer.zero_grad()

        lossF = torch.zeros(1).cuda()
        lossS = torch.zeros(1).cuda()
        estimate = torch.zeros(hrs[:, 0].size()).cuda()
        lr = torch.zeros(lrs[:, 0].size()).cuda()
        for frame in range(self.args.n_frame):
            lr, lr_ = lrs[:, frame], lr  # lrs[:, frame]
            hr = hrs[:, frame]
            lr, lr_, hr = self.prepare([lr, lr_, hr])
            sr, lre = self.model(lr, lr_, estimate)
            lossF += self.loss(lre, lr)
            lossS += self.loss(sr, hr)
            estimate = sr

        loss = lossF + lossS
        if loss.item() < self.args.skip_threshold * self.error_last:
            loss.backward()
            self.optimizer.step()
        else:
            print('Skip this batch {}! (Loss: {})'.format(
                batch + 1, loss.item()))

        timer_model.hold()

        if (batch + 1) % self.args.print_every == 0:
            self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                (batch + 1) * self.args.batch_size,
                len(self.loader_train.dataset),
                self.loss.display_loss(batch),
                timer_model.release(),
                timer_data.release()))

        timer_data.tic()

    self.loss.end_log(len(self.loader_train))
    self.error_last = self.loss.log[-1, -1]
def test(self):
    epoch = self.scheduler.last_epoch + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.scale)))
    self.model.eval()

    timer_test = utility.timer()
    with torch.no_grad():
        for idx_scale, scale in enumerate(self.scale):
            eval_acc = 0
            self.loader_test.dataset.set_scale(idx_scale)
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lrs, hrs, filename) in enumerate(tqdm_test):
                filename = filename[0]
                no_eval = (hrs.nelement() == 1)

                estimate = torch.zeros(hrs[:, 0].size()).cuda()
                lr = torch.zeros(lrs[:, 0].size()).cuda()
                srs = torch.zeros(hrs.size())  # .cuda()
                for frame in range(self.args.n_frame):
                    lr, lr_ = lrs[:, frame], lr  # lrs[:, frame]
                    hr = hrs[:, frame]
                    lr, lr_, hr = self.prepare([lr, lr_, hr])
                    sr, lre = self.model(lr, lr_, estimate)
                    estimate = sr
                    srs[0, frame].copy_(sr[0])

                '''
                if not no_eval:
                    lr, hr = self.prepare([lr, hr])
                else:
                    lr = self.prepare([lr])[0]

                sr = self.model(lr, idx_scale)
                '''
                sr = utility.quantize(sr, self.args.rgb_range)
                utils.save_image(
                    torch.cat([sr, hr], -1) / 255,
                    'patch/{}.png'.format(idx_img))

                save_list = [sr]
                if not no_eval:
                    eval_acc += utility.calc_psnr(
                        srs, hrs, scale, self.args.rgb_range,
                        benchmark=self.loader_test.dataset.benchmark)
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(filename, save_list, scale)

            self.ckp.log[-1, idx_scale] = eval_acc / len(self.loader_test)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test,
                    scale,
                    self.ckp.log[-1, idx_scale],
                    best[0][idx_scale],
                    best[1][idx_scale] + 1))

    self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0] + 1 == epoch))
def test(self):
    torch.set_grad_enabled(False)

    epoch = self.optimizer.get_last_epoch() + 1
    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(
        torch.zeros(1, len(self.loader_test), len(self.scale))
    )
    self.model.eval()

    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            if self.args.data_test[0] != 'DemoSplit':
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)
            else:
                for lr, patch_num, filename in tqdm(d, ncols=80):
                    step = int(patch_num[0] * patch_num[1]) / self.args.batch_size
                    step = math.ceil(step)
                    lr = torch.squeeze(lr, 0)
                    for i in range(step):
                        index = i * self.args.batch_size
                        if (i + self.args.batch_size < patch_num[0] * patch_num[1]):
                            lr_batch, hr = self.prepare(
                                lr[index:index + self.args.batch_size, :, :, :],
                                torch.zeros((1)))
                        else:
                            lr_batch, hr = self.prepare(
                                lr[index:patch_num[0] * patch_num[1], :, :, :],
                                torch.zeros((1)))
                        sr = self.model(lr_batch, idx_scale)
                        if (i == 0):
                            whole_sr = sr
                        else:
                            whole_sr = torch.cat((whole_sr, sr), 0)

                    target_img = torch.zeros((
                        patch_num[0] * self.args.patch_size,
                        patch_num[1] * self.args.patch_size,
                        self.args.n_colors))
                    for i in range(patch_num[0]):
                        for j in range(patch_num[1]):
                            a = whole_sr[i * patch_num[1] + j, :, :, :]
                            target_img[
                                i * self.args.patch_size:(i + 1) * self.args.patch_size,
                                j * self.args.patch_size:(j + 1) * self.args.patch_size, :
                            ] = torch.squeeze(a.permute(2, 3, 1, 0), 3)
                    target_img = target_img.permute(2, 0, 1)
                    target_img = utility.quantize(target_img, self.args.rgb_range)
                    target_img = target_img.cpu().clone()
                    unloader = transforms.ToPILImage()
                    target_img = PIL.ImageOps.invert(unloader(target_img))
                    target_img.save(os.path.join(
                        self.args.dir_demo, 'result_' + filename[0] + '.png'))
                    save_list = []
                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    d.dataset.name,
                    scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + 1
                )
            )

    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')

    if self.args.save_results:
        self.ckp.end_background()

    if not self.args.test_only:
        self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

    self.ckp.write_log(
        'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
    )
    torch.set_grad_enabled(True)
def test(self):
    torch.set_grad_enabled(False)

    epoch = self.optimizer.get_last_epoch() + 1
    if (self.args.start_test > epoch and (not self.args.test_only)) or (self.args.not_test):
        torch.set_grad_enabled(True)
        self.ckp.save_for_resume(self, epoch)  # save the model, optimizer, PSNR log, etc.
        return

    self.ckp.write_log('\nEvaluation:')
    self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
    self.model.eval()

    f = open('1.txt', 'w')
    print(self.model.state_dict(), file=f)
    f.close()

    timer_test = utility.timer()
    if self.args.save_results:
        self.ckp.begin_background()
    for idx_data, d in enumerate(self.loader_test):  # idx_data indexes the test datasets
        for idx_scale, scale in enumerate(self.scale):
            d.dataset.set_scale(idx_scale)
            for lr, hr, filename, _ in tqdm(d, ncols=80):
                lr, hr = self.prepare(lr, hr)
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range)

                save_list = [sr]
                self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                    sr, hr, scale, self.args.rgb_range, dataset=d)
                if self.args.save_gt:
                    save_list.extend([lr, hr])

                if self.args.save_results:
                    self.ckp.save_results(d, filename[0], save_list, scale)

            self.ckp.log[-1, idx_data, idx_scale] /= len(d)
            best = self.ckp.log.max(0)  # values and indices
            self.ckp.write_log(
                '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    d.dataset.name,
                    scale,
                    self.ckp.log[-1, idx_data, idx_scale],
                    best[0][idx_data, idx_scale],
                    best[1][idx_data, idx_scale] + self.args.start_test))

    self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
    self.ckp.write_log('Saving...')

    if self.args.save_results:
        self.ckp.end_background()

    if (not self.args.test_only):
        # save the model, optimizer, PSNR log, etc.
        self.ckp.save(
            self, epoch,
            is_best=(best[1][0, 0] + self.args.start_test == epoch))

    self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True)

    torch.set_grad_enabled(True)