import os
import time

import numpy as np
import pandas as pd
import torch.nn.functional as F
from PIL import Image
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
import torchvision.transforms.functional as TF

import metrics  # repo-local module assumed to provide psnr / nrmse / ssim
# Logs_DIR (log output directory) is assumed to be defined elsewhere in the repo


def lerp_img_evaluation(hr_img, lerp_img):
    # compare only the Y (luminance) channels of the interpolated and HR images
    lerp_img_y = lerp_img.split()[0]
    hr_img_y = hr_img.split()[0]
    img_to_tensor = ToTensor()
    # PIL .size is (width, height); the tensors are laid out as (N, C, H, W)
    lerp_img_y_tensor = img_to_tensor(lerp_img_y).view(
        1, -1, lerp_img_y.size[1], lerp_img_y.size[0])
    hr_img_y_tensor = img_to_tensor(hr_img_y).view(
        1, -1, hr_img_y.size[1], hr_img_y.size[0])
    psnr = metrics.psnr(lerp_img_y_tensor, hr_img_y_tensor)
    nrmse = metrics.nrmse(lerp_img_y_tensor, hr_img_y_tensor)
    ssim = metrics.ssim(lerp_img_y_tensor, hr_img_y_tensor)
    return psnr, nrmse, ssim
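
# A minimal usage sketch for lerp_img_evaluation (an illustration, not part of
# the original code): build a bicubic down/up-sampled baseline from an HR image
# and score it. The helper name and the default factor are hypothetical.
def _bicubic_baseline_demo(img_path, factor=4):
    hr_img = Image.open(img_path).convert('YCbCr')
    w, h = hr_img.size
    lerp_img = hr_img.resize((w // factor, h // factor), Image.BICUBIC)
    lerp_img = lerp_img.resize((w, h), Image.BICUBIC)
    return lerp_img_evaluation(hr_img, lerp_img)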
def validating(self, model, dataset):
    """
    input:
        model: (object) pytorch model
        dataset: (object) validation dataset
    sets self.val_log = [loss, psnr, nrmse, ssim, time(sec), fps]
    """
    args = self.args
    """ metrics """
    val_loss, val_psnr, val_nrmse, val_ssim = 0, 0, 0, 0
    data_loader = DataLoader(dataset=dataset,
                             batch_size=args.valbatch_size,
                             num_workers=args.threads,
                             shuffle=False)
    batch_iterator = iter(data_loader)
    steps = len(dataset) // args.valbatch_size
    model.eval()  # disable dropout / freeze batch-norm statistics
    start = time.time()
    for step in range(steps):
        x, y = next(batch_iterator)
        x = x.to(self.device)
        y = y.to(self.device)
        # generate SR output and accumulate per-batch metrics
        gen_y = model(x)
        """ metrics """
        val_loss += F.mse_loss(gen_y, y).item()
        val_psnr += metrics.psnr(gen_y.data, y.data)
        val_nrmse += metrics.nrmse(gen_y.data, y.data)
        val_ssim += metrics.ssim(gen_y.data, y.data)
        # val_vifp += metrics.vifp(gen_y.data, y.data)
    _time = time.time() - start
    nb_samples = steps * args.valbatch_size
    """ metrics """
    val_log = [
        val_loss / steps, val_psnr / steps, val_nrmse / steps,
        val_ssim / steps, _time, nb_samples / _time
    ]
    self.val_log = [round(v, 3) for v in val_log]
def Y_to_RGB(self):
    args = self.args
    img_path = self.img_path
    model = self.model
    img_to_tensor = ToTensor()
    # super-resolve only the Y (luminance) channel; Cb/Cr are reused as-is
    hr_img = Image.open(img_path).convert('YCbCr')
    hr_img_y, hr_img_Cb, hr_img_Cr = hr_img.split()
    hr_img_y_tensor = img_to_tensor(hr_img_y).view(
        1, -1, hr_img_y.size[1], hr_img_y.size[0])
    if args.interpolation:
        # interpolated input is already at target size; feed it through as-is
        args.upscale_factor = 1
    # TF.resize expects (height, width), while PIL .size is (width, height)
    lr_img_y = TF.resize(hr_img_y,
                         (hr_img_y.size[1] // args.upscale_factor,
                          hr_img_y.size[0] // args.upscale_factor))
    lr_img_y_tensor = img_to_tensor(lr_img_y).view(
        1, -1, lr_img_y.size[1], lr_img_y.size[0])
    model_input = lr_img_y_tensor
    if args.cuda:
        model = model.cuda()
        model_input = model_input.cuda()
    sr_img_y_tensor = model(model_input).detach().cpu()
    """ metrics """
    psnr = metrics.psnr(sr_img_y_tensor, hr_img_y_tensor)
    nrmse = metrics.nrmse(sr_img_y_tensor, hr_img_y_tensor)
    ssim = metrics.ssim(sr_img_y_tensor, hr_img_y_tensor)
    # convert the [0, 1] float tensor back to an 8-bit PIL image
    sr_img_y = sr_img_y_tensor[0].numpy()
    sr_img_y *= 255.0
    sr_img_y = sr_img_y.clip(0, 255)
    sr_img_y = Image.fromarray(np.uint8(sr_img_y[0]), mode='L')
    sr_img = Image.merge('YCbCr',
                         [sr_img_y, hr_img_Cb, hr_img_Cr]).convert('RGB')
    return sr_img, (psnr, ssim, nrmse)
def evaluating(self, model, dataset, split):
    """
    Evaluate overall performance of the model
    input:
        model: (object) pytorch model
        dataset: (object) dataset
        split: (str) split of dataset in ['train', 'val', 'test']
    records [psnr, nrmse, ssim] plus timing info to Logs_DIR/statistic/<split>.csv
    """
    args = self.args
    """ metrics """
    # psnr, nrmse, ssim, vifp, fsim
    psnr, nrmse, ssim = 0, 0, 0
    model.eval()
    data_loader = DataLoader(dataset,
                             args.evalbatch_size,
                             num_workers=4,
                             shuffle=False)
    batch_iterator = iter(data_loader)
    steps = len(dataset) // args.evalbatch_size
    start = time.time()
    for step in range(steps):
        x, y = next(batch_iterator)
        if args.cuda:
            x = x.cuda()
            y = y.cuda()
        # generate SR output and accumulate per-batch metrics
        """ metrics """
        gen_y = model(x)
        psnr += metrics.psnr(gen_y, y)
        nrmse += metrics.nrmse(gen_y, y)
        ssim += metrics.ssim(gen_y, y)
        # vifp += metrics.vifp(gen_y.data, y.data)
    _time = time.time() - start
    if not os.path.exists(os.path.join(Logs_DIR, 'statistic')):
        os.makedirs(os.path.join(Logs_DIR, 'statistic'))
    # recording performance of the model
    nb_samples = steps * args.evalbatch_size
    fps = nb_samples / _time
    basic_info = [
        self.date, self.method, self.epoch, self.iter, nb_samples, _time, fps
    ]
    basic_info_names = [
        'date', 'method', 'epochs', 'iters', 'nb_samples', 'time(sec)', 'fps'
    ]
    """ metrics """
    perform = [round(total / steps, 3) for total in [psnr, nrmse, ssim]]
    perform_names = ['psnr', 'nrmse', 'ssim']
    cur_log = pd.DataFrame([basic_info + perform],
                           columns=basic_info_names + perform_names)
    # save performance
    log_path = os.path.join(Logs_DIR, 'statistic', "{}.csv".format(split))
    if os.path.exists(log_path):
        logs = pd.read_csv(log_path)
    else:
        logs = pd.DataFrame([])
    # DataFrame.append was removed in pandas 2.0; use pd.concat instead
    logs = pd.concat([logs, cur_log], ignore_index=True)
    logs.to_csv(log_path, index=False, float_format='%.3f')
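
# Hedged helper (an illustration, not part of the original code): read back the
# csv that evaluating() writes for a given split, so runs can be compared, e.g.
# load_statistic('test')[['date', 'method', 'psnr', 'ssim']].tail().
def load_statistic(split):
    return pd.read_csv(
        os.path.join(Logs_DIR, 'statistic', "{}.csv".format(split)))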
def training(self, net, datasets, verbose=False):
    """
    input:
        net: (object) model & optimizer
        datasets: (list) [train, val] dataset objects
    """
    args = self.args
    steps = len(datasets[0]) // args.batch_size
    # 'trigger' decides whether nEpochs counts epochs or iterations
    if args.trigger == 'epoch':
        args.epochs = args.nEpochs
        args.iters = steps * args.nEpochs
        args.iter_interval = steps * args.interval
    else:
        args.iters = args.nEpochs
        args.epochs = args.nEpochs // steps + 1
        args.iter_interval = args.interval

    net.train()
    start = time.time()
    for epoch in range(1, args.epochs + 1):
        self.epoch = epoch
        # setup data loader
        data_loader = DataLoader(dataset=datasets[0],
                                 batch_size=args.batch_size,
                                 num_workers=args.threads,
                                 shuffle=False)
        batch_iterator = iter(data_loader)
        """ metrics """
        epoch_loss, epoch_psnr = 0, 0
        for step in range(steps):
            self.iter += 1
            if self.iter > args.iters:
                self.iter -= 1
                break
            x, y = next(batch_iterator)
            x = x.to(self.device)
            y = y.to(self.device)
            # training
            gen_y = net(x)
            loss = F.mse_loss(gen_y, y)
            # update generator parameters
            net.optimizer.zero_grad()
            loss.backward()
            net.optimizer.step()
            """ metrics """
            epoch_loss += loss.item()
            epoch_psnr += metrics.psnr(gen_y.data, y.data)
            # epoch_nrmse += metrics.nrmse(gen_y.data, y.data)
            # epoch_ssim += metrics.ssim(gen_y.data, y.data)
            # epoch_vifp += metrics.vifp(gen_y.data, y.data)
            if verbose:
                print("===> Epoch[{}]({}/{}): Loss: {:.4f}; \t PSNR: {:.4f}".
                      format(epoch, step + 1, steps, loss.item(),
                             metrics.psnr(gen_y.data, y.data)))
            # logging
            if self.iter % args.iter_interval == 0:
                _time = time.time() - start
                nb_samples = args.iter_interval * args.batch_size
                """ metrics """
                loss_log = loss.item()
                psnr_log = metrics.psnr(gen_y.data, y.data)
                nrmse_log = metrics.nrmse(gen_y.data, y.data)
                ssim_log = metrics.ssim(gen_y.data, y.data)
                # vifp_log = metrics.vifp(gen_y.data, y.data)
                train_log = [
                    loss_log, psnr_log, nrmse_log, ssim_log, _time,
                    nb_samples / _time
                ]
                self.train_log = [round(v, 3) for v in train_log]
                self.validating(net, datasets[1])
                net.train()  # validating() switches the model to eval mode
                self.logging(verbose=True)
                if self.args.middle_checkpoint:
                    model_name_dir = "up{}_{}_{}_{}_{}".format(
                        self.args.upscale_factor, self.method,
                        self.args.trigger, self.args.nEpochs, self.date)
                    self.save_middle_checkpoint(net, self.epoch, self.iter,
                                                model_name_dir)
                # reinitialize the interval timer
                start = time.time()
        print("===> Epoch {} Complete: Avg. Loss: {:.4f}; \t Avg. PSNR: {:.4f}"
              .format(epoch, epoch_loss / steps, epoch_psnr / steps))
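
# training() drives the optimizer through net.optimizer, so the model object is
# expected to carry its optimizer as an attribute. Below is a minimal sketch of
# such a wrapper, assuming an ESPCN-style body and an Adam optimizer; the class
# name, layers, and learning rate are illustrative assumptions, not the repo's
# actual model.
import torch
import torch.nn as nn


class SRNetSketch(nn.Module):
    def __init__(self, upscale_factor):
        super().__init__()
        # map the 1-channel Y input to upscale_factor**2 channels, then
        # rearrange them into a single upscaled channel with PixelShuffle
        self.body = nn.Sequential(
            nn.Conv2d(1, 64, 5, padding=2), nn.ReLU(),
            nn.Conv2d(64, upscale_factor ** 2, 3, padding=1),
            nn.PixelShuffle(upscale_factor))
        # attach the optimizer to the module, matching the net.optimizer
        # calls in training() above
        self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-4)

    def forward(self, x):
        return self.body(x)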