# NOTE(review): whitespace-mangled fragment of a training script — statement
# boundaries and loop nesting were lost in extraction; the enclosing epoch /
# batch loop headers start before this chunk, so the code is kept byte-identical.
# Per batch: MSE loss -> running-average update (weighted by batch size) ->
#   zero_grad / backward / step -> tqdm postfix + progress advance.
# Per epoch: checkpoint state_dict to outputs_dir/epoch_{N}.pth, switch to
#   net.eval(), then loop over test_dataloader under torch.no_grad(), clamp
#   predictions to [0, 1], and report average PSNR via img_psnr.
loss = mse_loss(preds, labels) epoch_losses.update(loss.item(), len(inputs)) optimizer.zero_grad() loss.backward() optimizer.step() t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg)) t.update(len(inputs)) torch.save(net.state_dict(), os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch))) net.eval() epoch_psnr = AverageMeter() for data in test_dataloader: inputs, labels = data inputs = inputs.to(device) labels = labels.to(device) with torch.no_grad(): preds = net(inputs).clamp(0.0, 1.0) loss = img_psnr(preds, labels) epoch_psnr.update(loss, len(inputs)) print('eval psnr: {:.2f}'.format(epoch_psnr.avg))
# Inference fragment: run a trained SRResNet over a paired LR / ground-truth
# dataset. Reconstructed from a whitespace-mangled single line; structure
# (no_grad -> per-batch loop) inferred from statement semantics.

# Use CUDA when available; cudnn.benchmark autotunes conv kernels, which is
# safe here because every batch has the same spatial size (batch_size=1 pairs).
cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the evaluation data: paired ground-truth (gt) and low-resolution (lr)
# images, streamed from disk (in_memory=False), no augmentation.
dataset = SRGanDataset(gt_path=args.gt_file, lr_path=args.lr_file,
                       in_memory=False, transform=None)
loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=8)

# Build the network and restore trained weights.
# map_location=device is the fix: without it, a checkpoint saved on GPU
# raises on a CPU-only machine even though `device` falls back to "cpu" above.
model = SRResNet(16, args.scale)
model.load_state_dict(torch.load(args.weights_file, map_location=device))
model = model.to(device)
model.eval()

index = 0  # running image counter (presumably used further down, e.g. output filenames)
with torch.no_grad():
    for data in loader:
        index = index + 1
        lr, gt = data
        lr = lr.to(device)
        gt = gt.to(device)
        # NOTE(review): `weight` is actually the image *width*; name kept
        # because code past this fragment may reference it.
        _, _, height, weight = lr.size()
        # Crop GT so it is exactly scale * LR size (datasets may pad GT by a
        # few pixels when the original dimensions aren't divisible by scale).
        gt = gt[:, :, :height * args.scale, :weight * args.scale]
        output = model(lr)