# --- Fragment: tail of an iteration-based training loop (incomplete at both
# ends; the enclosing function and loop header are outside this view, which
# is why L1's indentation does not match the lines below it). ---
# Run a validation pass; the meters accumulate loss/PSNR over the val set.
validate(i, val_dataloader, model, criterion, val_loss_meter,
                         val_psnr_meter, writer, config)

                # Log averaged validation metrics to TensorBoard at iteration i.
                writer.add_scalar('loss/val_loss', val_loss_meter.avg, i)
                writer.add_scalar('psnr/val_psnr', val_psnr_meter.avg, i)

                format_str = '===> Iter [{:d}/{:d}] Val_Loss: {:.6f}, Val_PSNR: {:.4f}'
                print(
                    format_str.format(i, config['training']['iterations'],
                                      val_loss_meter.avg, val_psnr_meter.avg))
                # Flush so progress is visible immediately (e.g. piped/redirected stdout).
                sys.stdout.flush()

                # Checkpoint whenever validation PSNR ties or beats the best so far
                # (>= means an equal score also overwrites the previous checkpoint).
                if val_psnr_meter.avg >= best_val_psnr:
                    best_val_psnr = val_psnr_meter.avg
                    # Full resumable state: model + optimizer + scheduler + bookkeeping.
                    ckpt = {
                        'model': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'scheduler': scheduler.state_dict(),
                        'best_val_psnr': best_val_psnr,
                        'iter': i
                    }
                    # Path: <checkpoint_folder>/<cfg_name>/<cfg_name>_<iter>.pth.
                    # NOTE(review): [:-5] assumes the config filename ends in a
                    # 5-char suffix such as '.yaml' — confirm against args.config.
                    path = '{}/{}/{}_{}.pth'.format(
                        config['training']['checkpoint_folder'],
                        os.path.basename(args.config)[:-5],
                        os.path.basename(args.config)[:-5], i)
                    torch.save(ckpt, path)

                # Reset meters so the next validation pass starts from zero.
                val_loss_meter.reset()
                val_psnr_meter.reset()

            # Termination check for the training loop (body truncated in this view).
            if i >= config['training']['iterations']:
# Example #2  (scrape-artifact separator between unrelated code fragments)
# 0
    # --- Fragment: training setup + start of the epoch loop (the enclosing
    # function header and the rest of the loop body are outside this view). ---
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)

    # Patch-based training set; shuffled loader with pinned memory for faster
    # host-to-GPU transfer.
    train_set = Train(args.training_set, scale=args.scale, patch_size=args.patch_size)
    trainloader = DataLoader(train_set, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_workers, pin_memory=True)

    val_set = Validation(args.val_set)
    # NOTE(review): shuffle=True on a validation loader is unusual — aggregate
    # metrics don't depend on sample order; confirm this is intentional.
    valloader = DataLoader(val_set, batch_size=1,
                            shuffle=True, num_workers=args.num_workers, pin_memory=True)
    best_epoch = 0
    best_PSNR = 0.0
    loss_plot = []  # per-epoch training-loss history (presumably for plotting)
    psnr_plot = []  # per-epoch validation-PSNR history (presumably for plotting)
    # Deep-copy the initial weights; presumably replaced whenever a new best
    # PSNR is reached later in the loop (outside this view) — TODO confirm.
    best_weights = copy.deepcopy(net.state_dict())


    for epoch in range(args.epoch):
        net.train()
        # Running average of the training loss for this epoch.
        epoch_loss = AverageMeter()

        for data in trainloader:
            inputs, labels = data

            inputs = inputs.to(device)
            labels = labels.to(device)
            preds = net(inputs)

            # Replicate single-channel tensors to 3 channels — presumably for a
            # loss/metric that expects RGB input; the downstream use is
            # truncated in this view, so confirm before relying on this.
            preds_3d = preds.repeat(1, 3, 1, 1)
            labels_3d = labels.repeat(1, 3, 1, 1)
# Example #3  (scrape-artifact separator between unrelated code fragments)
# 0
            # Commented-out debugging visualization: a subplot grid comparing
            # intermediate outputs against predictions per sample (the head of
            # this comment block lies outside this view; kept as-is).
            #     plt.xticks([])
            #     plt.yticks([])

            #     plt.subplot(len(outs), 3, j + 1 + len(outs))
            #     plt.imshow(out.squeeze().clamp(min=0, max=1).cpu().numpy(), cmap='gray')
            #     plt.xticks([])
            #     plt.yticks([])

            #     plt.subplot(len(outs), 3, j + 1 + 2*len(outs))
            #     plt.imshow(y_pred.squeeze().clamp(min=0, max=1).cpu().numpy(), cmap='gray')
            #     plt.xticks([])
            #     plt.yticks([])

            #     plt.show()

    # Report average per-sample prediction time over the validation split.
    print('Prediction takes {} (s)'.format(
        (time.time() - start) / datasizes['val']))
    print('Validation loss:', val_loss / datasizes['val'])
    # Step the scheduler with the validation loss — presumably a
    # ReduceLROnPlateau-style scheduler (it takes a metric); confirm where
    # `scheduler` is constructed (outside this view).
    scheduler.step(val_loss)

    # Save a checkpoint whenever validation loss improves on the best so far.
    # NOTE(review): the saved dict omits scheduler state, so a resumed run
    # restarts the LR schedule — confirm that is acceptable.
    if val_loss < best_val_loss:
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, 'models/unet_bn_{}.pth'.format(current_time))
        best_val_loss = val_loss

# for i, (ys, ress) in enumerate(dataloaders['train']):