Example #1
0
def test(args):
    """Run EDSR inference over a dataset and, per batch, render a grid of
    (input, output, origin) image triplets, saving it to 'res.png'.

    Args:
        args: namespace providing layers, featuresize, savedir (checkpoint
            path), path (dataset root), and batchsize.
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    model = EDSR(num_layers=args.layers,
                 feature_size=args.featuresize).to(device)
    # Wrap in DataParallel *before* loading so the checkpoint's
    # 'module.'-prefixed keys (saved from a DataParallel model) match.
    model = nn.DataParallel(model, device_ids=range(
        torch.cuda.device_count())).to(device)
    model.load_state_dict(torch.load(args.savedir))
    model.eval()

    # NOTE(review): phase='train' on the *test* dataset looks like a
    # copy-paste leftover — confirm against the Patches implementation.
    test_dataset = Patches(root=args.path, phase='train')
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batchsize)

    # Inference only: disable autograd bookkeeping to save memory/time.
    with torch.no_grad():
        for data in test_loader:
            data50, data100 = data
            data50 = data50.to(device)

            _, out_imgs = model(data50)

            res = []
            for idx in range(len(out_imgs)):
                images = []
                # input (low-resolution)
                data50_cpu = torch.squeeze(data50[idx]).cpu()
                images.append(transforms.ToPILImage()(data50_cpu).convert("RGB"))
                # output (super-resolved)
                output = torch.squeeze(out_imgs[idx]).cpu()
                images.append(transforms.ToPILImage()(output).convert("RGB"))
                # origin (ground truth; never moved to GPU, already on CPU)
                data100_cpu = torch.squeeze(data100[idx])
                images.append(transforms.ToPILImage()(data100_cpu).convert("RGB"))
                res.append(images)

            fig = plt.figure(figsize=(7, 8))
            # Use the actual number of samples in this batch: the final batch
            # may be smaller than args.batchsize, which previously caused an
            # IndexError on res[r][c].
            rows = len(res)
            cols = 3

            titles = ['input', 'output', 'origin']
            axes = []
            for r in range(rows):
                for c in range(cols):
                    axes.append(fig.add_subplot(rows, cols, (r * cols + c) + 1))
                    axes[-1].set_title(titles[c])
                    plt.imshow(res[r][c])
            # Save once per figure instead of once per subplot (the original
            # rewrote the same PNG rows*cols times).
            # NOTE(review): every batch overwrites the same 'res.png' — confirm
            # whether per-batch filenames were intended.
            plt.savefig('res.png', dpi=300)

            plt.show()
Example #2
0
def Test(MODEL_NAME,
         UPSCALE_FACTOR,
         is_save=False,
         IMAGE_DIR=r'data\testing_lr_images',
         TEST_MODE=True):
    """Super-resolve every image in IMAGE_DIR and return the results.

    Args:
        MODEL_NAME: either an already-built model instance (EDSR, WDSR or
            SRResnet) or a checkpoint filename under 'epochs/'.
        UPSCALE_FACTOR: upscale factor used when constructing an EDSR from a
            checkpoint name.
        is_save: if True, write each SR image to 'data/testing_sr_images/'.
        IMAGE_DIR: directory containing the low-resolution input images.
        TEST_MODE: if True, run on GPU; otherwise load weights onto the CPU.

    Returns:
        List of super-resolved images as numpy arrays.
    """
    # isinstance replaces the chained `type(x) is T` checks — idiomatic and
    # also accepts subclasses of the three model types.
    if isinstance(MODEL_NAME, (EDSR, WDSR, SRResnet)):
        model = MODEL_NAME
    else:
        model = EDSR(UPSCALE_FACTOR).eval()
        checkpoint = os.path.join('epochs', MODEL_NAME)
        if TEST_MODE:
            model.cuda()
            model.load_state_dict(torch.load(checkpoint))
        else:
            # Map GPU-saved tensors onto the CPU.
            model.load_state_dict(
                torch.load(checkpoint,
                           map_location=lambda storage, loc: storage))

    print('\n----------------------------------------------------------')
    imgs = []
    with torch.no_grad():
        model.eval()
        for image_name in glob.glob(os.path.join(IMAGE_DIR, '*.*')):
            image = Image.open(image_name)
            image = ToTensor()(image).unsqueeze(0)
            if TEST_MODE:
                image = image.cuda()

            # perf_counter is monotonic and higher-resolution than time.time,
            # so the elapsed measurement cannot go backwards on clock updates.
            start = time.perf_counter()
            out = model(image)
            elapsed = time.perf_counter() - start

            # Clamp to [0, 1] before converting back to a PIL image.
            out_img = ToPILImage()(torch.clip(out[0], 0, 1))
            if is_save:
                # Ensure the output directory exists so save() cannot fail
                # with FileNotFoundError on a fresh checkout.
                os.makedirs('data/testing_sr_images', exist_ok=True)
                out_img.save(
                    f'data/testing_sr_images/{os.path.basename(image_name)}')

            sr_img = gpu_to_numpy(out[0], is_squeeze=False)
            imgs.append(sr_img)
            plot_hr_lr(sr_img, image)
            print(f'cost time: {elapsed}s')

    return imgs
Example #3
0
def main(args):
    """Train and/or evaluate an EDSR model selected via cfg_dict.

    Expects args to provide: cfg_name, train, eval, log_every, save_every,
    checkpoint_dir, model_path, lr_img_path, hr_img_path.
    """
    cfg = cfg_dict[args.cfg_name]
    writer = SummaryWriter(os.path.join("runs", args.cfg_name))
    train_loader = get_data_loader(cfg, cfg["train_dir"])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = EDSR(cfg).to(device)
    # NOTE(review): `criterion` is unused — the loop calls model.loss instead
    # (see the commented-out line below).
    criterion = torch.nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg["init_lr"],
                                 betas=(0.9, 0.999), eps=1e-8)

    global_batches = 0
    if args.train:
        for epoch in range(cfg["n_epoch"]):
            model.train()
            running_loss = 0.0
            for i, batch in enumerate(train_loader):
                lr, hr = batch[0].to(device), batch[1].to(device)
                optimizer.zero_grad()
                sr = model(lr)
                loss = model.loss(sr, hr)
                # loss = criterion(model(lr), hr)
                running_loss += loss.item()
                loss.backward()
                optimizer.step()
                global_batches += 1
                # Step-wise LR schedule: divide every param group's learning
                # rate by 10 once every cfg["lr_decay_every"] batches.
                if global_batches % cfg["lr_decay_every"] == 0:
                    for param_group in optimizer.param_groups:
                        print(f"decay lr to {param_group['lr'] / 10}")
                        param_group["lr"] /= 10

            if epoch % args.log_every == 0:
                model.eval()
                with torch.no_grad():
                    # Relies on `batch` and `sr` still holding the epoch's
                    # last mini-batch (Python loop variables outlive the loop).
                    batch_samples = {"lr": batch[0], "hr": batch[1], 
                                     "sr": sr.cpu()}
                    writer.add_scalar("training-loss", 
                                      running_loss / len(train_loader),
                                      global_step=global_batches)
                    writer.add_scalar("PSNR", compute_psnr(batch_samples), 
                                      global_step=global_batches)
                    # Visualize only the first 3 samples of the batch.
                    samples = {k: v[:3] for k, v in batch_samples.items()}
                    fig = visualize_samples(samples, f"epoch-{epoch}")
                    writer.add_figure("sample-visualization", fig, 
                                      global_step=global_batches)

            if epoch % args.save_every == 0:
                # Checkpoint both model and optimizer state, overwriting the
                # same file each time.
                state = {"net": model.state_dict(), 
                         "optim": optimizer.state_dict()}
                checkpoint_dir = args.checkpoint_dir
                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                path = os.path.join(checkpoint_dir, args.cfg_name)
                torch.save(state, path)

    # eval
    if args.eval:
        assert args.model_path and args.lr_img_path
        print(f"evaluating {args.lr_img_path}")
        state = torch.load(args.model_path, map_location=device)
        model.load_state_dict(state["net"])
        optimizer.load_state_dict(state["optim"])

        with torch.no_grad():
            lr = img2tensor(args.lr_img_path)
            sr = model(lr.clone().to(device)).cpu()
            samples = {"lr": lr, "sr": sr}
            # PSNR is only computable when a ground-truth HR image is given.
            if args.hr_img_path:
                samples["hr"] = img2tensor(args.hr_img_path)
                print(f"PSNR: {compute_psnr(samples)}")
            directory = os.path.dirname(args.lr_img_path)
            name = f"eval-{args.cfg_name}-{args.lr_img_path.split('/')[-1]}"
            visualize_samples(samples, name, save=True, 
                              directory=directory, size=6)
    # NOTE(review): `output_dir` is not defined anywhere in this function —
    # this line looks like a stray paste and will raise NameError if reached.
    os.makedirs(output_dir)

# Select the compute device.
# NOTE(review): `device_mode` and `device_gpu_id` are not defined in this
# chunk — presumably set earlier in the file; verify before relying on them.
if device_mode == 'CPU':
    # Hide all GPUs from CUDA so torch cannot grab one.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    device = torch.device("cpu")
else:
    # Expose only the requested GPU; fall back to CPU if CUDA is unavailable.
    os.environ['CUDA_VISIBLE_DEVICES'] = device_gpu_id
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

with torch.no_grad():
    checkpoint = torch.load(checkpoint_path)

    model = EDSR(upscale=scale)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)
    model.eval()

    fs = glob.glob(os.path.join(input_dir, input_suffix))
    psnrs = []
    for f in fs:
        img = misc.imread(f)
        lr_img = misc.imresize(img, 1.0 / scale, 'bicubic')
        bic_img = misc.imresize(lr_img, scale * 1.0, 'bicubic')
        lr_y = utils.rgb2ycbcr(lr_img)[:, :, 0]
        bic_ycbcr = utils.rgb2ycbcr(bic_img)
        bic_y = bic_ycbcr[:, :, 0]

        lr_y = torch.from_numpy(lr_y).unsqueeze(0).unsqueeze(0).float().to(
            device) / 255.0
        bic_y = torch.from_numpy(bic_y).unsqueeze(0).unsqueeze(0).float().to(
            device) / 255.0