Example #1
        sr = netG(lr)

        batch_mse = ((sr - hr)**2).data.mean()
        valing_results['mse'] += batch_mse * batch_size
        batch_ssim = pytorch_ssim.ssim(sr, hr).item()  # .data[0] fails on 0-dim tensors since PyTorch 0.4
        valing_results['ssims'] += batch_ssim * batch_size
        valing_results['psnr'] = 10 * log10(
            1 / (valing_results['mse'] / valing_results['batch_sizes']))
        valing_results[
            'ssim'] = valing_results['ssims'] / valing_results['batch_sizes']
        val_bar.set_description(
            desc='[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f'
            % (valing_results['psnr'], valing_results['ssim']))

        val_images.extend([
            display_transform()(val_hr_restore.squeeze(0)),
            display_transform()(hr.data.cpu().squeeze(0)),
            display_transform()(sr.data.cpu().squeeze(0))
        ])
    val_images = torch.stack(val_images)
    val_images = torch.chunk(val_images, val_images.size(0) // 15)
    val_save_bar = tqdm(val_images, desc='[saving training results]')
    index = 1
    for image in val_save_bar:
        image = utils.make_grid(image, nrow=3, padding=5)
        utils.save_image(image,
                         out_path + 'epoch_%d_index_%d.png' % (epoch, index),
                         padding=5)
        index += 1

    # save model parameters
for image_name, lr_image, hr_restore_img, hr_image in test_bar:
    image_name = image_name[0]
    # Variable(..., volatile=True) is deprecated since PyTorch 0.4;
    # run inference under torch.no_grad() instead.
    # if torch.cuda.is_available():
    #   lr_image = lr_image.cuda()
    #   hr_image = hr_image.cuda()

    with torch.no_grad():
        sr_image = model(lr_image)
    mse = ((hr_image - sr_image)**2).data.mean()
    psnr = 10 * log10(1 / mse)
    ssim = pytorch_ssim.ssim(sr_image, hr_image).item()

    test_images = torch.stack([
        display_transform()(hr_restore_img.squeeze(0)),
        display_transform()(hr_image.data.cpu().squeeze(0)),
        display_transform()(sr_image.data.cpu().squeeze(0))
    ])
    image = utils.make_grid(test_images, nrow=3, padding=5)
    utils.save_image(image, OUT_PATH + image_name, padding=5)

    # save psnr / ssim
    results['psnr'].append(psnr)
    results['ssim'].append(ssim)

out_path = 'logs/statistics/'
saved_results = {'psnr': [], 'ssim': []}
for item in results.values():
    psnr = np.array(item['psnr'])
    ssim = np.array(item['ssim'])
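Note: the PSNR/SSIM bookkeeping above recurs in almost every snippet on this page. A minimal, self-contained sketch of the pattern (a sketch only: it assumes pixel values normalized to [0, 1], so MAX_I = 1, and `ssim_fn` stands in for `pytorch_ssim.ssim`):

from math import log10

def update_val_stats(stats, sr, hr, ssim_fn):
    # Accumulate batch-size-weighted sums, then expose running averages.
    batch_size = sr.size(0)
    stats['batch_sizes'] += batch_size
    stats['mse'] += ((sr - hr) ** 2).mean().item() * batch_size
    stats['ssims'] += ssim_fn(sr, hr).item() * batch_size
    stats['psnr'] = 10 * log10(1 / (stats['mse'] / stats['batch_sizes']))  # MAX_I = 1
    stats['ssim'] = stats['ssims'] / stats['batch_sizes']
    return stats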
Example #3
        valing_results['mse'] += batch_mse * batch_size
        batch_ssim = pytorch_ssim.ssim(sr, hr).item()
        valing_results['ssims'] += batch_ssim * batch_size
        valing_results['psnr'] = 10 * log10((hr.max()**2) / (valing_results['mse'] / valing_results['batch_sizes']))
        valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_sizes']
        val_bar.set_description(
            desc='[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (
                valing_results['psnr'], valing_results['ssim']))

        val_images.extend(
            [display_transform()(val_hr_restore.squeeze(0)), display_transform()(hr.detach().cpu().squeeze(0)),
             display_transform()(sr.detach().cpu().squeeze(0))])
    val_images = torch.stack(val_images)
    val_images = torch.chunk(val_images, val_images.size(0) // 15)
    val_save_bar = tqdm(val_images, desc='[saving training results]')
    index = 1
    for image in val_save_bar:
        image = utils.make_grid(image, nrow=3, padding=5)
        utils.save_image(image, out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5)
        index += 1
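The commented pairs this snippet originally carried track a PyTorch API change: since 0.4, indexing a zero-dimensional tensor raises an error, so scalars are extracted with `.item()`:

# batch_ssim = pytorch_ssim.ssim(sr, hr).data[0]  # pre-0.4 idiom, now an IndexError
batch_ssim = pytorch_ssim.ssim(sr, hr).item()     # modern equivalent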
Example #4
    image_name = image_name[0]
    # Variable(..., volatile=True) is deprecated since PyTorch 0.4;
    # run inference under torch.no_grad() instead.
    if torch.cuda.is_available():
        lr_image = lr_image.cuda()
        hr_image = hr_image.cuda()
    with torch.no_grad():
        sr_image = model(lr_image)
    mse = ((hr_image - sr_image)**2).data.mean()
    psnr = 10 * log10(1 / mse)
    ssim = pytorch_ssim.ssim(sr_image, hr_image).item()

    test_images = torch.stack([
        display_transform()(hr_restore_img.squeeze(0)),
        display_transform()(hr_image.data.cpu().squeeze(0)),
        display_transform()(sr_image.data.cpu().squeeze(0))
    ])
    image = utils.make_grid(test_images, nrow=3, padding=5)
    utils.save_image(image,
                     out_path + image_name.split('.')[0] +
                     '_psnr_%.4f_ssim_%.4f.' % (psnr, ssim) +
                     image_name.split('.')[-1],
                     padding=5)

    # save psnr / ssim
    results['psnr'].append(psnr)
    results['ssim'].append(ssim)
                batch_mse = ((sr - hr)**2).data.mean()
                valing_results['mse'] += batch_mse * batch_size
                batch_ssim = pytorch_ssim.ssim(sr, hr).item()
                valing_results['ssims'] += batch_ssim * batch_size
                valing_results['psnr'] = 10 * log10(
                    1 /
                    (valing_results['mse'] / valing_results['batch_sizes']))
                valing_results['ssim'] = valing_results[
                    'ssims'] / valing_results['batch_sizes']
                val_bar.set_description(
                    desc=
                    '[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f'
                    % (valing_results['psnr'], valing_results['ssim']))

                image1 = utils.make_grid(display_transform()(
                    val_hr_restore.squeeze(0)),
                                         nrow=3,
                                         padding=5)
                utils.save_image(image1,
                                 out_path +
                                 'epoch_%d_index_%d_hr_restore.png' %
                                 (epoch, index),
                                 padding=5)

                image2 = utils.make_grid(display_transform()(
                    hr.data.cpu().squeeze(0)),
                                         nrow=3,
                                         padding=5)
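The grid-saving calls above follow one torchvision pattern: stack the crops into an N x C x H x W tensor, tile with `make_grid`, and write with `save_image`. A standalone sketch (random tensors stand in for the transformed crops; the 400x400 size and filename are assumptions):

import torch
from torchvision import utils

# Three hypothetical 3x400x400 crops: bicubic-restored, ground-truth HR, and SR.
restored, hr_crop, sr_crop = (torch.rand(3, 400, 400) for _ in range(3))
grid = utils.make_grid(torch.stack([restored, hr_crop, sr_crop]), nrow=3, padding=5)
utils.save_image(grid, 'epoch_1_index_1.png')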
Example #6
    for image_name, lr_image, hr_restore_img, hr_image in test_bar:
        image_name = image_name[0]
        if torch.cuda.is_available():
            lr_image = lr_image.cuda()
            hr_image = hr_image.cuda()
        # a single no_grad() around the forward pass replaces the
        # deprecated Variable(..., volatile=True) wrappers
        with torch.no_grad():
            sr_image = model(lr_image)
        mse = ((hr_image - sr_image) ** 2).data.mean()
        psnr = 10 * log10(1 / mse)
        ssim = pytorch_ssim.ssim(sr_image, hr_image).item()

        test_images = torch.stack(
            [display_transform()(lr_image.cpu().squeeze(0)), display_transform()(sr_image.data.cpu().squeeze(0)),
             display_transform()(hr_image.data.cpu().squeeze(0))])
        image = utils.make_grid(test_images, nrow=3, padding=5)
        utils.save_image(image, out_path + image_name.split('.')[0] + '_psnr_%.4f_ssim_%.4f.' % (psnr, ssim) +
                         image_name.split('.')[-1], padding=5)

        results[image_name.split('_')[0]]['psnr'].append(psnr)
        results[image_name.split('_')[0]]['ssim'].append(ssim)

    out_path = 'statistics/'
    saved_results = {'psnr': [], 'ssim': []}
    for item in results.values():
        psnr = np.array(item['psnr'])
        ssim = np.array(item['ssim'])
        if (len(psnr) == 0) or (len(ssim) == 0):
            psnr = 'No data'
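A compact way to express the inference scaffolding these test loops share (a sketch, not this repository's API; the final clamp assumes outputs in [0, 1], which the snippets above skip):

import torch

@torch.no_grad()  # replaces the deprecated Variable(..., volatile=True) idiom
def super_resolve(model, lr_image):
    model.eval()
    if torch.cuda.is_available():
        model, lr_image = model.cuda(), lr_image.cuda()
    return model(lr_image).clamp(0.0, 1.0).cpu()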
Example #7
def main():
    parser = ArgumentParser()
    parser.add_argument("--augmentation", action='store_true')
    parser.add_argument("--train-dataset-percentage", type=float, default=100)
    parser.add_argument("--val-dataset-percentage", type=int, default=100)
    parser.add_argument("--label-smoothing", type=float, default=0.9)
    parser.add_argument("--validation-frequency", type=int, default=1)
    args = parser.parse_args()

    ENABLE_AUGMENTATION = args.augmentation
    TRAIN_DATASET_PERCENTAGE = args.train_dataset_percentage
    VAL_DATASET_PERCENTAGE = args.val_dataset_percentage
    LABEL_SMOOTHING_FACTOR = args.label_smoothing
    VALIDATION_FREQUENCY = args.validation_frequency

    if ENABLE_AUGMENTATION:
        augment_batch = AugmentPipe()
        augment_batch.to(device)
    else:
        augment_batch = lambda x: x
        augment_batch.p = 0

    NUM_ADV_EPOCHS = round(NUM_ADV_BASELINE_EPOCHS /
                           (TRAIN_DATASET_PERCENTAGE / 100))
    NUM_PRETRAIN_EPOCHS = round(NUM_BASELINE_PRETRAIN_EPOCHS /
                                (TRAIN_DATASET_PERCENTAGE / 100))
    VALIDATION_FREQUENCY = round(VALIDATION_FREQUENCY /
                                 (TRAIN_DATASET_PERCENTAGE / 100))

    training_start = datetime.datetime.now().isoformat()

    train_set = TrainDatasetFromFolder(train_dataset_dir,
                                       patch_size=PATCH_SIZE,
                                       upscale_factor=UPSCALE_FACTOR)
    len_train_set = len(train_set)
    train_set = Subset(
        train_set,
        list(
            np.random.choice(
                np.arange(len_train_set),
                int(len_train_set * TRAIN_DATASET_PERCENTAGE / 100), False)))

    val_set = ValDatasetFromFolder(val_dataset_dir,
                                   upscale_factor=UPSCALE_FACTOR)
    len_val_set = len(val_set)
    val_set = Subset(
        val_set,
        list(
            np.random.choice(np.arange(len_val_set),
                             int(len_val_set * VAL_DATASET_PERCENTAGE / 100),
                             False)))

    train_loader = DataLoader(dataset=train_set,
                              num_workers=8,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              pin_memory=True,
                              prefetch_factor=8)
    val_loader = DataLoader(dataset=val_set,
                            num_workers=2,
                            batch_size=VAL_BATCH_SIZE,
                            shuffle=False,
                            pin_memory=True,
                            prefetch_factor=2)

    epoch_validation_hr_dataset = HrValDatasetFromFolder(
        val_dataset_dir)  # Useful to compute FID metric

    results_folder = Path(
        f"results_{training_start}_CS:{PATCH_SIZE}_US:{UPSCALE_FACTOR}x_TRAIN:{TRAIN_DATASET_PERCENTAGE}%_AUGMENTATION:{ENABLE_AUGMENTATION}"
    )
    results_folder.mkdir(exist_ok=True)
    writer = SummaryWriter(str(results_folder / "tensorboard_log"))
    g_net = Generator(n_residual_blocks=NUM_RESIDUAL_BLOCKS,
                      upsample_factor=UPSCALE_FACTOR)
    d_net = Discriminator(patch_size=PATCH_SIZE)
    lpips_metric = lpips.LPIPS(net='alex')

    g_net.to(device=device)
    d_net.to(device=device)
    lpips_metric.to(device=device)

    g_optimizer = optim.Adam(g_net.parameters(), lr=1e-4)
    d_optimizer = optim.Adam(d_net.parameters(), lr=1e-4)

    bce_loss = BCELoss()
    mse_loss = MSELoss()

    bce_loss.to(device=device)
    mse_loss.to(device=device)
    results = {
        'd_total_loss': [],
        'g_total_loss': [],
        'g_adv_loss': [],
        'g_content_loss': [],
        'd_real_mean': [],
        'd_fake_mean': [],
        'psnr': [],
        'ssim': [],
        'lpips': [],
        'fid': [],
        'rt': [],
        'augment_probability': []
    }

    augment_probability = 0
    num_images = len(train_set) * (NUM_PRETRAIN_EPOCHS + NUM_ADV_EPOCHS)
    prediction_list = []
    rt = 0

    for epoch in range(1, NUM_PRETRAIN_EPOCHS + NUM_ADV_EPOCHS + 1):
        train_bar = tqdm(train_loader, ncols=200)
        running_results = {
            'batch_sizes': 0,
            'd_epoch_total_loss': 0,
            'g_epoch_total_loss': 0,
            'g_epoch_adv_loss': 0,
            'g_epoch_content_loss': 0,
            'd_epoch_real_mean': 0,
            'd_epoch_fake_mean': 0,
            'rt': 0,
            'augment_probability': 0
        }
        image_percentage = epoch / (NUM_PRETRAIN_EPOCHS + NUM_ADV_EPOCHS) * 100
        g_net.train()
        d_net.train()

        for data, target in train_bar:
            augment_batch.p = torch.tensor([augment_probability],
                                           device=device)
            batch_size = data.size(0)
            running_results["batch_sizes"] += batch_size
            target = target.to(device)
            data = data.to(device)
            real_labels = torch.ones(batch_size, device=device)
            fake_labels = torch.zeros(batch_size, device=device)

            if epoch > NUM_PRETRAIN_EPOCHS:
                # Discriminator training
                d_optimizer.zero_grad(set_to_none=True)

                d_real_output = d_net(augment_batch(target))
                d_real_output_loss = bce_loss(
                    d_real_output, real_labels * LABEL_SMOOTHING_FACTOR)

                fake_img = g_net(data)
                d_fake_output = d_net(augment_batch(fake_img))
                d_fake_output_loss = bce_loss(d_fake_output, fake_labels)

                d_total_loss = d_real_output_loss + d_fake_output_loss
                d_total_loss.backward()
                d_optimizer.step()

                d_real_mean = d_real_output.mean()
                d_fake_mean = d_fake_output.mean()

            # Generator training
            g_optimizer.zero_grad(set_to_none=True)

            fake_img = g_net(data)
            if epoch > NUM_PRETRAIN_EPOCHS:
                adversarial_loss = bce_loss(d_net(augment_batch(fake_img)),
                                            real_labels) * ADV_LOSS_BALANCER
                content_loss = mse_loss(fake_img, target)
                g_total_loss = content_loss + adversarial_loss
            else:
                adversarial_loss = mse_loss(torch.zeros(
                    1, device=device), torch.zeros(
                        1,
                        device=device))  # Logging purposes, it is always zero
                content_loss = mse_loss(fake_img, target)
                g_total_loss = content_loss

            g_total_loss.backward()
            g_optimizer.step()

            if epoch > NUM_PRETRAIN_EPOCHS and ENABLE_AUGMENTATION:
                prediction_list.append(
                    (torch.sign(d_real_output - 0.5)).tolist())
                if len(prediction_list) == RT_BATCH_SMOOTHING_FACTOR:
                    rt_list = [
                        prediction for sublist in prediction_list
                        for prediction in sublist
                    ]
                    rt = mean(rt_list)
                    if mean(rt_list) > AUGMENT_PROB_TARGET:
                        augment_probability = min(
                            0.85,
                            augment_probability + AUGMENT_PROBABABILITY_STEP)
                    else:
                        augment_probability = max(
                            0.,
                            augment_probability - AUGMENT_PROBABABILITY_STEP)
                    prediction_list.clear()

            running_results['g_epoch_total_loss'] += g_total_loss.to(
                'cpu', non_blocking=True).detach() * batch_size
            running_results['g_epoch_adv_loss'] += adversarial_loss.to(
                'cpu', non_blocking=True).detach() * batch_size
            running_results['g_epoch_content_loss'] += content_loss.to(
                'cpu', non_blocking=True).detach() * batch_size
            if epoch > NUM_PRETRAIN_EPOCHS:
                running_results['d_epoch_total_loss'] += d_total_loss.to(
                    'cpu', non_blocking=True).detach() * batch_size
                running_results['d_epoch_real_mean'] += d_real_mean.to(
                    'cpu', non_blocking=True).detach() * batch_size
                running_results['d_epoch_fake_mean'] += d_fake_mean.to(
                    'cpu', non_blocking=True).detach() * batch_size
                running_results['rt'] += rt * batch_size
                running_results[
                    'augment_probability'] += augment_probability * batch_size

            train_bar.set_description(
                desc=f'[{epoch}/{NUM_ADV_EPOCHS + NUM_PRETRAIN_EPOCHS}] '
                f'Loss_D: {running_results["d_epoch_total_loss"] / running_results["batch_sizes"]:.4f} '
                f'Loss_G: {running_results["g_epoch_total_loss"] / running_results["batch_sizes"]:.4f} '
                f'Loss_G_adv: {running_results["g_epoch_adv_loss"] / running_results["batch_sizes"]:.4f} '
                f'Loss_G_content: {running_results["g_epoch_content_loss"] / running_results["batch_sizes"]:.4f} '
                f'D(x): {running_results["d_epoch_real_mean"] / running_results["batch_sizes"]:.4f} '
                f'D(G(z)): {running_results["d_epoch_fake_mean"] / running_results["batch_sizes"]:.4f} '
                f'rt: {running_results["rt"] / running_results["batch_sizes"]:.4f} '
                f'augment_probability: {running_results["augment_probability"] / running_results["batch_sizes"]:.4f}'
            )

        if epoch == 1 or epoch == (
                NUM_PRETRAIN_EPOCHS + NUM_ADV_EPOCHS
        ) or epoch % VALIDATION_FREQUENCY == 0 or VALIDATION_FREQUENCY == 1:
            torch.cuda.empty_cache()
            gc.collect()
            g_net.eval()
            # ...
            images_path = results_folder / "training_images_results"
            images_path.mkdir(exist_ok=True)

            with torch.no_grad():
                val_bar = tqdm(val_loader, ncols=160)
                val_results = {
                    'epoch_mse': 0,
                    'epoch_ssim': 0,
                    'epoch_psnr': 0,
                    'epoch_avg_psnr': 0,
                    'epoch_avg_ssim': 0,
                    'epoch_lpips': 0,
                    'epoch_avg_lpips': 0,
                    'epoch_fid': 0,
                    'batch_sizes': 0
                }
                val_images = torch.empty((0, 0))
                epoch_validation_sr_dataset = None
                for lr, val_hr_restore, hr in val_bar:
                    batch_size = lr.size(0)
                    val_results['batch_sizes'] += batch_size
                    hr = hr.to(device=device)
                    lr = lr.to(device=device)

                    sr = g_net(lr)
                    sr = torch.clamp(sr, 0., 1.)
                    if epoch_validation_sr_dataset is None:
                        epoch_validation_sr_dataset = SingleTensorDataset(
                            (sr.cpu() * 255).to(torch.uint8))

                    else:
                        epoch_validation_sr_dataset = ConcatDataset(
                            (epoch_validation_sr_dataset,
                             SingleTensorDataset(
                                 (sr.cpu() * 255).to(torch.uint8))))

                    batch_mse = ((sr - hr)**2).data.mean()  # Pixel-wise MSE
                    val_results['epoch_mse'] += batch_mse * batch_size
                    batch_ssim = pytorch_ssim.ssim(sr, hr).item()
                    val_results['epoch_ssim'] += batch_ssim * batch_size
                    val_results['epoch_avg_ssim'] = val_results[
                        'epoch_ssim'] / val_results['batch_sizes']
                    # PSNR = 10 * log10(MAX_I**2 / MSE)
                    val_results['epoch_psnr'] += 10 * log10(
                        hr.max()**2 / batch_mse) * batch_size
                    val_results['epoch_avg_psnr'] = val_results[
                        'epoch_psnr'] / val_results['batch_sizes']
                    val_results['epoch_lpips'] += torch.mean(
                        lpips_metric(hr * 2 - 1, sr * 2 - 1)).to(
                            'cpu', non_blocking=True).detach() * batch_size
                    val_results['epoch_avg_lpips'] = val_results[
                        'epoch_lpips'] / val_results['batch_sizes']

                    val_bar.set_description(
                        desc=
                        f"[converting LR images to SR images] PSNR: {val_results['epoch_avg_psnr']:4f} dB "
                        f"SSIM: {val_results['epoch_avg_ssim']:4f} "
                        f"LPIPS: {val_results['epoch_avg_lpips']:.4f} ")
                    if val_images.size(0) * val_images.size(
                            1) < NUM_LOGGED_VALIDATION_IMAGES * 3:
                        if val_images.size(0) == 0:
                            val_images = torch.hstack(
                                (display_transform(CENTER_CROP_SIZE)
                                 (val_hr_restore).unsqueeze(0).transpose(0, 1),
                                 display_transform(CENTER_CROP_SIZE)(
                                     hr.data.cpu()).unsqueeze(0).transpose(
                                         0, 1),
                                 display_transform(CENTER_CROP_SIZE)(
                                     sr.data.cpu()).unsqueeze(0).transpose(
                                         0, 1)))
                        else:
                            val_images = torch.cat((
                                val_images,
                                torch.hstack(
                                    (display_transform(CENTER_CROP_SIZE)(
                                        val_hr_restore).unsqueeze(0).transpose(
                                            0, 1),
                                     display_transform(CENTER_CROP_SIZE)(
                                         hr.data.cpu()).unsqueeze(0).transpose(
                                             0, 1),
                                     display_transform(CENTER_CROP_SIZE)(
                                         sr.data.cpu()).unsqueeze(0).transpose(
                                             0, 1)))))
                val_results['epoch_fid'] = calculate_metrics(
                    epoch_validation_sr_dataset,
                    epoch_validation_hr_dataset,
                    cuda=True,
                    fid=True,
                    verbose=True
                )['frechet_inception_distance']  # Set batch_size=1 if you get memory error (inside calculate metric function)

                val_images = val_images.view(
                    (NUM_LOGGED_VALIDATION_IMAGES // 4, -1, 3,
                     CENTER_CROP_SIZE, CENTER_CROP_SIZE))
                val_save_bar = tqdm(val_images,
                                    desc='[saving validation results]',
                                    ncols=160)

                for index, image_batch in enumerate(val_save_bar, start=1):
                    image_grid = utils.make_grid(image_batch,
                                                 nrow=3,
                                                 padding=5)
                    writer.add_image(
                        f'progress{image_percentage:.1f}_index_{index}.png',
                        image_grid)

        # save loss / scores / psnr /ssim
        results['d_total_loss'].append(running_results['d_epoch_total_loss'] /
                                       running_results['batch_sizes'])
        results['g_total_loss'].append(running_results['g_epoch_total_loss'] /
                                       running_results['batch_sizes'])
        results['g_adv_loss'].append(running_results['g_epoch_adv_loss'] /
                                     running_results['batch_sizes'])
        results['g_content_loss'].append(
            running_results['g_epoch_content_loss'] /
            running_results['batch_sizes'])
        results['d_real_mean'].append(running_results['d_epoch_real_mean'] /
                                      running_results['batch_sizes'])
        results['d_fake_mean'].append(running_results['d_epoch_fake_mean'] /
                                      running_results['batch_sizes'])
        results['rt'].append(running_results['rt'] /
                             running_results['batch_sizes'])
        results['augment_probability'].append(
            running_results['augment_probability'] /
            running_results['batch_sizes'])
        if epoch == 1 or epoch == (
                NUM_PRETRAIN_EPOCHS + NUM_ADV_EPOCHS
        ) or epoch % VALIDATION_FREQUENCY == 0 or VALIDATION_FREQUENCY == 1:
            results['psnr'].append(val_results['epoch_avg_psnr'])
            results['ssim'].append(val_results['epoch_avg_ssim'])
            results['lpips'].append(val_results['epoch_avg_lpips'])
            results['fid'].append(val_results['epoch_fid'])

        for metric, metric_values in results.items():
            if epoch == 1 or epoch == (
                    NUM_PRETRAIN_EPOCHS + NUM_ADV_EPOCHS) or epoch % VALIDATION_FREQUENCY == 0 or VALIDATION_FREQUENCY == 1 or \
                    metric not in ["psnr", "ssim", "lpips", "fid"]:
                writer.add_scalar(metric, metric_values[-1],
                                  int(image_percentage * num_images * 0.01))

        if epoch == 1 or epoch == (
                NUM_PRETRAIN_EPOCHS + NUM_ADV_EPOCHS
        ) or epoch % VALIDATION_FREQUENCY == 0 or VALIDATION_FREQUENCY == 1:
            # save model parameters
            models_path = results_folder / "saved_models"
            models_path.mkdir(exist_ok=True)
            torch.save(
                {
                    'progress': image_percentage,
                    'g_net': g_net.state_dict(),
                    'd_net': d_net.state_dict(),
                    # 'g_optimizer': g_optimizer.state_dict(), Uncomment this if you want resume training
                    # 'd_optimizer': d_optimizer.state_dict(),
                },
                str(models_path / f'progress_{image_percentage:.1f}.tar'))
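The `rt` / `augment_probability` logic in this example is an ADA-style heuristic: estimate how confidently the discriminator separates real images, then nudge the augmentation probability to keep that estimate near a target. A sketch of just that rule (the constant values here are assumptions; the example spells its step constant `AUGMENT_PROBABABILITY_STEP`):

from statistics import mean
import torch

AUGMENT_PROB_TARGET = 0.6        # assumed value
AUGMENT_PROBABILITY_STEP = 0.01  # assumed value

def update_augment_probability(d_real_output, p):
    # rt in [-1, 1]: +1 when D rates every real image above 0.5.
    rt = mean(torch.sign(d_real_output - 0.5).tolist())
    if rt > AUGMENT_PROB_TARGET:
        p = min(0.85, p + AUGMENT_PROBABILITY_STEP)
    else:
        p = max(0.0, p - AUGMENT_PROBABILITY_STEP)
    return p, rt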
Example #8
def validation_step(dataloader: DataLoader,
                    netG: nn.Module,
                    out_dir: str,
                    idx_epoch: int,
                    num_epochs: int,
                    num_print: int = 5) -> dict:
    netG.eval()
    num_samples = len(dataloader)
    batch_sizes = 0
    step_ = int(math.ceil(num_samples / num_print))  # ceil must wrap the division
    t1 = time.time()
    with torch.no_grad():
        val_results = {
            'mse': 0,
            'ssims': 0,
            'psnr': 0,
            'ssim': 0,
            'batch_sizes': 0
        }
        val_images = []
        for idx_val, data_val in enumerate(dataloader):
            val_lr, val_hr_restore, val_hr = data_val['lr'], data_val[
                'lr_up'], data_val['hr']
            batch_size = val_lr.size(0)
            batch_sizes += batch_size
            val_hr_restore = x_preprocess(val_hr_restore, to_device=None)
            lr = x_preprocess(val_lr, to_device=to_device)
            hr = x_preprocess(val_hr, to_device=to_device)
            sr = netG(lr)
            #
            batch_mse = float(((sr - hr)**2).mean())
            val_results['mse'] += batch_mse * batch_size
            batch_ssim = float(pytorch_ssim.ssim(sr, hr))  # .data[0]
            val_results['ssims'] += batch_ssim * batch_size
            val_results['psnr'] = 10 * log10(
                1 / (val_results['mse'] / batch_sizes))
            val_results['ssim'] = val_results['ssims'] / batch_sizes
            if (idx_val % step_) == 0:
                print('\t\t(VAL) [{}/{}] <- MSE/SSIM = {:0.3f}/{:0.3f}'.format(
                    idx_val, num_samples, batch_mse, batch_ssim))
            val_images.extend([
                display_transform()(val_hr_restore.squeeze(0)),
                display_transform()(hr.data.cpu().squeeze(0)),
                display_transform()(sr.data.cpu().squeeze(0))
            ])
        chunk_size = 3 * 5
        size_round = chunk_size * int(math.floor(len(val_images) / chunk_size))
        val_images = val_images[:size_round]
        val_images = torch.stack(val_images)
        val_images = torch.chunk(val_images, val_images.size(0) // chunk_size)
        for idx_image, image in enumerate(val_images):  # val_save_bar:
            image = utils.make_grid(image, nrow=3, padding=5)
            path_img_out = os.path.join(
                out_dir,
                'epoch_{:05d}_index_{:02d}.png'.format(idx_epoch, idx_image))
            utils.save_image(image, path_img_out, padding=5)
    dt = time.time() - t1
    # 'psnr' and 'ssim' already hold running averages; only normalize the sums
    val_results['mse'] /= batch_sizes
    val_results['ssims'] /= batch_sizes
    tmp_ = ', '.join(
        ['{}: {:0.2f}'.format(k, v) for k, v in val_results.items()])
    print('\t\t(VALIDATION) ({}/{}) dt ~{:0.2f} (s), {}'.format(
        idx_epoch, num_epochs, dt, tmp_))
    return val_results
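Hypothetical usage of `validation_step`, assuming the module-level helpers it references (`x_preprocess`, `to_device`, `display_transform`, `pytorch_ssim`) are defined, the output directory exists, and the DataLoader yields dicts with 'lr', 'lr_up' and 'hr' keys:

stats = validation_step(val_loader, netG, out_dir='val_out',
                        idx_epoch=3, num_epochs=100, num_print=5)
print('PSNR: {:.2f} dB, SSIM: {:.3f}'.format(stats['psnr'], stats['ssim']))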
Example #9
                    batch_mse = ((sr - hr)**2).data.mean()
                    valing_results['mse'] += batch_mse * batch_size
                    batch_ssim = pytorch_ssim.ssim(sr, hr).item()
                    valing_results['ssims'] += batch_ssim * batch_size
                    valing_results['psnr'] = 10 * log10(
                        (hr.max()**2) / (valing_results['mse'] /
                                         valing_results['batch_sizes']))
                    valing_results['ssim'] = valing_results[
                        'ssims'] / valing_results['batch_sizes']
                    val_bar.set_description(
                        desc=
                        '[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f'
                        % (valing_results['psnr'], valing_results['ssim']))

                    val_images.extend([
                        display_transform()(lr.cpu().squeeze(0)),
                        display_transform()(sr.data.cpu().squeeze(0)),
                        display_transform()(hr.data.cpu().squeeze(0))
                    ])
                val_images = torch.stack(val_images)
                val_images = torch.chunk(val_images,
                                         val_images.size()[0] // 15)
                val_save_bar = tqdm(val_images,
                                    desc='[saving training results]')
                index = 1
                for image in val_save_bar:
                    image = utils.make_grid(image, nrow=3, padding=5)
                    utils.save_image(image,
                                     out_path + 'epoch_%d_index_%d.png' %
                                     (epoch, index),
                                     padding=5)
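The recurring `// 15` in these chunking calls is not arbitrary: each validation sample contributes three tiles, and five samples fill one saved grid, so `make_grid(nrow=3)` lays out one sample per row:

samples_per_grid = 5
tiles_per_sample = 3  # input (or restored), SR, HR
chunk_size = samples_per_grid * tiles_per_sample  # the literal 15 above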
Example #10
def main(args):
    if (not os.path.exists('data/dataset.pt')):
        # Make sure the bit depth is 24, 8 = Gray scale
        df = pd.read_pickle('data/dataset_files.gzip')
        df = df[(df['width'] > 100) & (df['height'] > 100)]
        train_df, val_df = train_test_split(df,
                                            test_size=0.2,
                                            random_state=42,
                                            shuffle=True)
        _, val_similar = dataframe_find_similar_images(
            val_df, batch_size=args.batch_size)

        # Create the train dataset
        train_filenames = train_df['filename'].tolist()
        train_set = TrainDatasetFromList(train_filenames,
                                         crop_size=args.crop_size,
                                         upscale_factor=args.upscale_factor)

        val_sets = list()
        for val_df in val_similar:
            val_filenames = val_df['filename'].tolist()
            val_set = ValDatasetFromList(val_filenames,
                                         upscale_factor=args.upscale_factor)
            val_sets.append(val_set)

        train_sampler = torch.utils.data.RandomSampler(train_set)
        val_sampler = torch.utils.data.SequentialSampler(val_set)
        data_to_save = {
            'train_dataset': train_set,
            "val_datasets": val_sets,
            'train_sampler': train_sampler,
            'val_sampler': val_sampler
        }
        torch.save(data_to_save, 'data/dataset.pt')
    else:
        datasets = torch.load('data/dataset.pt')
        train_set = datasets['train_dataset']
        val_sets = datasets['val_datasets']
        train_sampler = datasets['train_sampler']
        val_sampler = datasets['val_sampler']

    train_loader = DataLoader(dataset=train_set,
                              batch_size=args.batch_size,
                              num_workers=args.num_workers,
                              sampler=train_sampler)
    val_loaders = list()
    for val_set in val_sets:
        val_loaders.append(
            DataLoader(dataset=val_set,
                       batch_size=args.batch_size,
                       num_workers=args.num_workers,
                       shuffle=False))

    netG = Generator(args.upscale_factor)
    print('# generator parameters:',
          sum(param.numel() for param in netG.parameters()))
    netD = Discriminator()
    print('# discriminator parameters:',
          sum(param.numel() for param in netD.parameters()))

    generator_criterion = GeneratorLoss()

    if torch.cuda.is_available():
        netG.cuda()
        netD.cuda()
        generator_criterion.cuda()

    optimizerG = optim.Adam(netG.parameters())
    optimizerD = optim.Adam(netD.parameters())

    results = {
        'd_loss': [],
        'g_loss': [],
        'd_score': [],
        'g_score': [],
        'psnr': [],
        'ssim': []
    }
    start_epoch = 1
    if args.resume:
        import glob
        netG_files = glob.glob(
            os.path.join(args.output_dir,
                         'netG_epoch_%d_*.pth' % (args.upscale_factor)))
        netD_files = glob.glob(
            os.path.join(args.output_dir,
                         'netD_epoch_%d_*.pth' % (args.upscale_factor)))
        if (len(netG_files) > 0):
            netG_file = max(netG_files, key=os.path.getctime)
            netD_file = max(netD_files, key=os.path.getctime)
            netG.load_state_dict(torch.load(netG_file))
            netD.load_state_dict(torch.load(netD_file))
            start_epoch = len(netG_files)

    for epoch in range(start_epoch, args.epochs + 1):
        train_bar = tqdm(train_loader)
        running_results = {
            'batch_sizes': 0,
            'd_loss': 0,
            'g_loss': 0,
            'd_score': 0,
            'g_score': 0
        }

        # Create the scalers once at the beginning of training
        dscaler = torch.cuda.amp.GradScaler()  # * Discriminator
        gscaler = torch.cuda.amp.GradScaler()  # * Generator
        netG.train()
        netD.train()
        for data, target in train_bar:
            with torch.cuda.amp.autocast():  # Mix precision
                batch_size = data.size(0)
                running_results['batch_sizes'] += batch_size

                ############################
                # (1) Update D network: maximize D(x)-1-D(G(z))
                ###########################
                netD.zero_grad()
                real_img = Variable(target, requires_grad=False)
                if torch.cuda.is_available():
                    real_img = real_img.cuda()
                z = Variable(data)
                if torch.cuda.is_available():
                    z = z.cuda()
                fake_img = netG(z)

                # D takes the real image and predicts whether it is real
                real_out = netD(real_img).mean()
                # D takes the fake image and predicts whether it is fake
                fake_out = netD(fake_img).mean()
                # Minimizing drives real_out -> 1 and fake_out -> 0, i.e. D
                # recognizes both the real and the fake image
                d_loss = 1 - real_out + fake_out

                # d_loss.backward(retain_graph=True)
                # optimizerD.step()
                ############################
                # (2) Update G network: minimize 1-D(G(z)) + Perception Loss + Image Loss + TV Loss
                ###########################
                netG.zero_grad()
                g_loss = generator_criterion(fake_out.detach(), fake_img,
                                             real_img.detach())

            dscaler.scale(d_loss).backward(retain_graph=True)
            gscaler.scale(g_loss).backward()

            dscaler.step(optimizerD)
            dscaler.update()
            gscaler.step(optimizerG)
            gscaler.update()

            fake_img = netG(z)
            fake_out = netD(fake_img).mean()

            # loss for current batch before optimization
            running_results['g_loss'] += g_loss.item() * batch_size
            running_results['d_loss'] += d_loss.item() * batch_size
            running_results['d_score'] += real_out.item() * batch_size
            running_results['g_score'] += fake_out.item() * batch_size

            train_bar.set_description(
                desc=
                '[%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f' %
                (epoch, args.epochs,
                 running_results['d_loss'] / running_results['batch_sizes'],
                 running_results['g_loss'] / running_results['batch_sizes'],
                 running_results['d_score'] / running_results['batch_sizes'],
                 running_results['g_score'] / running_results['batch_sizes']))

        # save model parameters
        torch.save(
            netG.state_dict(),
            os.path.join(args.output_dir, 'netG_epoch_%d_%d.pth' %
                         (args.upscale_factor, epoch)))
        torch.save(
            netD.state_dict(),
            os.path.join(args.output_dir, 'netD_epoch_%d_%d.pth' %
                         (args.upscale_factor, epoch)))

        if epoch % args.validation_epoch == 0 and epoch != 0:
            netG.eval()
            with torch.no_grad():
                val_results = {
                    'mse': 0,
                    'ssims': 0,
                    'psnr': 0,
                    'ssim': 0,
                    'batch_sizes': 0
                }
                val_images = []
                for i in trange(len(val_loaders), desc='Running validation'):
                    val_loader = val_loaders[i]
                    for val_lr, val_hr_restore, val_hr in val_loader:
                        batch_size = val_lr.size(0)
                        val_results['batch_sizes'] += batch_size
                        lr = val_lr
                        hr = val_hr
                        if torch.cuda.is_available():
                            lr = lr.cuda()
                            hr = hr.cuda()
                        sr = netG(lr)

                        batch_mse = ((sr - hr)**2).data.mean()
                        val_results['mse'] += batch_mse * batch_size
                        batch_ssim = pytorch_ssim.ssim(sr, hr).item()
                        val_results['ssims'] += batch_ssim * batch_size
                        val_results['psnr'] = 10 * log10(
                            (hr.max()**2) /
                            (val_results['mse'] / val_results['batch_sizes']))
                        val_results['ssim'] = val_results[
                            'ssims'] / val_results['batch_sizes']
                        # val_bar.set_description(
                        #     desc='[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (
                        #         val_results['psnr'], val_results['ssim']))

                        # convert the validation images
                        val_hr_restore_squeeze = val_hr_restore.squeeze(0)
                        hr_squeeze = hr.data.cpu().squeeze(0)
                        sr_squeeze = sr.data.cpu().squeeze(0)
                        for b in range(batch_size):
                            val_hr = val_hr_restore_squeeze[b]
                            hr_temp = hr_squeeze[b]
                            sr_temp = sr_squeeze[b]
                            val_images.extend([
                                display_transform()(val_hr),
                                display_transform()(hr_temp),
                                display_transform()(sr_temp)
                            ])

                val_images = torch.stack(val_images)
                val_images = torch.chunk(val_images, val_images.size(0) // 15)
                val_save_bar = tqdm(val_images,
                                    desc='[saving training results]')
                index = 1
                for image in val_save_bar:
                    image = utils.make_grid(image, nrow=3, padding=5)
                    utils.save_image(
                        image,
                        os.path.join(
                            args.output_dir,
                            'epoch_%d_upscale_%d_index_%d.png' %
                            (epoch, args.upscale_factor, index)))
                    index += 1

            # save loss / scores / psnr / ssim
            results['d_loss'].append(running_results['d_loss'] /
                                     running_results['batch_sizes'])
            results['g_loss'].append(running_results['g_loss'] /
                                     running_results['batch_sizes'])
            results['d_score'].append(running_results['d_score'] /
                                      running_results['batch_sizes'])
            results['g_score'].append(running_results['g_score'] /
                                      running_results['batch_sizes'])
            results['psnr'].append(val_results['psnr'])
            results['ssim'].append(val_results['ssim'])

        if epoch % 10 == 0 and epoch != 0:
            out_path = 'statistics/'
            data_frame = pd.DataFrame(data={
                'Loss_D': results['d_loss'],
                'Loss_G': results['g_loss'],
                'Score_D': results['d_score'],
                'Score_G': results['g_score'],
                'PSNR': results['psnr'],
                'SSIM': results['ssim']
            },
                                      index=range(1, epoch + 1))
            data_frame.to_csv(out_path + 'srf_' + str(args.upscale_factor) +
                              '_train_results.csv',
                              index_label='Epoch')
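Example #10 mixes two GradScalers with a shared autocast region, which is easy to get wrong; for reference, the canonical single-optimizer AMP loop looks like this (a toy sketch with made-up model and data, not the example's GAN setup; a CUDA device is assumed):

import torch
from torch import nn, optim

device = 'cuda'
model = nn.Linear(8, 1).to(device)
optimizer = optim.Adam(model.parameters())
scaler = torch.cuda.amp.GradScaler()  # create once, reuse every step

for _ in range(3):
    data = torch.randn(4, 8, device=device)
    target = torch.randn(4, 1, device=device)
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():    # forward pass in mixed precision
        loss = nn.functional.mse_loss(model(data), target)
    scaler.scale(loss).backward()      # scale the loss to avoid fp16 underflow
    scaler.step(optimizer)             # unscale; skip the step if grads overflowed
    scaler.update()                    # adjust the scale factor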
for image_name, lr_image, hr_restore_img, hr_image in test_bar:
    with torch.no_grad():
        image_name = image_name[0]
        # volatile=True is deprecated; the surrounding no_grad() already
        # disables gradient tracking
        lr_image = Variable(lr_image)
        hr_image = Variable(hr_image)
        if torch.cuda.is_available():
            lr_image = lr_image.cuda()
            hr_image = hr_image.cuda()

        sr_image = model(lr_image)
        mse = ((hr_image - sr_image)**2).data.mean()
        psnr = 10 * log10(1 / mse)
        ssim = float(pytorch_ssim.ssim(sr_image, hr_image))

        test_out = display_transform()(hr_restore_img.squeeze(0))
        test_gt = display_transform()(hr_image.data.cpu().squeeze(0))
        #test_images = torch.stack(
        #    [display_transform()(hr_restore_img.squeeze(0)), display_transform()(hr_image.data.cpu().squeeze(0)),
        #     display_transform()(sr_image.data.cpu().squeeze(0))])
        #        image = utils.make_grid(test_images, nrow=3, padding=5)
        # utils.save_image(test_out, out_path + image_name.split('.')[0] + '_psnr_%.4f_ssim_%.4f.' % (psnr, ssim) +
        #                 image_name.split('.')[-1], padding=5)
        utils.save_image(test_gt, out_path_2 + image_name)
        utils.save_image(test_out, out_path + image_name, padding=5)

        # save psnr / ssim
        results[image_name.split('_')[0]]['psnr'].append(psnr)
        results[image_name.split('_')[0]]['ssim'].append(ssim)
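Several snippets compute `10 * log10(1 / mse)` while others use `hr.max() ** 2` in the numerator; both are the same definition, PSNR = 10 * log10(MAX_I^2 / MSE), with the first form assuming MAX_I = 1 (images normalized to [0, 1]). As a plain function:

from math import log10

def psnr(mse, max_i=1.0):
    # Peak signal-to-noise ratio in dB; max_i = 1.0 assumes [0, 1] images.
    return 10 * log10(max_i ** 2 / mse)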
Example #12
def main_train(path_trn: str, path_val: str,
               crop_size: int, upscale_factor: int, num_epochs: int,
               num_workers: int, to_device: str = 'cuda:0', batch_size: int = 64):
    to_device = get_device(to_device)
    train_set = TrainDatasetFromFolder(path_trn, crop_size=crop_size, upscale_factor=upscale_factor)
    val_set = ValDatasetFromFolder(path_val, upscale_factor=upscale_factor)
    # train_set = TrainDatasetFromFolder('data/VOC2012/train', crop_size=crop_size, upscale_factor=upscale_factor)
    # val_set = ValDatasetFromFolder('data/VOC2012/val', upscale_factor=upscale_factor)
    #
    train_loader = DataLoader(dataset=train_set, num_workers=num_workers, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(dataset=val_set, num_workers=num_workers, batch_size=1, shuffle=False)

    netG = Generator(upscale_factor)
    print('# generator parameters:', sum(param.numel() for param in netG.parameters()))
    netD = Discriminator()
    print('# discriminator parameters:', sum(param.numel() for param in netD.parameters()))

    generator_criterion = GeneratorLoss()

    if torch.cuda.is_available():
        netG.cuda()
        netD.cuda()
        generator_criterion.cuda()

    optimizerG = optim.Adam(netG.parameters())
    optimizerD = optim.Adam(netD.parameters())

    results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': [], 'psnr': [], 'ssim': []}

    for epoch in range(1, num_epochs + 1):
        train_bar = tqdm(train_loader)
        running_results = {'batch_sizes': 0, 'd_loss': 0, 'g_loss': 0, 'd_score': 0, 'g_score': 0}

        netG.train()
        netD.train()
        # FIXME: separate function for epoch training
        for data, target in train_bar:
            g_update_first = True
            batch_size = data.size(0)
            #
            # img_hr = target.numpy().transpose((0, 2, 3, 1))[0]
            # img_lr = data.numpy().transpose((0, 2, 3, 1))[0]
            # img_lr_x4 = cv2.resize(img_lr, img_hr.shape[:2], interpolation=cv2.INTER_CUBIC)
            # #
            # plt.subplot(1, 3, 1)
            # plt.imshow(img_hr)
            # plt.subplot(1, 3, 2)
            # plt.imshow(img_lr)
            # plt.subplot(1, 3, 3)
            # plt.imshow(img_lr_x4)
            # plt.show()
            running_results['batch_sizes'] += batch_size

            ############################
            # (1) Update D network: maximize D(x)-1-D(G(z))
            ###########################
            z = data.to(to_device)
            real_img = target.to(to_device)
            fake_img = netG(z)

            netD.zero_grad()
            real_out = netD(real_img).mean()
            fake_out = netD(fake_img).mean()
            d_loss = 1 - real_out + fake_out
            d_loss.backward(retain_graph=True)
            optimizerD.step()

            ############################
            # (2) Update G network: minimize 1-D(G(z)) + Perception Loss + Image Loss + TV Loss
            ###########################
            netG.zero_grad()
            g_loss = generator_criterion(fake_out, fake_img, real_img)
            g_loss.backward()
            optimizerG.step()
            fake_img = netG(z)
            fake_out = netD(fake_img).mean()

            g_loss = generator_criterion(fake_out, fake_img, real_img)
            running_results['g_loss'] += float(g_loss) * batch_size
            d_loss = 1 - real_out + fake_out
            running_results['d_loss'] += float(d_loss) * batch_size
            running_results['d_score'] += float(real_out) * batch_size
            running_results['g_score'] += float(fake_out) * batch_size

            train_bar.set_description(desc='[%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f' % (
                epoch, num_epochs, running_results['d_loss'] / running_results['batch_sizes'],
                running_results['g_loss'] / running_results['batch_sizes'],
                running_results['d_score'] / running_results['batch_sizes'],
                running_results['g_score'] / running_results['batch_sizes']))

        netG.eval()
        # FIXME: separate function for epoch validation
        with torch.no_grad():
            out_path = 'training_results/SRF_' + str(upscale_factor) + '/'
            if not os.path.exists(out_path):
                os.makedirs(out_path)
            val_bar = tqdm(val_loader)
            valing_results = {'mse': 0, 'ssims': 0, 'psnr': 0, 'ssim': 0, 'batch_sizes': 0}
            val_images = []
            for val_lr, val_hr_restore, val_hr in val_bar:
                batch_size = val_lr.size(0)
                valing_results['batch_sizes'] += batch_size
                lr = val_lr.to(to_device)
                hr = val_hr.to(to_device)
                sr = netG(lr)

                batch_mse = ((sr - hr) ** 2).mean()
                valing_results['mse'] += float(batch_mse) * batch_size
                batch_ssim = float(pytorch_ssim.ssim(sr, hr)) #.data[0]
                valing_results['ssims'] += batch_ssim * batch_size
                valing_results['psnr'] = 10 * log10(1 / (valing_results['mse'] / valing_results['batch_sizes']))
                valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_sizes']
                val_bar.set_description(
                    desc='[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (
                        valing_results['psnr'], valing_results['ssim']))

                val_images.extend(
                    [display_transform()(val_hr_restore.squeeze(0)), display_transform()(hr.data.cpu().squeeze(0)),
                     display_transform()(sr.data.cpu().squeeze(0))])
            val_images = torch.stack(val_images)
            val_images = torch.chunk(val_images, val_images.size(0) // 15)
            val_save_bar = tqdm(val_images, desc='[saving training results]')
            index = 1
            for image in val_save_bar:
                image = utils.make_grid(image, nrow=3, padding=5)
                utils.save_image(image, out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5)
                index += 1

        # save model parameters
        torch.save(netG.state_dict(), 'epochs/netG_epoch_%d_%d.pth' % (upscale_factor, epoch))
        torch.save(netD.state_dict(), 'epochs/netD_epoch_%d_%d.pth' % (upscale_factor, epoch))
        # save loss / scores / psnr / ssim
        results['d_loss'].append(running_results['d_loss'] / running_results['batch_sizes'])
        results['g_loss'].append(running_results['g_loss'] / running_results['batch_sizes'])
        results['d_score'].append(running_results['d_score'] / running_results['batch_sizes'])
        results['g_score'].append(running_results['g_score'] / running_results['batch_sizes'])
        results['psnr'].append(valing_results['psnr'])
        results['ssim'].append(valing_results['ssim'])

        if epoch % 10 == 0 and epoch != 0:
            out_path = 'statistics/'
            data_frame = pd.DataFrame(
                data={'Loss_D': results['d_loss'], 'Loss_G': results['g_loss'], 'Score_D': results['d_score'],
                      'Score_G': results['g_score'], 'PSNR': results['psnr'], 'SSIM': results['ssim']},
                index=range(1, epoch + 1))
            data_frame.to_csv(out_path + 'srf_' + str(upscale_factor) + '_train_results.csv', index_label='Epoch')
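For reference, the loss terms driving the update order in this script: `d_loss = 1 - D(x) + D(G(z))` pushes the discriminator toward D(x) = 1 and D(G(z)) = 0, while the generator's adversarial term rewards D(G(z)) -> 1. A sketch of the shape of the composite generator loss (the SRGAN `GeneratorLoss` also adds perception/VGG and TV terms; the weight below is illustrative, not the repository's exact coefficient):

import torch

def generator_loss_sketch(fake_out, fake_img, real_img):
    # Adversarial term: reward the generator when D(G(z)) approaches 1.
    adversarial_loss = torch.mean(1 - fake_out)
    # Image term: pixel-wise MSE between the SR and HR images.
    image_loss = torch.mean((fake_img - real_img) ** 2)
    # Perception (VGG) and TV terms omitted; 1e-3 is an assumed weight.
    return image_loss + 1e-3 * adversarial_loss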
Example #13
def test_benchmark(upscale_factor, epoch_num):
    model_name = 'netG_epoch_{}_{}.pth'.format(upscale_factor, epoch_num)  # use epoch_num instead of a hard-coded 100

    results = {
        'Set5': {
            'psnr': [],
            'ssim': []
        },
        'Set14': {
            'psnr': [],
            'ssim': []
        },
        'BSD100': {
            'psnr': [],
            'ssim': []
        },
        'Urban100': {
            'psnr': [],
            'ssim': []
        },
        'SunHays80': {
            'psnr': [],
            'ssim': []
        }
    }

    model = Generator(upscale_factor).eval()
    if torch.cuda.is_available():
        model = model.cuda()
    model.load_state_dict(
        torch.load('epochs/' + model_name, map_location=torch.device('cpu')))

    test_set = TestDatasetFromFolder('data/test',
                                     upscale_factor=upscale_factor)
    test_loader = DataLoader(dataset=test_set,
                             num_workers=4,
                             batch_size=1,
                             shuffle=False)
    test_bar = tqdm(test_loader, desc='[testing benchmark datasets]')

    out_path = 'benchmark_results/SRF_' + str(upscale_factor) + '/'
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    for image_name, lr_image, hr_restore_img, hr_image in test_bar:
        image_name = image_name[0]

        # volatile is no longer available; wrap the forward pass in
        # torch.no_grad() instead so no autograd graph is built
        if torch.cuda.is_available():
            lr_image = lr_image.cuda()
            hr_image = hr_image.cuda()

        with torch.no_grad():
            sr_image = model(lr_image)
        mse = ((hr_image - sr_image)**2).data.mean()
        psnr = 10 * log10(1 / mse)
        ssim = pytorch_ssim.ssim(sr_image, hr_image).data.item()

        test_images = torch.stack([
            display_transform()(hr_restore_img.squeeze(0)),
            display_transform()(hr_image.data.cpu().squeeze(0)),
            display_transform()(sr_image.data.cpu().squeeze(0))
        ])
        image = utils.make_grid(test_images, nrow=3, padding=5)
        utils.save_image(image,
                         out_path + image_name.split('.')[0] +
                         '_psnr_%.4f_ssim_%.4f.' % (psnr, ssim) +
                         image_name.split('.')[-1],
                         padding=5)

        # save psnr / ssim
        results[image_name.split('_')[0]]['psnr'].append(psnr)
        results[image_name.split('_')[0]]['ssim'].append(ssim)

    out_path = 'statistics/'
    saved_results = {'psnr': [], 'ssim': []}
    for item in results.values():
        psnr = np.array(item['psnr'])
        ssim = np.array(item['ssim'])
        if (len(psnr) == 0) or (len(ssim) == 0):
            psnr = 'N/A'
            ssim = 'N/A'
        else:
            psnr = psnr.mean()
            ssim = ssim.mean()
        saved_results['psnr'].append(psnr)
        saved_results['ssim'].append(ssim)

    data_frame = pd.DataFrame(saved_results, results.keys())
    data_frame.to_csv(out_path + 'srf_' + str(upscale_factor) +
                      '_test_results.csv',
                      index_label='DataSet')
    return data_frame
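A hypothetical invocation, assuming `epochs/netG_epoch_4_100.pth` exists and `data/test` holds the benchmark images:

df = test_benchmark(upscale_factor=4, epoch_num=100)
print(df)  # one PSNR/SSIM row per benchmark dataset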