コード例 #1
0
ファイル: projectcode.py プロジェクト: JIN-096/CAP1-Web
def SuperResolution(f_name, ori):
    """Run ESRGAN super-resolution on one image, then OCR the result.

    Side effects: writes the SR image to ``./data/sr_img/<f_name>``, stores
    the OCR text in the module-level global ``ocr_result``, and persists the
    OCR text plus MSE/PSNR via ``save_result``.

    Args:
        f_name: Output file name; also used to locate the preprocessed
            original under ``./data/preprocessed_img/`` for the PSNR check.
        ori: Input image (anything ``transforms.ToTensor`` accepts,
            e.g. a PIL image) to upscale.
    """
    pth = "./generator.pth"
    channels = 3
    residual_blocks = 23
    device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")

    # Define model and load model checkpoint.
    generator = GeneratorRRDB(channels,
                              filters=64,
                              num_res_blocks=residual_blocks).to(device)
    # map_location is required so a GPU-trained checkpoint still loads when
    # this runs on the CPU fallback path; without it torch.load tries to
    # deserialize onto the original CUDA device and raises.
    generator.load_state_dict(torch.load(pth, map_location=device))
    generator.eval()

    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])

    # Prepare input: normalized tensor with a leading batch dimension.
    # (Variable has been a deprecated no-op since PyTorch 0.4, so the
    # tensor is used directly.)
    image_tensor = transform(ori).to(device).unsqueeze(0)

    # Upsample image without tracking gradients.
    with torch.no_grad():
        sr_image = denormalize(generator(image_tensor)).cpu()

    # Save the SR image, OCR it, and record quality metrics.
    path = os.path.join("./data/sr_img/", f_name)
    oripath = os.path.join("./data/preprocessed_img/", f_name)
    save_image(sr_image, path)
    result = OCR(path)
    global ocr_result
    ocr_result = result
    mse, psnr = PSNR(oripath, path)
    save_result(f_name, result, mse, psnr)
コード例 #2
0
 def superresolution(self, images):
     """Run the SR generator over each input image.

     Accepts an ndarray of images or any iterable of images; returns a
     list of HxWxC uint8 numpy arrays, one per input image.
     """
     # Materialize non-ndarray inputs into a plain list of images.
     if not isinstance(images, np.ndarray):
         images = list(images)

     results = []
     for img in images:
         batch = self.transform(img).to(self.device).unsqueeze(0)
         with torch.no_grad():
             restored = denormalize(self.generator(batch))
             # Scale [0, 1] -> [0, 255] with +0.5 rounding, clamp, drop the
             # batch dim, move channels last, and convert to uint8 on CPU.
             as_uint8 = (restored.mul(255).add_(0.5).clamp_(0, 255)[0]
                         .permute(1, 2, 0).detach()
                         .to('cpu', torch.uint8).numpy())
         results.append(as_uint8)
     return results
コード例 #3
0
ファイル: test_on_image.py プロジェクト: dazhigulov/ESRGAN
                    help="Number of residual blocks in G")
opt = parser.parse_args()
print(opt)

os.makedirs("images/outputs", exist_ok=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Define model and load model checkpoint. map_location keeps a GPU-trained
# checkpoint loadable on a CPU-only machine (torch.load would otherwise try
# to restore onto the original CUDA device and fail).
generator = GeneratorRRDB(opt.channels,
                          filters=64,
                          num_res_blocks=opt.residual_blocks).to(device)
generator.load_state_dict(
    torch.load(opt.checkpoint_model, map_location=device))
generator.eval()

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(mean, std)])

# Prepare input: normalized tensor with a leading batch dimension.
# (Variable has been a deprecated no-op since PyTorch 0.4.)
image_tensor = transform(Image.open(opt.image_path)).to(device).unsqueeze(0)

# Upsample image without tracking gradients.
with torch.no_grad():
    sr_image = denormalize(generator(image_tensor)).cpu()

# Save image under the input's base name. os.path.basename handles both "/"
# and the OS-specific separator, unlike split("/")[-1].
fn = os.path.basename(opt.image_path)
save_image(sr_image, f"images/outputs/sr-{fn}")
コード例 #4
0
ファイル: demo.py プロジェクト: hampen2929/esrgan
    DemoImageDataset(demo_in_dir),
    batch_size=opt.batch_size,
    shuffle=False,
    num_workers=opt.n_cpu,
)

# # generate hr image

# The input image is treated as the high-resolution target; a copy resized
# to 1/4 of its width and height is the low-resolution input, and the
# generator reconstructs a high-resolution image from it.
# (Translated from the original Japanese note.)

# The output directory is loop-invariant, so create it once up front
# instead of on every batch.
os.makedirs(demo_out_dir, exist_ok=True)

with torch.no_grad():
    for i, imgs in enumerate(demo_dataloader):
        # Save image grid with upsampled inputs and outputs
        imgs_lr = Variable(imgs["lr"].type(Tensor))
        gen_hr = generator(imgs_lr)
        # Upsample the LR input x4 (interpolate's default 'nearest' mode)
        # so both saved images share the same spatial size.
        imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=4)

        imgs_lr = denormalize(imgs_lr)
        gen_hr = denormalize(gen_hr)

        save_image(imgs_lr,
                   osp.join(demo_out_dir, "low_{:01}.png".format(i)),
                   nrow=1,
                   normalize=False)
        save_image(gen_hr,
                   osp.join(demo_out_dir, "gen_hr_{:01}.png".format(i)),
                   nrow=1,
                   normalize=False)
コード例 #5
0
                # NOTE(review): fragment — the enclosing dict literal and
                # training loop begin above this excerpt.
                'content': loss_content.item(),
                'adv': loss_GAN.item(),
                'pixel': loss_pixel.item(),
            }

            # Start a fresh console line on the first batch, otherwise
            # overwrite the current line in place with "\r".
            if batch_num == 1:
                sys.stdout.write("\n{}".format(log_info))
            else:
                sys.stdout.write("\r{}".format(log_info))

            sys.stdout.flush()

        # Periodically dump a side-by-side (upsampled LR | generated HR)
        # grid for visual inspection.
        if batches_done % opt.sample_interval == 0:
            # Save image grid with upsampled inputs and ESRGAN outputs
            imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=4)
            # Concatenate along the last (width) axis so the two images sit
            # side by side, then undo normalization for saving.
            img_grid = denormalize(torch.cat((imgs_lr, gen_hr), -1))

            image_batch_save_dir = osp.join(image_train_save_dir,
                                            '{:07}'.format(batches_done))
            os.makedirs(osp.join(image_batch_save_dir, "hr_image"),
                        exist_ok=True)
            save_image(img_grid,
                       osp.join(image_batch_save_dir, "hr_image",
                                "%d.png" % batches_done),
                       nrow=1,
                       normalize=False)

            # Evaluate on the test set without gradient tracking.
            # NOTE(review): the loop body continues beyond this excerpt.
            with torch.no_grad():
                for i, imgs in enumerate(test_dataloader):
                    # Save image grid with upsampled inputs and outputs
                    imgs_lr = Variable(imgs["lr"].type(Tensor))
コード例 #6
0
ファイル: train_old.py プロジェクト: hampen2929/esrgan
    def train(self, dataloader, opt):
        """Run the ESRGAN training loop.

        Warm-up phase (first ``opt.warmup_batches`` global batches): the
        generator is trained with pixel loss only.  Afterwards, generator
        and discriminator are trained adversarially with a relativistic
        average GAN loss plus feature-extractor content loss and pixel
        loss.  Sample grids and checkpoints are written periodically and
        losses are logged to mlflow.

        Args:
            dataloader: Yields dicts with "lr" and "hr" image batches.
            opt: Parsed options (epoch, n_epochs, warmup_batches,
                lambda_adv, lambda_pixel, sample_interval,
                checkpoint_interval, ...).

        NOTE(review): relies on module-level globals (generator,
        discriminator, optimizer_G/D, criterion_*, feature_extractor,
        save dirs, mlflow run) rather than ``self`` attributes —
        presumably set up elsewhere in this module; verify before reuse.
        """
        for epoch in range(opt.epoch + 1, opt.n_epochs + 1):
            for batch_num, imgs in enumerate(dataloader):
                # Choose CUDA tensors when a GPU is available (re-checked
                # every batch, though the result is loop-invariant).
                Tensor = torch.cuda.FloatTensor if torch.cuda.is_available(
                ) else torch.Tensor
                # Global step index across all epochs.
                batches_done = (epoch - 1) * len(dataloader) + batch_num

                # Configure model input
                imgs_lr = Variable(imgs["lr"].type(Tensor))
                imgs_hr = Variable(imgs["hr"].type(Tensor))

                # Adversarial ground truths
                valid = Variable(Tensor(
                    np.ones((imgs_lr.size(0), *discriminator.output_shape))),
                                 requires_grad=False)
                fake = Variable(Tensor(
                    np.zeros((imgs_lr.size(0), *discriminator.output_shape))),
                                requires_grad=False)

                # ------------------
                #  Train Generators
                # ------------------

                optimizer_G.zero_grad()

                # Generate a high resolution image from low resolution input
                gen_hr = generator(imgs_lr)

                # Measure pixel-wise loss against ground truth
                loss_pixel = criterion_pixel(gen_hr, imgs_hr)

                # Warm-up (pixel-wise loss only)
                if batches_done <= opt.warmup_batches:
                    loss_pixel.backward()
                    optimizer_G.step()
                    log_info = "[Epoch {}/{}] [Batch {}/{}] [G pixel: {}]".format(
                        epoch, opt.n_epochs, batch_num, len(dataloader),
                        loss_pixel.item())

                    sys.stdout.write("\r{}".format(log_info))
                    sys.stdout.flush()

                    mlflow.log_metric('train_{}'.format('loss_pixel'),
                                      loss_pixel.item(),
                                      step=batches_done)
                else:
                    # Extract validity predictions from discriminator.
                    # pred_real is detached so the generator update does not
                    # backpropagate through the discriminator's real branch.
                    pred_real = discriminator(imgs_hr).detach()
                    pred_fake = discriminator(gen_hr)

                    # Adversarial loss (relativistic average GAN)
                    loss_GAN = criterion_GAN(
                        pred_fake - pred_real.mean(0, keepdim=True), valid)

                    # Content loss (real features detached: targets only)
                    gen_features = feature_extractor(gen_hr)
                    real_features = feature_extractor(imgs_hr).detach()
                    loss_content = criterion_content(gen_features,
                                                     real_features)

                    # Total generator loss
                    loss_G = loss_content + opt.lambda_adv * loss_GAN + opt.lambda_pixel * loss_pixel

                    loss_G.backward()
                    optimizer_G.step()

                    # ---------------------
                    #  Train Discriminator
                    # ---------------------

                    optimizer_D.zero_grad()

                    # gen_hr is detached so the discriminator update does not
                    # backpropagate into the generator.
                    pred_real = discriminator(imgs_hr)
                    pred_fake = discriminator(gen_hr.detach())

                    # Adversarial loss for real and fake images (relativistic average GAN)
                    loss_real = criterion_GAN(
                        pred_real - pred_fake.mean(0, keepdim=True), valid)
                    loss_fake = criterion_GAN(
                        pred_fake - pred_real.mean(0, keepdim=True), fake)

                    # Total loss
                    loss_D = (loss_real + loss_fake) / 2

                    loss_D.backward()
                    optimizer_D.step()

                    # --------------
                    #  Log Progress
                    # --------------

                    log_info = "[Epoch {}/{}] [Batch {}/{}] [D loss: {}] [G loss: {}, content: {}, adv: {}, pixel: {}]".format(
                        epoch,
                        opt.n_epochs,
                        batch_num,
                        len(dataloader),
                        loss_D.item(),
                        loss_G.item(),
                        loss_content.item(),
                        loss_GAN.item(),
                        loss_pixel.item(),
                    )

                    # New console line on the first batch; otherwise rewrite
                    # the current line in place with "\r".
                    if batch_num == 1:
                        sys.stdout.write("\n{}".format(log_info))
                    else:
                        sys.stdout.write("\r{}".format(log_info))

                    sys.stdout.flush()

                    # import pdb; pdb.set_trace()

                    if batches_done % opt.sample_interval == 0:
                        # Save image grid with upsampled inputs and ESRGAN outputs
                        imgs_lr = nn.functional.interpolate(imgs_lr,
                                                            scale_factor=4)
                        # Concatenate along the last (width) axis for a
                        # side-by-side LR/HR comparison grid.
                        img_grid = denormalize(torch.cat((imgs_lr, gen_hr),
                                                         -1))

                        image_batch_save_dir = osp.join(
                            image_train_save_dir, '{:07}'.format(batches_done))
                        os.makedirs(osp.join(image_batch_save_dir, "hr_image"),
                                    exist_ok=True)
                        save_image(img_grid,
                                   osp.join(image_batch_save_dir, "hr_image",
                                            "%d.png" % batches_done),
                                   nrow=1,
                                   normalize=False)

                    if batches_done % opt.checkpoint_interval == 0:
                        # Save model checkpoints.
                        # NOTE(review): file names use the epoch number, so
                        # multiple checkpoints within one epoch overwrite
                        # each other.
                        torch.save(
                            generator.state_dict(),
                            osp.join(weight_save_dir,
                                     "generator_%d.pth" % epoch))
                        torch.save(
                            discriminator.state_dict(),
                            osp.join(weight_save_dir,
                                     "discriminator_%d.pth" % epoch))

                    mlflow.log_metric('train_{}'.format('loss_D'),
                                      loss_D.item(),
                                      step=batches_done)
                    mlflow.log_metric('train_{}'.format('loss_G'),
                                      loss_G.item(),
                                      step=batches_done)
                    mlflow.log_metric('train_{}'.format('loss_content'),
                                      loss_content.item(),
                                      step=batches_done)
                    mlflow.log_metric('train_{}'.format('loss_GAN'),
                                      loss_GAN.item(),
                                      step=batches_done)
                    mlflow.log_metric('train_{}'.format('loss_pixel'),
                                      loss_pixel.item(),
                                      step=batches_done)
コード例 #7
0
ファイル: test_on_image.py プロジェクト: zyw1218/OUCML
    # NOTE(review): fragment — the enclosing function begins above this
    # excerpt and its statements continue below it.
    with torch.no_grad():
        sr_image = generator(image_tensor)

    # Save image. Output name is "<parent_dir>/<file_name>" taken from the
    # input path (assumes "/" separators — TODO confirm on Windows).
    fn = image_path.split("/")[-2] + "/" + image_path.split("/")[-1]

    if opt.concat == "yes":
        if opt.netG == "RRDB":
            # sr_image =sr_image
            # Upsample the input x2 so it can be concatenated with the SR
            # output. NOTE(review): the else branch uses x4 — confirm which
            # scale matches the RRDB generator's actual upscaling factor.
            image_tensor = nn.functional.interpolate(image_tensor,
                                                     scale_factor=2)

        print(image_tensor.shape, sr_image.shape, opt.checkpoint_model[-6:-4])
        print(torch.cat((image_tensor, sr_image), 3).shape)

        # Concatenate input and SR output along the width axis (dim 3).
        img_grid = denormalize(torch.cat((image_tensor, sr_image), 3))
        os.makedirs(f"{save_path}_concat", exist_ok=True)
        save_image(img_grid,
                   f"{save_path}_concat/{fn[:-4]}.tif",
                   normalize=False)

    else:
        print(image_tensor.shape, sr_image.shape, opt.checkpoint_model[-6:-4])
        if opt.netG == "RRDB":
            # sr_image =sr_image
            image_tensor = nn.functional.interpolate(image_tensor,
                                                     scale_factor=4)
        # Save the SR output and the (upsampled) input separately.
        img_grid = denormalize(sr_image)
        img_grid_o = denormalize(image_tensor)
        os.makedirs(f"{save_path}_gen/" + image_path.split("/")[-2],
                    exist_ok=True)