Example #1
def train(args):
    h5_file = h5py.File(args.output_path, 'w')

    lr_group = h5_file.create_group('lr')
    hr_group = h5_file.create_group('hr')

    image_list = sorted(glob.glob('{}/*'.format(args.images_dir)))[:args.max_images]

    for i, image_path in enumerate(image_list):
        hr = pil_image.open(image_path).convert('RGB')
        hr_width = (hr.width // args.scale) * args.scale
        hr_height = (hr.height // args.scale) * args.scale
        hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
        lr = hr.resize((hr_width // args.scale, hr_height // args.scale), resample=pil_image.BICUBIC)
        hr = np.array(hr).astype(np.float32)
        lr = np.array(lr).astype(np.float32)
        hr = convert_rgb_to_y(hr)
        lr = convert_rgb_to_y(lr)
        hr = np.clip(hr, 0.0, 255.0).astype(np.uint8)
        lr = np.clip(lr, 0.0, 255.0).astype(np.uint8)

        lr_group.create_dataset(str(i), data=lr)
        hr_group.create_dataset(str(i), data=hr)

        print(i)

    h5_file.close()
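Every example on this page reduces the RGB arrays to a single luma channel with convert_rgb_to_y before storing or comparing them. The helper itself is not shown; a minimal NumPy sketch, assuming the standard ITU-R BT.601 studio-swing conversion (the actual helpers in these repositories may use slightly different constants or also accept torch tensors via a dim_order argument):

import numpy as np

def convert_rgb_to_y(img, dim_order='hwc'):
    # BT.601 luma from an RGB array in the 0-255 range.
    if dim_order == 'hwc':
        r, g, b = img[..., 0], img[..., 1], img[..., 2]
    else:  # 'chw'
        r, g, b = img[0], img[1], img[2]
    return 16. + (65.481 * r + 128.553 * g + 24.966 * b) / 255.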
Example #2
def eval(args):
    h5_file = h5py.File(args.output_path, 'w')

    lr_group = h5_file.create_group('lr')
    hr_group = h5_file.create_group('hr')

    for i, image_path in enumerate(
            sorted(glob.glob('{}/*'.format(args.images_dir)))):
        hr = pil_image.open(image_path).convert('RGB')
        hr_width = (hr.width // args.scale) * args.scale
        hr_height = (hr.height // args.scale) * args.scale
        hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
        lr = hr.resize((hr_width // args.scale, hr_height // args.scale),
                       resample=pil_image.BICUBIC)
        lr = lr.resize((lr.width * args.scale, lr.height * args.scale),
                       resample=pil_image.BICUBIC)
        hr = np.array(hr).astype(np.float32)
        lr = np.array(lr).astype(np.float32)
        hr = convert_rgb_to_y(hr)
        lr = convert_rgb_to_y(lr)

        lr_group.create_dataset(str(i), data=lr)
        hr_group.create_dataset(str(i), data=hr)

    h5_file.close()
Example #3
def train(args):
    h5_file = h5py.File(args.output_path, 'w')

    lr_patches = []
    hr_patches = []

    for image_path in sorted(glob.glob('{}/*'.format(args.images_dir))):
        hr = pil_image.open(image_path).convert('RGB')
        hr_width = (hr.width // args.scale) * args.scale
        hr_height = (hr.height // args.scale) * args.scale
        hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
        lr = hr.resize((hr_width // args.scale, hr_height // args.scale),
                       resample=pil_image.BICUBIC)
        hr = np.array(hr).astype(np.float32)
        lr = np.array(lr).astype(np.float32)
        hr = convert_rgb_to_y(hr)
        lr = convert_rgb_to_y(lr)

        for i in range(0, lr.shape[0] - args.patch_size + 1, args.stride):
            for j in range(0, lr.shape[1] - args.patch_size + 1, args.stride):
                lr_patches.append(lr[i:i + args.patch_size,
                                     j:j + args.patch_size])
                hr_patches.append(hr[i * args.scale:i * args.scale +
                                     args.patch_size * args.scale,
                                     j * args.scale:j * args.scale +
                                     args.patch_size * args.scale])

    lr_patches = np.array(lr_patches)
    hr_patches = np.array(hr_patches)

    h5_file.create_dataset('lr', data=lr_patches)
    h5_file.create_dataset('hr', data=hr_patches)

    h5_file.close()
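The train() variants that write flat 'lr'/'hr' datasets are normally read back through a small torch Dataset. A sketch of such a reader, assuming the patch layout produced above (the class name TrainDataset and the h5_path argument are illustrative, not taken from these snippets):

import h5py
import numpy as np
from torch.utils.data import Dataset

class TrainDataset(Dataset):
    # Reads the (N, patch, patch) 'lr'/'hr' arrays written by train().
    def __init__(self, h5_path):
        super(TrainDataset, self).__init__()
        self.h5_path = h5_path

    def __getitem__(self, idx):
        with h5py.File(self.h5_path, 'r') as f:
            # add a channel dimension and scale to [0, 1]
            lr = np.expand_dims(f['lr'][idx] / 255., 0).astype(np.float32)
            hr = np.expand_dims(f['hr'][idx] / 255., 0).astype(np.float32)
            return lr, hr

    def __len__(self):
        with h5py.File(self.h5_path, 'r') as f:
            return len(f['lr'])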
Example #4
def eval(args):
    h5_file = h5py.File(args.output_path + "_quality_{}".format(args.quality),
                        'w')

    lr_group = h5_file.create_group('lr')
    hr_group = h5_file.create_group('hr')
    k = 1
    for i, image_path in enumerate(
            sorted(glob.glob('{}/*'.format(args.images_dir)))):
        hr = pil_image.open(image_path).convert('RGB')
        hr_width = (hr.width // args.scale) * args.scale
        hr_height = (hr.height // args.scale) * args.scale
        hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
        lr = hr.resize((hr.width // args.scale, hr_height // args.scale),
                       resample=pil_image.BICUBIC)
        hr = np.array(hr).astype(np.float32)
        if args.compress:
            lr = compress_img(lr, args.quality)
        lr.save('dataset/eval_source/{}_loaded.jpg'.format(k),
                format='jpeg',
                quality=100,
                subsampling=0)
        lr = np.array(lr).astype(np.float32)
        print((lr.shape, hr.shape))
        hr = convert_rgb_to_y(hr)
        lr = convert_rgb_to_y(lr)
        lr_group.create_dataset(str(i), data=lr)
        hr_group.create_dataset(str(i), data=hr)
        k += 1

    h5_file.close()
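eval() above (and the train() in Example #10) degrades the input with compress_img(lr, args.quality), which is not shown. A plausible sketch, assuming it simply round-trips the PIL image through an in-memory JPEG at the requested quality:

import io
from PIL import Image

def compress_img(img, quality):
    # Re-encode a PIL image as JPEG at the given quality and decode it again.
    buffer = io.BytesIO()
    img.save(buffer, format='jpeg', quality=quality)
    buffer.seek(0)
    return Image.open(buffer).convert('RGB')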
Example #5
    def __getitem__(self, idx):
        with h5py.File(self.val_set, 'r') as f:
            lr = f['lr'][str(idx)][::]
            hr = f['hr'][str(idx)][::]
            lr = convert_rgb_to_y(lr)
            hr = convert_rgb_to_y(hr)
            lr = np.expand_dims(lr.astype(np.float32), 0) / 255.0
            hr = np.expand_dims(hr.astype(np.float32), 0) / 255.0
            return lr.astype(np.float32), hr.astype(np.float32)
Example #6
    def evaluate(self, filepath):
        input_image = align_image(load_image(filepath), self.scale)
        input_y_image = resize_image(convert_rgb_to_y(input_image),
                                     1 / self.scale)
        input_scaled_y_image = resize_image(input_y_image, self.scale)

        output_y_image = self.run(input_y_image, input_scaled_y_image)
        ground_truth_y_image = convert_rgb_to_y(input_image)

        return calc_psnr_and_ssim(ground_truth_y_image,
                                  output_y_image,
                                  border=self.scale)
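evaluate() reports PSNR and SSIM on the Y channel after cropping a border of self.scale pixels. calc_psnr_and_ssim is not shown; a sketch using scikit-image, assuming 0-255 single-channel float inputs (the repository's own metric code may differ, e.g. in how the SSIM window is configured):

import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def calc_psnr_and_ssim(ground_truth, output, border=0):
    # PSNR/SSIM between two 0-255 Y images, ignoring `border` pixels per edge.
    gt = ground_truth.squeeze().astype(np.float64)
    out = output.squeeze().astype(np.float64)
    if border > 0:
        gt = gt[border:-border, border:-border]
        out = out[border:-border, border:-border]
    psnr = peak_signal_noise_ratio(gt, out, data_range=255)
    ssim = structural_similarity(gt, out, data_range=255)
    return psnr, ssim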
Example #7
    def __getitem__(self, idx):
        with h5py.File(self.training_set, 'r') as f:
            lr = f['lr'][str(idx)][::]
            hr = f['hr'][str(idx)][::]
            lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale)
            lr, hr = self.random_horizontal_flip(lr, hr)
            lr, hr = self.random_vertical_flip(lr, hr)
            lr, hr = self.random_rotate_90(lr, hr)
            lr = convert_rgb_to_y(lr)
            hr = convert_rgb_to_y(hr)
            lr = np.expand_dims(lr.astype(np.float32), 0) / 255.0
            hr = np.expand_dims(hr.astype(np.float32), 0) / 255.0
            return lr.astype(np.float32), hr.astype(np.float32)
Example #8
def train(args):
    h5_file = h5py.File(args.output_path, 'w')

    lr_patches = []
    hr_patches = []

    for image_path in sorted(glob.glob('{}/*'.format(args.images_dir))):
        hr = pil_image.open(image_path).convert('RGB')
        hr_images = []

        if args.with_aug:
            for s in [1.0, 0.9, 0.8, 0.7, 0.6]:
                for r in [0, 90, 180, 270]:
                    tmp = hr.resize((int(hr.width * s), int(hr.height * s)),
                                    resample=pil_image.BICUBIC)
                    tmp = tmp.rotate(r, expand=True)
                    hr_images.append(tmp)
        else:
            hr_images.append(hr)

        for hr in hr_images:
            hr_width = (hr.width // args.scale) * args.scale
            hr_height = (hr.height // args.scale) * args.scale
            hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
            lr = hr.resize((hr.width // args.scale, hr_height // args.scale),
                           resample=pil_image.BICUBIC)
            hr = np.array(hr).astype(np.float32)
            lr = np.array(lr).astype(np.float32)
            hr = convert_rgb_to_y(hr)
            lr = convert_rgb_to_y(lr)

            for i in range(0, lr.shape[0] - args.patch_size + 1, args.scale):
                for j in range(0, lr.shape[1] - args.patch_size + 1,
                               args.scale):
                    lr_patches.append(lr[i:i + args.patch_size,
                                         j:j + args.patch_size])
                    hr_patches.append(hr[i * args.scale:i * args.scale +
                                         args.patch_size * args.scale,
                                         j * args.scale:j * args.scale +
                                         args.patch_size * args.scale])

    lr_patches = np.array(lr_patches)
    hr_patches = np.array(hr_patches)

    h5_file.create_dataset('lr', data=lr_patches)
    h5_file.create_dataset('hr', data=hr_patches)

    h5_file.close()
Example #9
    def inference(self, input_image, output_dir, save_images=False):
        # Create scaled image
        scaled_image = resize_image(input_image, 2)

        # Create y and scaled y image
        input_y_image = convert_rgb_to_y(input_image)
        scaled_y_image = resize_image(input_y_image, self.scale)

        output_y_image = self.run(input_y_image, scaled_y_image)

        # Create result image
        scaled_ycbcr_image = convert_rgb_to_ycbcr(scaled_image)
        result_image = convert_y_and_cbcr_to_rgb(output_y_image,
                                                 scaled_ycbcr_image[:, :, 1:3])

        if save_images:
            save_image(input_image, "{}/original.jpg".format(output_dir))
            save_image(scaled_image, "{}/bicubic.jpg".format(output_dir))
            save_image(scaled_y_image,
                       "{}/bicubic_y.jpg".format(output_dir),
                       is_rgb=False)
            save_image(output_y_image,
                       "{}/result_y.jpg".format(output_dir),
                       is_rgb=False)
            save_image(result_image, "{}/result.jpg".format(output_dir))

        return result_image
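inference() rebuilds an RGB result by pairing the network's Y output with the Cb/Cr channels of the bicubic image. A sketch of the two colour-space helpers it relies on, written from the standard BT.601 equations (the repository's convert_rgb_to_ycbcr / convert_y_and_cbcr_to_rgb may round, clip, or scale slightly differently):

import numpy as np

def convert_rgb_to_ycbcr(img):
    # RGB (0-255 float, HWC) -> YCbCr, BT.601 studio swing.
    r, g, b = img[..., 0], img[..., 1], img[..., 2]
    y = 16. + (65.481 * r + 128.553 * g + 24.966 * b) / 255.
    cb = 128. + (-37.797 * r - 74.203 * g + 112.0 * b) / 255.
    cr = 128. + (112.0 * r - 93.786 * g - 18.214 * b) / 255.
    return np.stack([y, cb, cr], axis=-1)

def convert_y_and_cbcr_to_rgb(y_image, cbcr_image):
    # Combine a predicted Y channel with borrowed Cb/Cr and invert to RGB.
    y = y_image.squeeze().astype(np.float64)
    cb = cbcr_image[..., 0].astype(np.float64)
    cr = cbcr_image[..., 1].astype(np.float64)
    r = 1.164 * (y - 16.) + 1.596 * (cr - 128.)
    g = 1.164 * (y - 16.) - 0.392 * (cb - 128.) - 0.813 * (cr - 128.)
    b = 1.164 * (y - 16.) + 2.017 * (cb - 128.)
    return np.clip(np.stack([r, g, b], axis=-1), 0., 255.)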
Example #10
def train(args):
    h5_file = h5py.File(args.output_path + "_quality_{}".format(args.quality),
                        'w')

    lr_patches = []
    hr_patches = []
    k = 1
    for image_path in sorted(glob.glob('{}/*'.format(args.images_dir))):
        hr = pil_image.open(image_path).convert('RGB')

        hr_width = (hr.width // args.scale) * args.scale
        hr_height = (hr.height // args.scale) * args.scale
        hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
        lr = hr.resize((hr.width // args.scale, hr_height // args.scale),
                       resample=pil_image.BICUBIC)
        hr = np.array(hr).astype(np.float32)
        if args.compress:
            lr = compress_img(lr, args.quality)
        lr.save('dataset/train_source/{}_loaded.jpg'.format(k),
                format='jpeg',
                quality=100,
                subsampling=0)
        lr = np.array(lr).astype(np.float32)
        print((lr.shape, hr.shape))
        hr = convert_rgb_to_y(hr)
        lr = convert_rgb_to_y(lr)
        k += 1
        for i in range(0, lr.shape[0] - args.patch_size + 1, args.scale):
            for j in range(0, lr.shape[1] - args.patch_size + 1, args.scale):
                lr_patches.append(lr[i:i + args.patch_size,
                                     j:j + args.patch_size])
                hr_patches.append(hr[i * args.scale:i * args.scale +
                                     args.patch_size * args.scale,
                                     j * args.scale:j * args.scale +
                                     args.patch_size * args.scale])

    lr_patches = np.array(lr_patches)
    hr_patches = np.array(hr_patches)

    h5_file.create_dataset('lr', data=lr_patches)
    h5_file.create_dataset('hr', data=hr_patches)

    h5_file.close()
Example #11
def train(args):
    h5_file = h5py.File(args.h5_path, 'w')

    lr_patches = []
    hr_patches = []

    # for image_path in sorted(glob.glob('{}/*'.format(args.images_dir))):
    train_list = os.listdir(args.images_dir)
    count = 0
    for num, img_name in enumerate(train_list):
        img_jpgname = img_name.replace('.bmp','.jpg')
        image_path = os.path.join(args.images_dir, img_name)
        image_jpgpath = os.path.join(args.jpg_image_dir, img_jpgname)
        hr = Image.open(image_path).convert('RGB')
        # hr_width = (hr.width // args.scale) * args.scale
        # hr_height = (hr.height // args.scale) * args.scale
        # hr = hr.resize((hr_width, hr_height), resample=Image.BICUBIC) # hr.size = 512 -> 510
        # hr_blur = hr.filter(ImageFilter.GaussianBlur(2))
        # lr = hr_blur.resize((hr_width // args.scale, hr_height // args.scale), resample=Image.BICUBIC) # lr.size = 510/3 -> 170

        hr.save(image_jpgpath, quality=args.JPEG_factor)
        img_pil_jpg = Image.open(image_jpgpath).convert('RGB')
        # TODO: add denoising.
        hr = np.array(hr).astype(np.float32)
        lr = np.array(img_pil_jpg).astype(np.float32)
        hr = convert_rgb_to_y(hr)
        lr = convert_rgb_to_y(lr)
        # cut the image into aligned pairs of patches

        for i in range(0, lr.shape[0] - args.patch_size + 1, args.stride):
            for j in range(0, lr.shape[1] - args.patch_size + 1, args.stride):
                lr_patches.append(lr[i:i + args.patch_size, j:j + args.patch_size])
                hr_patches.append(hr[i:i + args.patch_size, j:j + args.patch_size])
                count = count + 1

    print('number of pairs: ', count)
    lr_patches = np.array(lr_patches)
    hr_patches = np.array(hr_patches)

    h5_file.create_dataset('lr', data=lr_patches)
    h5_file.create_dataset('hr', data=hr_patches)

    h5_file.close()
Example #12
def prepare_x_images(images):
    all_x_images_y = []
    all_x_images_bicubic = []

    for image in images:
        all_x_images_y.append(utils.convert_rgb_to_y(image))
        x = utils.resize_image_by_pil(image, 3)
        quad_image = np.zeros([32, 32, 9])
        utils.convert_to_multi_channel_image(quad_image, x, 3)
        all_x_images_bicubic.append(quad_image)

    return (all_x_images_y, all_x_images_bicubic)
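prepare_x_images() stores, for each input, its Y channel plus a 9-channel "quad" version of the x3 bicubic upscale. Assuming convert_to_multi_channel_image performs a space-to-depth rearrangement (each scale x scale block of the upscaled image becomes scale**2 channels of one output pixel, taking the first channel), a slow but explicit sketch:

def convert_to_multi_channel_image(multi_channel_image, image, scale):
    # Pack each scale x scale block of `image` into scale**2 channels,
    # writing into the pre-allocated `multi_channel_image` in place.
    height, width = multi_channel_image.shape[0:2]
    for y in range(height):
        for x in range(width):
            for dy in range(scale):
                for dx in range(scale):
                    multi_channel_image[y, x, dy * scale + dx] = \
                        image[y * scale + dy, x * scale + dx, 0]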
Example #13
def eval(args):
    h5_file = h5py.File(args.h5_path, 'w')

    lr_group = h5_file.create_group('lr')
    hr_group = h5_file.create_group('hr')
    # How to get the LR image: downsample the HR image (bicubic resize).
    # What to feed the network: the upsampled LR image, i.e. LR'.
    # Channel conversion: RGB to Y.
    # No patching of the whole image.

    eval_list = os.listdir(args.images_dir)
    count = 0
    for num, img_name in enumerate(eval_list):
        img_jpgname = img_name.replace('.bmp','.jpg')
        image_path = os.path.join(args.images_dir, img_name)
        image_jpgpath = os.path.join(args.jpg_image_dir, img_jpgname)
        hr = Image.open(image_path).convert('RGB')

        hr.save(image_jpgpath, quality=args.JPEG_factor)
        img_pil_jpg = Image.open(image_jpgpath).convert('RGB')
        # TODO: add denoising.

        hr = np.array(hr).astype(np.float32)
        lr = np.array(img_pil_jpg).astype(np.float32)
        hr = convert_rgb_to_y(hr)
        lr = convert_rgb_to_y(lr)
        # cut the image into aligned pairs of patches
        for i in range(0, lr.shape[0] - args.patch_size + 1, args.stride):
            for j in range(0, lr.shape[1] - args.patch_size + 1, args.stride):
                # lr_patches.append(lr[i:i + args.patch_size, j:j + args.patch_size])
                # hr_patches.append(hr[i:i + args.patch_size, j:j + args.patch_size])
                lr_group.create_dataset(str(count), data=lr[i:i + args.patch_size, j:j + args.patch_size])
                hr_group.create_dataset(str(count), data=hr[i:i + args.patch_size, j:j + args.patch_size])
                count = count+1
    print(count)

    h5_file.close()
Example #14
    def get_random_patched_image(self, filename):
        image = load_image(filename)
        height, width = image.shape[0:2]

        size = self.image_size * self.scale

        if height < size or width < size:
            print("Error: {} should have more than {} x {} size.".format(
                filename, size, size))
            return None

        x = random.randrange(height - size) if height != size else 0
        y = random.randrange(width - size) if width != size else 0

        image = image[x:x + size, y:y + size, :]

        # Convert 1 channel (y)
        image = convert_rgb_to_y(image)

        return image
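get_random_patched_image() (and evaluate() in Example #6) uses two small I/O helpers that are not shown. Their behaviour can be inferred from how they are called; a sketch, mirroring the (width // scale) * scale cropping logic of the PIL-based examples:

import numpy as np
from PIL import Image

def load_image(filename):
    # Read an image file as an HWC float32 RGB array.
    return np.array(Image.open(filename).convert('RGB')).astype(np.float32)

def align_image(image, scale):
    # Crop so both spatial dimensions are exact multiples of scale.
    h = (image.shape[0] // scale) * scale
    w = (image.shape[1] // scale) * scale
    return image[:h, :w]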
Example #15
for image_path in sorted(glob.glob('{}/*'.format(images_dir))):
    count += 1
    if (count == 100):
        break
    print(image_path)
    hr = pil_image.open(image_path).convert('RGB')
    hr_width = (hr.width // scale) * scale
    hr_height = (hr.height // scale) * scale
    hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
    lr = hr.resize((hr_width // scale, hr_height // scale),
                   resample=pil_image.BICUBIC)
    lr = lr.resize((lr.width * scale, lr.height * scale),
                   resample=pil_image.BICUBIC)
    hr = np.array(hr).astype(np.float32)
    lr = np.array(lr).astype(np.float32)
    hr = convert_rgb_to_y(hr)
    lr = convert_rgb_to_y(lr)

    for i in range(0, lr.shape[0] - patch_size + 1, stride):
        for j in range(0, lr.shape[1] - patch_size + 1, stride):
            lr_patches.append(lr[i:i + patch_size, j:j + patch_size])
            hr_patches.append(hr[i:i + patch_size, j:j + patch_size])

lr_patches = np.array(lr_patches)
hr_patches = np.array(hr_patches)
print("hi")
h5_file.create_dataset('lr', data=lr_patches)
h5_file.create_dataset('hr', data=hr_patches)

h5_file.close()
Example #16
    hr = image.resize((image_width, image_height), resample=pil_image.BICUBIC)
    lr = hr.resize((hr.width // args.scale, hr.height // args.scale),
                   resample=pil_image.BICUBIC)
    bicubic = lr.resize((lr.width * args.scale, lr.height * args.scale),
                        resample=pil_image.BICUBIC)
    bicubic.save(
        args.image_file.replace('.', '_bicubic_x{}.'.format(args.scale)))

    lr = np.expand_dims(
        np.array(lr).astype(np.float32).transpose([2, 0, 1]), 0) / 255.0
    hr = np.expand_dims(
        np.array(hr).astype(np.float32).transpose([2, 0, 1]), 0) / 255.0
    lr = torch.from_numpy(lr).to(device)
    hr = torch.from_numpy(hr).to(device)

    with torch.no_grad():
        preds = model(lr).squeeze(0)

    preds_y = convert_rgb_to_y(denormalize(preds), dim_order='chw')
    hr_y = convert_rgb_to_y(denormalize(hr.squeeze(0)), dim_order='chw')

    preds_y = preds_y[args.scale:-args.scale, args.scale:-args.scale]
    hr_y = hr_y[args.scale:-args.scale, args.scale:-args.scale]

    psnr = calc_psnr(hr_y, preds_y)
    print('PSNR: {:.2f}'.format(psnr))

    output = pil_image.fromarray(
        denormalize(preds).permute(1, 2, 0).byte().cpu().numpy())
    output.save(args.image_file.replace('.', '_rdn_x{}.'.format(args.scale)))
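The PSNR at the end of Example #16 (and in Example #17 below) is computed on denormalized Y tensors. A sketch of the two small helpers involved, assuming 0-1 model outputs and a 255 peak value (signatures are inferred from the calls, not copied from the repository):

import torch

def denormalize(img):
    # Map a 0-1 float tensor back to the 0-255 range.
    return img.mul(255.0).clamp(0.0, 255.0)

def calc_psnr(img1, img2, max_val=255.0):
    # PSNR between two tensors sharing the same value range.
    mse = torch.mean((img1 - img2) ** 2)
    return 10. * torch.log10((max_val ** 2) / mse)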
Example #17
        if (epoch + 1) % 10 == 0:
            torch.save(model.state_dict(), os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))

        model.eval()
        epoch_psnr = AverageMeter()

        for data in eval_dataloader:
            inputs, labels = data

            inputs = inputs.to(device)
            labels = labels.to(device)

            with torch.no_grad():
                preds = model(inputs)

            preds = convert_rgb_to_y(denormalize(preds.squeeze(0)), dim_order='chw')
            labels = convert_rgb_to_y(denormalize(labels.squeeze(0)), dim_order='chw')

            preds = preds[args.scale:-args.scale, args.scale:-args.scale]
            labels = labels[args.scale:-args.scale, args.scale:-args.scale]

            epoch_psnr.update(calc_psnr(preds, labels), len(inputs))

        print('eval psnr: {:.2f}'.format(epoch_psnr.avg))

        if epoch_psnr.avg > best_psnr:
            best_epoch = epoch
            best_psnr = epoch_psnr.avg
            best_weights = copy.deepcopy(model.state_dict())

    print('best epoch: {}, psnr: {:.2f}'.format(best_epoch, best_psnr))
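Example #17's evaluation loop accumulates per-batch PSNR with an AverageMeter before comparing against best_psnr. A typical implementation of that utility, in the style these training scripts usually share (a sketch, not copied from the repository):

class AverageMeter(object):
    # Tracks a running sum and count so .avg is the mean over the epoch.
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count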