Example #1
    def __getitem__(self, index):
        folder = self.data_info['folder'][index]
        idx, max_idx = self.data_info['idx'][index].split('/')
        idx, max_idx = int(idx), int(max_idx)
        border = self.data_info['border'][index]
        lq_path = self.data_info['lq_path'][index]

        select_idx = util.generate_frame_indices(idx,
                                                 max_idx,
                                                 self.opt['num_frame'],
                                                 padding=self.opt['padding'])

        if self.cache_data:
            if self.opt['use_duf_downsampling']:
                # read imgs_gt to generate low-resolution frames
                imgs_lq = self.imgs_gt[folder].index_select(
                    0, torch.LongTensor(select_idx))
                imgs_lq = duf_downsample(imgs_lq,
                                         kernel_size=13,
                                         scale=self.opt['scale'])
            else:
                imgs_lq = self.imgs_lq[folder].index_select(
                    0, torch.LongTensor(select_idx))
            img_gt = self.imgs_gt[folder][idx]
        else:
            if self.opt['use_duf_downsampling']:
                img_paths_lq = [self.imgs_gt[folder][i] for i in select_idx]
                # read imgs_gt to generate low-resolution frames
                imgs_lq = util.read_img_seq(img_paths_lq,
                                            require_mod_crop=True,
                                            scale=self.opt['scale'])
                imgs_lq = duf_downsample(imgs_lq,
                                         kernel_size=13,
                                         scale=self.opt['scale'])
            else:
                img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
                imgs_lq = util.read_img_seq(img_paths_lq)
            img_gt = util.read_img_seq([self.imgs_gt[folder][idx]],
                                       require_mod_crop=True,
                                       scale=self.opt['scale'])
            img_gt.squeeze_(0)

        return {
            'lq': imgs_lq,  # (t, c, h, w)
            'gt': img_gt,  # (c, h, w)
            'folder': folder,  # folder name
            'idx': self.data_info['idx'][index],  # e.g., 0/99
            'border': border,  # 1 for border, 0 for non-border
            'lq_path': lq_path  # center frame
        }
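The window of frame indices above comes from util.generate_frame_indices. As a point of reference, the sketch below shows roughly how the 'replicate' padding mode can be expected to behave; replicate_frame_indices is a hypothetical re-implementation, not the library function (the real utility also supports reflection- and circle-style padding).

def replicate_frame_indices(crt_idx, max_frame_num, num_frames):
    """Return num_frames indices centred on crt_idx, clamped to
    [0, max_frame_num - 1] ('replicate' padding at sequence borders)."""
    half = num_frames // 2
    return [min(max(i, 0), max_frame_num - 1)
            for i in range(crt_idx - half, crt_idx + half + 1)]

# e.g. replicate_frame_indices(0, 100, 5)  -> [0, 0, 0, 1, 2]
#      replicate_frame_indices(99, 100, 5) -> [97, 98, 99, 99, 99]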
Example #2
    def __getitem__(self, index):
        folder = self.data_info['folder'][index]
        idx, max_idx = self.data_info['idx'][index].split('/')
        idx, max_idx = int(idx), int(max_idx)
        border = self.data_info['border'][index]
        lq_path = self.data_info['lq_path'][index]

        select_idx = util.generate_frame_indices(idx,
                                                 max_idx,
                                                 self.opt['num_frame'],
                                                 padding=self.opt['padding'])

        if self.cache_data:
            if self.opt['use_duf_downsampling']:
                imgs_lq = self.imgs_gt[folder].index_select(
                    0, torch.LongTensor(select_idx))
                imgs_lq = duf_downsample(imgs_lq,
                                         kernel_size=13,
                                         scale=self.opt['scale'])
            else:
                imgs_lq = self.imgs_lq[folder].index_select(
                    0, torch.LongTensor(select_idx))
            img_gt = self.imgs_gt[folder][idx]
        else:
            if self.opt['use_duf_downsampling']:
                img_paths_lq = [self.imgs_gt[folder][i] for i in select_idx]
                imgs_lq = util.read_img_seq(img_paths_lq,
                                            require_mod_crop=True,
                                            scale=self.opt['scale'])
                imgs_lq = duf_downsample(imgs_lq,
                                         kernel_size=13,
                                         scale=self.opt['scale'])
            else:
                img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
                imgs_lq = util.read_img_seq(img_paths_lq)
            img_gt = util.read_img_seq([self.imgs_gt[folder][idx]],
                                       require_mod_crop=True,
                                       scale=self.opt['scale'])

        return {
            'lq': imgs_lq,  # (t, c, h, w)
            'gt': img_gt,  # (c, h, w) if cached, else (1, c, h, w) (not squeezed here)
            'folder': folder,  # folder name
            'idx': self.data_info['idx'][index],  # e.g., 0/99
            'border': border,  # 1 for border, 0 for non-border
            'lq_path': lq_path  # center frame
        }
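To see how items shaped like this dictionary are consumed, the minimal runnable sketch below batches them with a standard PyTorch DataLoader (stub tensors and names, not the real dataset): tensors are stacked along a new batch dimension, while string fields are collated into lists.

import torch
from torch.utils.data import Dataset, DataLoader

class _StubClipDataset(Dataset):
    """Stand-in dataset returning items with the same structure."""

    def __len__(self):
        return 4

    def __getitem__(self, index):
        return {
            'lq': torch.zeros(7, 3, 64, 64),   # (t, c, h, w)
            'gt': torch.zeros(3, 256, 256),    # (c, h, w)
            'folder': 'calendar',
            'idx': f'{index}/4',
            'border': 0,
            'lq_path': f'frames/{index:08d}.png',
        }

loader = DataLoader(_StubClipDataset(), batch_size=2)
batch = next(iter(loader))
print(batch['lq'].shape)   # torch.Size([2, 7, 3, 64, 64])
print(batch['gt'].shape)   # torch.Size([2, 3, 256, 256])
print(batch['folder'])     # ['calendar', 'calendar']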
Example #3
def read_img_worker(path, key, compress_level, lr=False, use_wt=False,
                    use_inter_wt=False, filters=None):
    """Read image worker.

    Args:
        path (str): Image path.
        key (str): Image key.
        compress_level (int): Compress level when encoding images.
        lr (bool): If True, generate the low-resolution image from the
            ground-truth image with duf_downsample. Default: False.
        use_wt (bool): If True, apply a 3-level wavelet transform and keep
            a 64x64 patch instead of PNG-encoding. Default: False.
        use_inter_wt (bool): If True, resize to 128x128 before a 2-level
            wavelet transform. Default: False.
        filters: Wavelet filters forwarded to wt. Default: None.

    Returns:
        str: Image key.
        bytes: Image bytes (PNG-encoded, or raw bytes for the wavelet
            branches).
        tuple[int]: Image shape (h, w, c).
    """

    img = mmcv.imread(path, flag='unchanged')

    # if lr is True, generate the low-resolution image from the
    # ground-truth image with duf_downsample
    if lr:
        img = duf_downsample(
            torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0),
            scale=4).squeeze().permute(1, 2, 0).numpy()

    elif use_wt:
        # 3-level wavelet transform on the GPU, keeping the top-left
        # 64x64 patch of the result
        img = torch.from_numpy(img / 255.0).float()
        if img.ndim == 2:
            # grayscale inputs are not handled in this branch
            print(img.shape, 'weird shape of image c=1')
            sys.exit(1)
        elif img.ndim == 3:
            img = wt(img.permute(2, 0, 1).unsqueeze(0).to('cuda:0'),
                     filters, levels=3)[:, :, :64, :64]
            img = img.squeeze().permute(1, 2, 0).cpu().numpy()
    elif use_inter_wt:
        # Lanczos-resize to 128x128 first, then a 2-level wavelet transform,
        # again keeping the top-left 64x64 patch
        img = mmcv.image.imresize(img, (128, 128),
                                  interpolation='lanczos', backend='cv2')
        img = torch.from_numpy(img / 255.0).float()
        if img.ndim == 2:
            img = wt(img.unsqueeze(0).unsqueeze(0).to('cuda:0'),
                     filters, levels=2)[:, :, :64, :64]
            img = img.squeeze().cpu().numpy()
        elif img.ndim == 3:
            img = wt(img.permute(2, 0, 1).unsqueeze(0).to('cuda:0'),
                     filters, levels=2)[:, :, :64, :64]
            img = img.squeeze().permute(1, 2, 0).cpu().numpy()

    if img.ndim == 2:
        h, w = img.shape
        c = 1
    else:
        h, w, c = img.shape
    
    if not (use_wt or use_inter_wt):
        _, img_byte = cv2.imencode('.png', img,
                                   [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    else:
        # wavelet patches are written as raw bytes via ndarray.tobytes()
        img_byte = img.tobytes()

    return (key, img_byte, (h, w, c))
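Note that this worker returns two different byte formats: PNG-compressed bytes in the default branch and raw ndarray.tobytes() buffers in the wavelet branches. The hedged sketch below shows one way such entries could be read back; decode_entry is an illustrative helper, and the float32 dtype is an assumption based on the .float() casts above, not something stated in the original script.

import cv2
import numpy as np

def decode_entry(img_byte, shape, raw_float=False):
    """Recover an image array from the bytes produced by read_img_worker."""
    h, w, c = shape
    if raw_float:
        # wavelet branches: raw float32 buffer with the recorded shape
        return np.frombuffer(img_byte, dtype=np.float32).reshape(h, w, c)
    # default branch: PNG-compressed bytes
    return cv2.imdecode(np.frombuffer(img_byte, dtype=np.uint8),
                        cv2.IMREAD_UNCHANGED)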
Example #4
def worker(path, opt):
    """Worker for each process.

    Args:
        path (str): Image path.
        opt (dict): Configuration dict. It contains:
            scale (int): Downsampling scale passed to duf_downsample.
            save_folder (str): Path to the save folder.
            compression_level (int): For cv2.IMWRITE_PNG_COMPRESSION.

    Returns:
        process_info (str): Process information displayed in progress bar.
    """
    scale = opt['scale']
    img_name, extension = osp.splitext(osp.basename(path))
    extension = '.png'  # always write the output as PNG

    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    
    if img.ndim == 2:
        h, w = img.shape
    elif img.ndim == 3:
        h, w, c = img.shape
    else:
        raise ValueError(f'Image ndim should be 2 or 3, but got {img.ndim}')

    # to a CHW float tensor (ToTensor scales uint8 inputs to [0, 1])
    img = ToTensor()(img)
    # DUF-style downsampling, then back to HWC in [0, 255] for cv2.imwrite
    downsampled_img = duf_downsample(img.unsqueeze(0), scale=scale).squeeze(0)
    downsampled_img *= 255.0
    downsampled_img = downsampled_img.numpy().transpose(1, 2, 0)

    cv2.imwrite(
        osp.join(opt['save_folder'], f'{img_name}{extension}'),
        downsampled_img,
        [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
    process_info = f'Processing {img_name} ...'
    return process_info
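A worker like this is typically driven by a multiprocessing pool. The sketch below is a hypothetical driver for the worker above, with placeholder paths and opt values that do not come from the original script.

import os
import os.path as osp
from functools import partial
from multiprocessing import Pool

def main():
    opt = {
        'scale': 4,
        'save_folder': 'datasets/DIV2K/LR_duf_x4',  # placeholder path
        'compression_level': 3,
    }
    os.makedirs(opt['save_folder'], exist_ok=True)

    input_folder = 'datasets/DIV2K/HR'  # placeholder path
    paths = sorted(
        osp.join(input_folder, name) for name in os.listdir(input_folder)
        if name.lower().endswith(('.png', '.jpg', '.jpeg')))

    with Pool(processes=8) as pool:
        for info in pool.imap_unordered(partial(worker, opt=opt), paths):
            print(info)

if __name__ == '__main__':
    main()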