示例#1
0
def DIV2k_HR2LR(HRpath, LRsuffix, scale, IMG_EXTENSION):
    """Downscale every HR image under ``HRpath`` and write the LR result.

    Each HR image is rescaled by ``1/scale`` and saved into a sibling
    directory: a directory whose name contains ``HR`` maps to the same
    name with ``HR`` replaced by ``LR``; otherwise ``<dirname>_LRx<scale>``
    is used.

    :param HRpath: root directory holding the HR images.
    :param LRsuffix: suffix appended to each output image stem.
    :param scale: downscale factor (output side length is 1/scale of input).
    :param IMG_EXTENSION: image extension used both for scanning and saving.
    :return: None. LR images are written to disk as a side effect.
    """
    images = list(scandir(HRpath, suffix=IMG_EXTENSION, recursive=True))
    images = [osp.join(HRpath, v) for v in images]
    assert images, f'{HRpath} has no valid image file.'
    with alive_bar(len(images)) as bar:   # progress bar over all images
        for image in images:
            HRimg = imread(image, flag='color')
            # NOTE: imrescale uses OpenCV area interpolation by default;
            # swap in your own method (e.g. Pillow bicubic) if needed.
            LRimg = imrescale(HRimg, 1.0 / scale)
            dirpath = osp.dirname(image)
            dirname = osp.basename(dirpath)
            if "HR" in dirname:
                newdirname = dirname.replace("HR", "LR")
            else:
                newdirname = dirname + "_LRx" + str(scale)
            newdirpath = osp.join(osp.dirname(dirpath), newdirname)

            HR_image_name = osp.splitext(osp.basename(image))[0]
            LR_image_name = HR_image_name + LRsuffix + IMG_EXTENSION
            imwrite(LRimg, osp.join(newdirpath, LR_image_name))
            bar()  # one image consumed
示例#2
0
def mp4s2pngs(mp4dir, IMG_EXTENSION, destidir):
    """Convert every video found under ``mp4dir`` to frames in ``destidir``."""
    found = scandir(mp4dir, suffix=IMG_EXTENSION, recursive=True)
    video_paths = [osp.join(mp4dir, name) for name in found]
    # one progress-bar tick per converted video
    with alive_bar(len(video_paths)) as bar:
        for video_path in video_paths:
            mp42png(video_path, destidir)
            bar()
示例#3
0
    def load_annotations(self):
        """Build the per-frame annotation list for this dataset split.

        Scans ``self.lq_folder`` recursively for frame keys, optionally
        splits clips into train/eval partitions via ``self.eval_part``,
        counts frames per clip, and returns one info dict per key.

        Returns:
            list[dict]: one entry per key with ``lq_path``, ``key``,
                ``max_frame_num`` and ``num_input_frames``; train/eval
                entries also carry ``gt_path``.

        Raises:
            AssertionError: if a key has no matching GT file (train/eval).
            NotImplementedError: if ``self.mode`` is unknown.
        """
        # get keys, sorted so frames of one clip stay consecutive
        keys = sorted(
            scandir(self.lq_folder, suffix=IMG_EXTENSIONS, recursive=True),
            key=get_key_for_video_imgs)

        # do split for train and eval (clip id is the first path component)
        if self.eval_part is not None:
            if self.mode == "train":
                keys = [k for k in keys if k.split('/')[0] not in self.eval_part]
            elif self.mode == "eval":
                keys = [k for k in keys if k.split('/')[0] in self.eval_part]

        # count frames per clip in a single pass (was two passes)
        self.frame_num = dict()
        for key in keys:
            clip = key.split("/")[0]
            self.frame_num[clip] = self.frame_num.get(clip, 0) + 1

        data_infos = []
        for key in keys:
            # sanity check: the key must resolve to an existing GT file
            if self.mode in ("train", "eval"):
                gt_path = os.path.join(self.gt_folder, key)
                assert os.path.exists(gt_path), \
                    "please make sure the key {} for LR and HR is same".format(key)

            if self.mode in ("train", "eval"):
                # train and eval share the exact same info layout
                data_infos.append(
                    dict(
                        lq_path=self.lq_folder,
                        gt_path=self.gt_folder,
                        key=key,
                        max_frame_num=self.frame_num[key.split("/")[0]],
                        num_input_frames=self.num_input_frames))
            elif self.mode == "test":
                data_infos.append(
                    dict(
                        lq_path=self.lq_folder,
                        key=key,
                        max_frame_num=self.frame_num[key.split("/")[0]],
                        num_input_frames=self.num_input_frames))
            else:
                raise NotImplementedError("")

        return data_infos
示例#4
0
    def scan_folder(path):
        """Obtain image path list (including sub-folders) from a given folder.

        Args:
            path (str | :obj:`Path`): Folder path.

        Returns:
            list[str]: Image paths (joined with ``path``) found in the folder.

        Raises:
            TypeError: If ``path`` is neither ``str`` nor ``Path``.
            AssertionError: If the folder contains no valid image file.
        """

        if isinstance(path, (str, Path)):
            path = str(path)
        else:
            raise TypeError("'path' must be a str or a Path object, "
                            f'but received {type(path)}.')

        # sorted() consumes the generator directly; the intermediate
        # list() was redundant
        images = sorted(scandir(path, suffix=IMG_EXTENSIONS, recursive=True))
        images = [osp.join(path, v) for v in images]
        assert images, f'{path} has no valid image file.'
        return images
示例#5
0
def unzip(zip_path):
    """Unzip zip files. It will scan all zip files in zip_path and return unzip
    folder names.

    Args:
        zip_path (str): Path for zip files.

    Returns:
        list: unzip folder names.
    """
    import zipfile

    zip_files = scandir(zip_path, suffix='zip', recursive=False)
    unzip_folders = []
    for zip_name in zip_files:
        # BUG FIX: derive the target folder from the bare file name BEFORE
        # joining with zip_path. The previous code stripped '.zip' from the
        # already-joined path and joined with zip_path a second time, which
        # produced a wrongly nested path whenever zip_path was relative.
        # (scandir yields names relative to zip_path, consistent with every
        # other call site in this file.)
        folder_name = zip_name.replace('.zip', '').split('_part')[0]
        zip_file = osp.join(zip_path, zip_name)
        unzip_folder = osp.join(zip_path, folder_name)
        print(f'Unzip {zip_file} to {unzip_folder}')
        with zipfile.ZipFile(zip_file, 'r') as zip_ref:
            zip_ref.extractall(unzip_folder)
        unzip_folders.append(unzip_folder)
    return unzip_folders
示例#6
0
def make_lmdb(mode, data_path, lmdb_path, batch=5000, compress_level=1):
    """Create lmdb for the REDS dataset.

    Contents of lmdb. The file structure is:
    example.lmdb
    ├── data.mdb
    ├── lock.mdb
    ├── meta_info.txt

    The data.mdb and lock.mdb are standard lmdb files and you can refer to
    https://lmdb.readthedocs.io/en/release/ for more details.

    The meta_info.txt is a specified txt file to record the meta information
    of our datasets. Each line records 1) image name (with extension),
    2) image shape, and 3) compression level, separated by a white space.

    For example, `000_00000000.png (720,1280,3) 1` means:
    1) image name (with extension): 000_00000000.png;
    2) image shape: (720,1280,3);
    3) compression level: 1

    We use the image name without extension as the lmdb key.

    Args:
        mode (str): REDS dataset mode. Choices: ['train_sharp', 'train_blur',
            'train_blur_comp', 'train_sharp_bicubic', 'train_blur_bicubic'].
            They are used to identify different reds dataset for different
            tasks. Specifically:
            'train_sharp': GT frames;
            'train_blur': Blur frames for deblur task.
            'train_blur_comp': Blur and compressed frames for deblur and
                compression task.
            'train_sharp_bicubic': Bicubic downsampled sharp frames for SR
                task.
            'train_blur_bicubic': Bicubic downsampled blur frames for SR task.
        data_path (str): Data path for reading images.
        lmdb_path (str): Lmdb save path.
        batch (int): After processing batch images, lmdb commits.
            Default: 5000.
        compress_level (int): Compress level when encoding images. Default: 1.

    Raises:
        NotImplementedError: If ``mode`` is not a known REDS mode.
        AssertionError: If no png is found, or an image has the wrong shape.
    """

    print(f'Create lmdb for {data_path}, save to {lmdb_path}...')
    # expected frame resolution per mode (used to validate every image)
    if mode in ['train_sharp', 'train_blur', 'train_blur_comp']:
        h_dst, w_dst = 720, 1280
    elif mode in ['train_sharp_bicubic', 'train_blur_bicubic']:
        h_dst, w_dst = 180, 320
    else:
        raise NotImplementedError("check mode: {}".format(mode))

    if osp.exists(lmdb_path):
        print(f'Folder {lmdb_path} already exists. Exit.')
        sys.exit(1)

    print('Reading image path list ...')
    img_path_list = sorted(scandir(data_path, suffix='png', recursive=True))
    # fail loudly instead of raising IndexError below on an empty folder
    assert img_path_list, f'{data_path} has no png file.'
    keys = []
    for img_path in img_path_list:
        parts = img_path.split('/')
        folder = parts[-2]
        img_name = parts[-1].split('.png')[0]
        keys.append(folder + '_' + img_name)  # example: 000_00000000

    # create lmdb environment; estimate map_size from one encoded image
    img = imread(osp.join(data_path, img_path_list[0]), flag='unchanged')
    _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    data_size_per_img = img_byte.nbytes
    print('Data size per image is: ', data_size_per_img)
    data_size = data_size_per_img * len(img_path_list)
    env = lmdb.open(lmdb_path, map_size=data_size * 10)

    # write data to lmdb; context managers / finally guarantee the meta file
    # and the env are closed even when an image fails validation
    try:
        txn = env.begin(write=True)
        with open(osp.join(lmdb_path, 'meta_info.txt'), 'w') as txt_file, \
                alive_bar(len(img_path_list)) as bar:
            for idx, (path, key) in enumerate(zip(img_path_list, keys)):
                key_byte = key.encode('ascii')
                img = imread(osp.join(data_path, path), flag='unchanged')
                h, w, c = img.shape
                _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
                assert h == h_dst and w == w_dst and c == 3, (f'Wrong shape ({h, w}), should be ({h_dst, w_dst}).')
                txn.put(key_byte, img_byte)
                # write meta information
                txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n')
                if idx % batch == 0:
                    # commit periodically so a huge dataset does not keep one
                    # giant write transaction open
                    txn.commit()
                    txn = env.begin(write=True)
                bar()  # one image consumed
        txn.commit()
    finally:
        env.close()
    print('\nFinish writing lmdb.')
示例#7
0
    def load_annotations(self):
        """Build annotation dicts for every LR frame key.

        Scans ``self.lq_folder`` for two-level keys (``clip/frame.ext``),
        sorts them so frames of one clip stay consecutive, applies the
        train/eval split, counts frames per clip, and emits one info dict
        per key. Eval/test entries carry an ``is_first`` flag marking the
        first frame of each clip (and of each known scene cut).

        Returns:
            list[dict]: annotation dicts; layout depends on ``self.mode``.

        Raises:
            AssertionError: if an LR key has no matching HR file (train/eval).
            NotImplementedError: if ``self.mode`` is unknown.
        """
        # get keys: only paths exactly two levels deep (clip/frame)
        keys = [
            v for v in scandir(self.lq_folder, suffix=IMG_EXTENSIONS, recursive=True)
            if len(v.split('/')) == 2
        ]
        keys = sorted(keys, key=get_key_for_video_imgs)  # 000/00000.png

        if self.lq_folder == self.gt_folder:
            # gt and lq in same dir, only select lq as keys
            keys = [key for key in keys if self.LR_symbol in key]

        # do split for train and eval (clip id is the first path component)
        if self.eval_part is not None:
            if self.mode == "train":
                keys = [
                    k for k in keys if k.split('/')[0] not in self.eval_part
                ]
            elif self.mode == "eval":
                keys = [k for k in keys if k.split('/')[0] in self.eval_part]

        # count frames per clip in a single pass (was two passes)
        self.frame_num = dict()
        for key in keys:
            clip = key.split("/")[0]
            self.frame_num[clip] = self.frame_num.get(clip, 0) + 1

        # For the meg competition: per-clip frame indices of scene cuts.
        # A frame listed here is treated as the first frame of a new scene
        # (is_first=1). Previous hard-coded dead entries removed; populate
        # as needed, e.g. sence['93'] = [156, 449, 740].
        sence = dict()

        data_infos = []
        is_first = 1
        now_deal = 0
        for key in keys:
            clip = key.split("/")[0]
            # do some checks, to make sure the key for LR and HR is same.
            if self.mode in ("train", "eval"):
                gt_path = os.path.join(self.gt_folder,
                                       key.replace(self.LR_symbol, ""))
                assert os.path.exists(gt_path), \
                    "please make sure the key {} for LR and HR is same".format(key)

            if self.mode == "train":
                data_infos.append(
                    dict(lq_path=self.lq_folder,
                         gt_path=self.gt_folder,
                         LRkey=key,
                         HRkey=key.replace(self.LR_symbol, ""),
                         max_frame_num=self.frame_num[clip],
                         num_input_frames=self.num_input_frames))
            elif self.mode == "eval":
                data_infos.append(
                    dict(lq_path=os.path.join(self.lq_folder, key),
                         gt_path=os.path.join(self.gt_folder,
                                              key.replace(self.LR_symbol, "")),
                         is_first=is_first))
            elif self.mode == "test":
                data_infos.append(
                    dict(lq_path=os.path.join(self.lq_folder, key),
                         is_first=is_first))
            else:
                raise NotImplementedError("")

            # update is_first for the NEXT frame: set at a clip boundary or
            # at a known scene cut, clear otherwise
            now_deal += 1
            if now_deal == self.frame_num[clip]:
                is_first = 1
                now_deal = 0
            elif clip in sence and now_deal in sence[clip]:
                is_first = 1
            else:
                is_first = 0
        return data_infos
示例#8
0
# Image file extensions accepted when scanning directories below.
IMG_EXTENSIONS = ('.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm',
                  '.PPM', '.bmp', '.BMP')

# Clip ids (sub-directory names) to process.
do_list = ['91', '92', '93', '97', '98', '99']

# Directory of "answer" frames that get patched in place by the loop below.
path_ans = './workdirs/mucan_v5/test'

# Directory of reference frames; per the loop's comment, neighbouring ref
# pixels are used to fill the answer images.
path_ref = './workdirs/mucan_v5/test1'

# NOTE(review): 'xiang' is not referenced in the visible code — confirm it
# is used further down before removing.
xiang = './workdirs/mucan_v5/test2'


for DIR in do_list:
    print("now deal {}".format(DIR))
    now_dir_ans = os.path.join(path_ans, DIR)
    images_ans = sorted(list(scandir(now_dir_ans, suffix=IMG_EXTENSIONS, recursive=True)))
    images_ans = [os.path.join(now_dir_ans, v) for v in images_ans]
    print(images_ans[:10])

    now_dir_ref = os.path.join(path_ref, DIR)
    images_ref = sorted(list(scandir(now_dir_ref, suffix=IMG_EXTENSIONS, recursive=True)))
    images_ref = [os.path.join(now_dir_ref, v) for v in images_ref]
    print(images_ref[:10])

    assert len(images_ans) == len(images_ref)

    for i in range(len(images_ans)):
        print(i)
        # read image from images_ans and ref  , 用ref的上下四个像素填补ans,并原路写回
        ans = imread(images_ans[i], flag='unchanged')
        ref = imread(images_ref[i], flag='unchanged')