Example #1
def DIV2k_HR2LR(HRpath, LRsuffix, scale, IMG_EXTENSION):
    """
    :param HRpath: the HRpath to HR images
    :return: the LRpath to LR images
    """
    images = list(scandir(HRpath, suffix=IMG_EXTENSION, recursive=True))
    images = [osp.join(HRpath, v) for v in images]
    assert images, f'{HRpath} has no valid image file.'
    with alive_bar(len(images)) as bar:   # declare your expected total
        for image in images:               # iterate as usual
            HRimg = imread(image, flag='color')
            # note: OpenCV area interpolation is used by default; you can substitute your own method, e.g. Pillow bicubic (see the sketch after this example)
            LRimg = imrescale(HRimg, 1.0/scale)
            dirpath = osp.dirname(image)
            dirname = osp.basename(dirpath)
            if "HR" in dirname:
                newdirname = dirname.replace("HR", "LR")
            else:
                newdirname = dirname +"_LRx" + str(scale)
            dirdirpath = osp.dirname(dirpath)
            newdirpath = osp.join(dirdirpath, newdirname)

            HR_image_name = osp.splitext(osp.basename(image))[0]
            LR_image_name = HR_image_name + LRsuffix + IMG_EXTENSION
            imwrite(LRimg, osp.join(newdirpath, LR_image_name))
            bar()                        # call after consuming one item
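The comment above mentions Pillow bicubic as an alternative to the default OpenCV area interpolation. A minimal, self-contained sketch of such a helper; the name pil_rescale and its use here are assumptions, not part of the original snippet:

from PIL import Image
import numpy as np

def pil_rescale(img, scale):
    """Downscale an H x W x C uint8 array with Pillow bicubic resampling."""
    h, w = img.shape[:2]
    pil_img = Image.fromarray(img)
    # Image.Resampling.BICUBIC needs Pillow >= 9.1; older versions use Image.BICUBIC.
    pil_img = pil_img.resize((int(w * scale), int(h * scale)), Image.Resampling.BICUBIC)
    return np.array(pil_img)

Inside the loop above, LRimg = pil_rescale(HRimg, 1.0 / scale) would then replace the imrescale call.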
Example #2
    def test_step(self, batchdata, **kwargs):
        """test step.

        Args:
            batchdata: list for train_batch, numpy.ndarray or variable, length up to Collect class.

        Returns:
            list: outputs (already gathered from all threads)
        """
        output = test_generator_batch(batchdata[0], netG=self.generator)
        save_image_flag = kwargs.get('save_image')
        if save_image_flag:
            save_path = kwargs.get('save_path', None)
            start_id = kwargs.get('sample_id', None)
            if save_path is None or start_id is None:
                raise RuntimeError(
                    "if save image in test_step, please set 'save_path' and 'sample_id' parameters"
                )
            G = output
            for idx in range(G.shape[0]):
                imwrite(tensor2img(G[idx], min_max=(-0.5, 0.5)),
                        file_path=save_path +
                        "_idx_{}.png".format(start_id + idx))
        return [
            output,
        ]
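An illustrative call of the test_step above; model, lq_batch, and the paths are assumptions made for this sketch. With these kwargs the outputs are written as ./results/run1_idx_0.png, ./results/run1_idx_1.png, and so on:

outputs = model.test_step([lq_batch],
                          save_image=True,
                          save_path="./results/run1",
                          sample_id=0)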
Example #3
    def viz(self, mask, idx, tl, br, clipname, img):
        """Visualize a sparse mask and its bounding box, and save both the mask and the image."""
        c = np.zeros((180, 320))
        for h, w in mask:
            c[h, w] = 1
        c = (c * 255).astype(np.uint8)
        # tl and br are (row, col); cv2.rectangle expects (x, y) = (col, row)
        cv2.rectangle(c, (tl[1], tl[0]), (br[1], br[0]), (255, 0, 0), 1)
        cv2.rectangle(img, (tl[1], tl[0]), (br[1], br[0]), (255, 0, 0), 1)
        imwrite(c, "./{}/{}_mask.png".format(clipname, idx))
        imwrite(img, "./{}/{}_img.png".format(clipname, idx))
Example #4
    def test_step(self, batchdata, **kwargs):
        """test step.

        Args:
            batchdata: list for train_batch, numpy.ndarray or variable, length up to Collect class.

        Returns:
            list: outputs (already gathered from all threads)
        """
        epoch = kwargs.get('epoch', 0)
        images = batchdata[0]  # [B,N,C,H,W]
        images = ensemble_forward(images, Type=epoch)  # for ensemble

        H, W = images.shape[-2], images.shape[-1]
        scale = getattr(self.generator, 'upscale_factor', 4)
        padding_multi = self.eval_cfg.get('padding_multi', 1)
        # padding for H and W
        images = img_multi_padding(images,
                                   padding_multi=padding_multi,
                                   pad_value=-0.5)  # [B,N,C,H,W]
        output = test_generator_batch(images,
                                      get_mid_bicubic(images),
                                      netG=self.generator)  # HR [B,C,4H,4W]
        output = img_de_multi_padding(output,
                                      origin_H=H * scale,
                                      origin_W=W * scale)

        # back ensemble for G
        G = ensemble_back(output, Type=epoch)

        save_image_flag = kwargs.get('save_image')
        if save_image_flag:
            save_path = kwargs.get('save_path', None)
            start_id = kwargs.get('sample_id', None)
            if save_path is None or start_id is None:
                raise RuntimeError(
                    "if save image in test_step, please set 'save_path' and 'sample_id' parameters"
                )
            for idx in range(G.shape[0]):
                if epoch == 0:
                    imwrite(tensor2img(G[idx], min_max=(-0.5, 0.5)),
                            file_path=os.path.join(
                                save_path,
                                "idx_{}.png".format(start_id + idx)))
                else:
                    imwrite(tensor2img(G[idx], min_max=(-0.5, 0.5)),
                            file_path=os.path.join(
                                save_path, "idx_{}_epoch_{}.png".format(
                                    start_id + idx, epoch)))

        return [
            output,
        ]
    def test_aggre(self, save_path, padding_len=4, start_index=1):
        clip_names = sorted(self.frame_num.keys())  # e.g. [`city`, `walk`]
        frame_nums = [self.frame_num[clip] for clip in clip_names]

        do_frames = 0
        now_clip_idx = 0
        total_deal = 0
        for _ in range(len(self)):
            do_frames += 1
            if do_frames == frame_nums[now_clip_idx]:
                clip_name = clip_names[now_clip_idx]
                # move images into the per-clip directory (using shutil)
                save_dir_path = osp.join(save_path, clip_name)
                mkdir_or_exist(save_dir_path)
                # index from [total_deal, total_deal + do_frames)
                for idx in range(total_deal, total_deal + do_frames):
                    ensemble_path_1 = osp.join(
                        save_path, "idx_{}_epoch_1.png".format(idx))
                    desti_path = osp.join(
                        save_dir_path,
                        str(idx - total_deal + start_index).zfill(padding_len)
                        + ".png")
                    if osp.exists(ensemble_path_1):
                        # get the content
                        path = osp.join(save_path, "idx_{}.png".format(idx))
                        sum_result = imread(path, flag='unchanged').astype(
                            np.float32)
                        os.remove(path)
                        for e in range(1, 8):
                            path = osp.join(
                                save_path,
                                "idx_{}_epoch_{}.png".format(idx, e))
                            sum_result = sum_result + imread(
                                path, flag='unchanged').astype(np.float32)
                            os.remove(path)
                        sum_result = sum_result / 8
                        # round to the nearest integer
                        sum_result = sum_result.round().astype(np.uint8)
                        # save
                        imwrite(sum_result, desti_path)
                    else:
                        # move
                        shutil.move(
                            osp.join(save_path, "idx_" + str(idx) + ".png"),
                            desti_path)

                total_deal += do_frames
                do_frames = 0
                now_clip_idx += 1
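The test_aggre method above averages the base prediction idx_{}.png with the seven ensembled predictions idx_{}_epoch_1.png ... idx_{}_epoch_7.png in float32 and rounds the result back to uint8. A standalone sketch of just that averaging step, using OpenCV directly instead of the snippet's imread/imwrite helpers (the function name is illustrative):

import cv2
import numpy as np

def average_images(paths):
    """Average a list of same-sized uint8 images in float32, then round back to uint8."""
    acc = cv2.imread(paths[0], cv2.IMREAD_UNCHANGED).astype(np.float32)
    for p in paths[1:]:
        acc += cv2.imread(p, cv2.IMREAD_UNCHANGED).astype(np.float32)
    return (acc / len(paths)).round().astype(np.uint8)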
Example #6
def mp42png(HRpath, destidir, write_flag=True):
    LRpath = HRpath.replace("HR", "LR") + '_down4x.mp4'
    video_name = os.path.splitext(ntpath.basename(HRpath))[0]
    
    HRdir = os.path.join(destidir, "HR", video_name)
    LRdir = os.path.join(destidir, "LR", video_name)

    container_down4x = av.open(LRpath)
    container_origin = av.open(HRpath)
    frames_down4x = container_down4x.decode(video=0)
    frames_origin = container_origin.decode(video=0)

    fc_down4x = frame_count(container_down4x)
    fc_origin = frame_count(container_origin)
    extra = fc_down4x - fc_origin

    if extra > 0:
        print("video {}: the LR clip has {} more frame(s) than the HR clip".format(video_name, extra))
    
    # Due to video encoding and FFmpeg implementation issues, the frame counts before and
    # after compression may differ by a few frames. In that case, skip the surplus leading
    # frames of the downsampled stream so that the two streams stay aligned.
    if extra > 0:
        for _ in range(extra):
            next(frames_down4x)

    count = 0

    for _, (frame_down4x, frame_origin) in enumerate(zip(frames_down4x, frames_origin)):
        img_origin = frame_origin.to_rgb().to_ndarray()  # e.g. (1080, 1920, 3) for a 1080p frame
        if img_origin.shape[0] < 256 or img_origin.shape[1] < 256:
            continue
            
        img_down4x = frame_down4x.to_rgb().to_ndarray()
        assert img_down4x.shape[2] == img_origin.shape[2] and img_origin.shape[2] == 3
        if img_down4x.shape[0] * 4 != img_origin.shape[0] or img_down4x.shape[1] * 4 != img_origin.shape[1]:
            print("video: {} 's frame {} shape do not right, do resize, down shape:{}, origin shape: {}".format(video_name, count, img_down4x.shape, img_origin.shape))
            print("resize origin shape to 1920 1080 and down shape to 480 270")
            img_down4x = cv2.resize(img_down4x, (1920 // 4, 1080 // 4))
            img_origin = cv2.resize(img_origin, (1920, 1080))

        if write_flag and not os.path.exists(os.path.join(HRdir, '%05d.png' % count)):
            imwrite(img_origin, file_path=os.path.join(HRdir, '%05d.png' % count))
            imwrite(img_down4x, file_path=os.path.join(LRdir, '%05d.png' % count))

        count += 1

    container_down4x.close()
    container_origin.close()
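frame_count is used above but is not defined in this snippet. A minimal sketch of how it might be implemented with PyAV stream metadata; this is an assumption, not the snippet's actual helper:

def frame_count(container):
    """Frame count reported by the container's first video stream (may be 0 for some codecs/containers)."""
    return container.streams.video[0].frames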
Example #7
def main():
    BATCH_SIZE = 1
    place = fluid.CPUPlace()
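    # NOTE: lq_folder, gt_folder, and train_pipeline are assumed to be defined
    # elsewhere in the original script; they are not part of this snippet.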
    dataset_obj = SRFolderDataset(lq_folder=lq_folder, gt_folder=gt_folder, pipeline=train_pipeline, scale=4)

    with fluid.dygraph.guard(place):
        train_sampler = DistributedBatchSampler(
            dataset_obj,
            batch_size=BATCH_SIZE,
            shuffle=True,
            drop_last=True)
        train_loader = DataLoader(
            dataset_obj,
            batch_sampler=train_sampler,
            places=place,
            num_workers=4,
            return_list=True)

        for batch_id, data in enumerate(train_loader):
            imwrite(var2img(data[1]), "./test/niu.png")
            break
Example #8
    def test_step(self, batchdata, **kwargs):
        """
            possible kwargs:
                save_image
                save_path
                ensemble
        """
        lq = batchdata['lq']  #  [B,3,h,w]
        gt = batchdata.get('gt', None)  # if not None: [B,3,4*h,4*w]
        assert len(batchdata['lq_path']) == 1  # each sample carries an lq_path list of length 1, i.e. only itself
        lq_paths = batchdata['lq_path'][0]  # its length equals the batch size
        now_start_id, clip = self.get_img_id(lq_paths[0])
        now_end_id, _ = self.get_img_id(lq_paths[-1])
        assert clip == _

        if now_start_id == 0:
            print("first frame: {}".format(lq_paths[0]))
            self.LR_list = []
            self.HR_list = []

        # pad lq
        B, _, origin_H, origin_W = lq.shape
        lq = img_multi_padding(lq,
                               padding_multi=self.eval_cfg.multi_pad,
                               pad_method="edge")  #  edge  constant
        self.LR_list.append(lq)  # [1,3,h,w]

        if gt is not None:
            for i in range(B):
                self.HR_list.append(gt[i:i + 1, ...])

        if now_end_id == 99:
            print("start to forward all frames....")
            if self.eval_cfg.gap == 1:
                # do ensemble (8 times)
                ensemble_res = []
                self.LR_list = np.concatenate(self.LR_list,
                                              axis=0)  # [100, 3,h,w]
                for item in tqdm(range(8)):  # do not have flip
                    inp = mge.tensor(ensemble_forward(self.LR_list, Type=item),
                                     dtype="float32")
                    oup = test_generator_batch(F.expand_dims(inp, axis=0),
                                               netG=self.generator)
                    ensemble_res.append(ensemble_back(oup.numpy(), Type=item))
                self.HR_G = sum(ensemble_res) / len(
                    ensemble_res)  # average the ensemble results
            elif self.eval_cfg.gap == 2:
                raise NotImplementedError("not implement gap != 1 now")
                # self.HR_G_1 = test_generator_batch(F.stack(self.LR_list[::2], axis=1), netG=self.generator)
                # self.HR_G_2 = test_generator_batch(F.stack(self.LR_list[1::2], axis=1), netG=self.generator) # [B,T,C,H,W]
                # # interleave the two to form HR_G
                # res = []
                # _,T1,_,_,_ = self.HR_G_1.shape
                # _,T2,_,_,_ = self.HR_G_2.shape
                # assert T1 == T2
                # for i in range(T1):
                #     res.append(self.HR_G_1[:, i, ...])
                #     res.append(self.HR_G_2[:, i, ...])
                # self.HR_G = F.stack(res, axis=1) # [B,T,C,H,W]
            else:
                raise NotImplementedError("do not support eval&test gap value")

            scale = self.generator.upscale_factor
            # get numpy
            self.HR_G = img_de_multi_padding(
                self.HR_G,
                origin_H=origin_H * scale,
                origin_W=origin_W * scale)  # depad for HR_G   [B,T,C,H,W]

            if kwargs.get('save_image', False):
                print("saving images to disk ...")
                save_path = kwargs.get('save_path')
                B, T, _, _, _ = self.HR_G.shape
                assert B == 1
                assert T == 100
                for i in range(T):
                    img = tensor2img(self.HR_G[0, i, ...], min_max=(0, 1))
                    if (i + 1) % 10 == 0:
                        imwrite(img,
                                file_path=os.path.join(
                                    save_path, "partframes",
                                    f"{clip}_{str(i).zfill(8)}.png"))
                    imwrite(img,
                            file_path=os.path.join(
                                save_path, "allframes",
                                f"{clip}_{str(i).zfill(8)}.png"))

        return now_end_id == 99
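The helpers ensemble_forward and ensemble_back used in these test_step snippets are not shown anywhere on this page. A common x8 geometric self-ensemble combines a horizontal flip with 0/90/180/270-degree rotations; the sketch below illustrates that idea on numpy arrays shaped [..., H, W], but the transforms in the original code may differ:

import numpy as np

def ensemble_forward_sketch(x, Type):
    """Apply one of 8 flip/rotation variants (hypothetical stand-in for ensemble_forward)."""
    if Type % 2 == 1:
        x = np.flip(x, axis=-1)                     # horizontal flip
    return np.rot90(x, k=Type // 2, axes=(-2, -1))  # rotate by (Type // 2) * 90 degrees

def ensemble_back_sketch(x, Type):
    """Invert the corresponding transform so the 8 outputs can be averaged (stand-in for ensemble_back)."""
    x = np.rot90(x, k=-(Type // 2), axes=(-2, -1))
    if Type % 2 == 1:
        x = np.flip(x, axis=-1)
    return x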
Example #9
    def test_step(self, batchdata, **kwargs):
        """test step.
           need to know whether the first frame for video, and every step restore some hidden state.
        Args:
            batchdata: list for train_batch, numpy.ndarray, length up to Collect class.

        Returns:
            list: outputs
        """
        epoch = kwargs.get('epoch', 0)
        image = batchdata[0]  # [B,T,C,H,W]
        image = ensemble_forward(image, Type=epoch)  # for ensemble

        H, W = image.shape[-2], image.shape[-1]
        scale = getattr(self.generator, 'upscale_factor', 4)
        padding_multi = self.eval_cfg.get('padding_multi', 1)
        # padding for H and W
        image = img_multi_padding(image,
                                  padding_multi=padding_multi,
                                  pad_value=-0.5)  # [B,T,C,H,W]

        assert image.shape[0] == 1  # only support batchsize 1
        assert len(batchdata[1].shape) == 1  # first frame flag
        if batchdata[1][0] > 0.5:  # first frame
            print("first frame")
            self.now_test_num = 1
            B, _, _, now_H, now_W = image.shape
            print("use now_H : {} and now_W: {}".format(now_H, now_W))
            self.pre_SD = np.zeros((B, hidden_channels, now_H, now_W),
                                   dtype=np.float32)

        outputs = test_generator_batch(image, self.pre_SD, netG=self.generator)
        outputs = list(outputs)
        outputs[0] = img_de_multi_padding(outputs[0],
                                          origin_H=H * scale,
                                          origin_W=W * scale)

        for i in range(len(outputs)):
            outputs[i] = outputs[i].numpy()

        # update hidden state
        G, self.pre_SD = outputs

        # back ensemble for G
        G = ensemble_back(G, Type=epoch)

        save_image_flag = kwargs.get('save_image')
        if save_image_flag:
            save_path = kwargs.get('save_path', None)
            start_id = kwargs.get('sample_id', None)
            if save_path is None or start_id is None:
                raise RuntimeError(
                    "if save image in test_step, please set 'save_path' and 'sample_id' parameters"
                )
            for idx in range(G.shape[0]):
                if epoch == 0:
                    imwrite(tensor2img(G[idx], min_max=(-0.5, 0.5)),
                            file_path=os.path.join(
                                save_path,
                                "idx_{}.png".format(start_id + idx)))
                else:
                    imwrite(tensor2img(G[idx], min_max=(-0.5, 0.5)),
                            file_path=os.path.join(
                                save_path, "idx_{}_epoch_{}.png".format(
                                    start_id + idx, epoch)))

        print("now test num: {}".format(self.now_test_num))
        self.now_test_num += 1
        return outputs
Example #10
        self.keys = keys
        self.colorjitter = mge_color_jitter(brightness, contrast, saturation, hue)

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            if isinstance(results[key], list):
                results[key] = [
                    self.colorjitter.apply(v) for v in results[key]
                ]
            else:
                results[key] = self.colorjitter.apply(results[key])
        return results

    def __repr__(self):
        format_string = self.__class__.__name__
        return format_string

cj = ColorJitter(keys=["img"])
img = np.ones((100, 100, 3), dtype=np.uint8) * 100
results = {'img':img}
results = cj(results)
imwrite(results['img'], file_path="./test.png")
Example #11

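# NOTE: do_list, path_ans, path_ref, and IMG_EXTENSIONS are assumed to be defined
# earlier in the original script; they are not part of this snippet.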
for DIR in do_list:
    print("now deal {}".format(DIR))
    now_dir_ans = os.path.join(path_ans, DIR)
    images_ans = sorted(list(scandir(now_dir_ans, suffix=IMG_EXTENSIONS, recursive=True)))
    images_ans = [os.path.join(now_dir_ans, v) for v in images_ans]
    print(images_ans[:10])

    now_dir_ref = os.path.join(path_ref, DIR)
    images_ref = sorted(list(scandir(now_dir_ref, suffix=IMG_EXTENSIONS, recursive=True)))
    images_ref = [os.path.join(now_dir_ref, v) for v in images_ref]
    print(images_ref[:10])

    assert len(images_ans) == len(images_ref)

    for i in range(len(images_ans)):
        print(i)
        # read the image from images_ans and its ref; replace the top and bottom three rows of ans with those from ref, then write it back in place
        ans = imread(images_ans[i], flag='unchanged')
        ref = imread(images_ref[i], flag='unchanged')

        # [H,W,C]
        ans[0:3, :, :] = ref[0:3, :, :]
        ans[-3:, :, :] = ref[-3:, :, :]

        # write back
        imwrite(ans, images_ans[i])