Example #1
0
def test_get_frame():
    """Verify frame seeking, timestamp lookup and position recovery."""
    with toolbox.video_capture(VIDEO_PATH) as cap:
        first = 5
        second = 8

        toolbox.video_jump(cap, first)
        actual = toolbox.get_frame_time(cap, first)
        should = toolbox.get_current_frame_time(cap)

        # should be frame 5
        assert actual == should
        # tolerance check: without abs() the assertion would pass for
        # ANY timestamp below 0.17, including 0.0
        assert abs(actual - 0.16) < 0.01

        # 5 -> 8 -> 5
        frame = toolbox.get_frame(cap, second, True)
        assert frame is not None
        # grab, and recover
        # the next frame will be 5
        # the current frame is 4
        assert toolbox.get_current_frame_id(cap) == first - 1

        # 5 -> 8
        frame = toolbox.get_frame(cap, second)
        assert frame is not None
        assert toolbox.get_current_frame_id(cap) == second

        # a recovering time lookup must leave the current timestamp unchanged
        cur_time = toolbox.get_current_frame_time(cap)
        toolbox.get_frame_time(cap, second, True)
        assert toolbox.get_current_frame_time(cap) == cur_time
Example #2
0
 def is_loop(self, threshold: float = None, **_) -> bool:
     """Return True when this range's first and last frames look alike.

     :param threshold: SSIM similarity threshold, defaults to 0.95.
         Compared with ``is None`` so an explicit 0.0 is honoured
         (the previous falsy check silently replaced 0.0 with 0.95).
     """
     if threshold is None:
         threshold = 0.95
     with toolbox.video_capture(video_path=self.video_path) as cap:
         start_frame = toolbox.get_frame(cap, self.start)
         end_frame = toolbox.get_frame(cap, self.end)
         start_frame, end_frame = map(toolbox.compress_frame, (start_frame, end_frame))
         return toolbox.compare_ssim(start_frame, end_frame) > threshold
Example #3
0
    def contain_image(self,
                      image_path: str = None,
                      image_object: np.ndarray = None,
                      threshold: float = None,
                      *args,
                      **kwargs):
        """Check whether a template image appears in a picked frame.

        :param image_path: path to the template image, takes priority
        :param image_object: template already loaded as an ndarray
        :param threshold: similarity threshold, defaults to 0.99;
            compared with ``is None`` so an explicit 0.0 is honoured
        :return: True when template similarity exceeds the threshold
        """
        # NOTE: truth-testing a multi-element ndarray raises ValueError,
        # so the array must be compared against None explicitly
        assert image_path or image_object is not None, 'should fill image_path or image_object'
        if threshold is None:
            threshold = 0.99

        if image_path:
            logger.debug(f'found image path, use it first: {image_path}')
            assert os.path.isfile(
                image_path), f'image {image_path} not existed'
            image_object = cv2.imread(image_path)
        image_object = toolbox.turn_grey(image_object)

        # TODO use client or itself..?
        fi = FindIt(engine=['template'])
        fi_template_name = 'default'
        fi.load_template(fi_template_name, pic_object=image_object)

        with toolbox.video_capture(self.video.path) as cap:
            target_id = self.pick(*args, **kwargs)[0]
            frame = toolbox.get_frame(cap, target_id)
            frame = toolbox.turn_grey(frame)

            result = fi.find(str(target_id), target_pic_object=frame)
        find_result = result['data'][fi_template_name]['TemplateEngine']
        position = find_result['target_point']
        sim = find_result['target_sim']
        logger.debug(f'position: {position}, sim: {sim}')
        return sim > threshold
Example #4
0
    def get_stable_stage_sample(
        data_list: typing.List[ClassifierResult], *args, **kwargs
    ) -> np.ndarray:
        """Build a horizontal strip with one sample frame per stable stage.

        :param data_list: classifier results, ordered by frame id
        :return: the first frame of each stable stage, separated by thin
            black split lines, stacked horizontally
        :raises ValueError: when data_list contains no stable result
        """
        # pick the first result of each stable stage; unstable results are
        # never picked (previously data_list[0] was seeded unconditionally,
        # so an unstable first element slipped through the filter)
        picked: typing.List[ClassifierResult] = []
        last = None
        for each in data_list:
            # ignore unstable stage
            if each.stage == UNSTABLE_FLAG:
                continue
            if last is None or last.stage != each.stage:
                last = each
                picked.append(each)
        if not picked:
            raise ValueError('no stable stage found in data_list')

        def get_split_line(f):
            # thin black separator, same height as the frame
            return np.zeros((f.shape[0], 5))

        with toolbox.video_capture(last.video_path) as cap:
            frame_list: typing.List[np.ndarray] = []
            for each in picked:
                frame = toolbox.get_frame(cap, each.frame_id)
                frame = toolbox.compress_frame(frame, *args, **kwargs)
                split_line = get_split_line(frame)
                frame_list.append(frame)
                frame_list.append(split_line)
        return np.hstack(frame_list)
Example #5
0
    def contain_image(self,
                      image_path: str = None,
                      image_object: np.ndarray = None,
                      *args,
                      **kwargs) -> typing.Dict:
        """Run template matching of an image against a picked frame.

        :param image_path: path to the template image, takes priority
        :param image_object: template already loaded as an ndarray
        :return: the raw TemplateEngine result dict (point, similarity, ...)
        """
        # NOTE: truth-testing a multi-element ndarray raises ValueError,
        # so the array must be compared against None explicitly
        assert image_path or image_object is not None, 'should fill image_path or image_object'

        if image_path:
            logger.debug(f'found image path, use it first: {image_path}')
            assert os.path.isfile(
                image_path), f'image {image_path} not existed'
            image_object = toolbox.imread(image_path)
        image_object = toolbox.turn_grey(image_object)

        # TODO use client or itself..?
        fi = FindIt(engine=['template'])
        fi_template_name = 'default'
        fi.load_template(fi_template_name, pic_object=image_object)

        with toolbox.video_capture(self.video.path) as cap:
            target_id = self.pick(*args, **kwargs)[0]
            frame = toolbox.get_frame(cap, target_id)
            frame = toolbox.turn_grey(frame)

            result = fi.find(str(target_id), target_pic_object=frame)
        return result['data'][fi_template_name]['TemplateEngine']
Example #6
0
    def pick_and_save(self,
                      range_list: typing.List[VideoCutRange],
                      frame_count: int,
                      to_dir: str = None,
                      compress_rate: float = None,
                      *args, **kwargs) -> str:
        """Pick frames from each range and save them as PNG files.

        :param range_list: ranges to sample frames from
        :param frame_count: number of frames picked per range
        :param to_dir: output directory; a timestamp-named one is created if omitted
        :param compress_rate: optional compression applied before saving
        :return: the output directory path
        """
        stage_list = list()
        for index, each_range in enumerate(range_list):
            picked = each_range.pick(frame_count, *args, **kwargs)
            logger.info(f'pick {picked} in range {each_range}')
            stage_list.append((index, picked))

        # create parent dir
        if not to_dir:
            to_dir = toolbox.get_timestamp_str()
        os.makedirs(to_dir, exist_ok=True)

        # open the video once for all stages: get_frame addresses frames by
        # absolute id, so the previous per-stage reopen was redundant work
        with toolbox.video_capture(self.video_path) as cap:
            for each_stage_id, each_frame_list in stage_list:
                # create sub dir
                each_stage_dir = os.path.join(to_dir, str(each_stage_id))
                os.makedirs(each_stage_dir, exist_ok=True)

                for each_frame_id in each_frame_list:
                    each_frame_path = os.path.join(each_stage_dir, f'{uuid.uuid4()}.png')
                    each_frame = toolbox.get_frame(cap, each_frame_id - 1)
                    if compress_rate:
                        each_frame = toolbox.compress_frame(each_frame, compress_rate)
                    cv2.imwrite(each_frame_path, each_frame)
                    logger.debug(f'frame [{each_frame_id}] saved to {each_frame_path}')

        return to_dir
Example #7
0
    def to_video_frame(self, *args, **kwargs) -> VideoFrame:
        """Convert this classifier result into a VideoFrame.

        VideoFrame carries the pixel `data`; SingleClassifierResult carries
        the `stage` instead, so the frame must be re-read from the video.
        """
        with toolbox.video_capture(self.video_path) as cap:
            raw = toolbox.get_frame(cap, self.frame_id)
            data = toolbox.compress_frame(raw, *args, **kwargs)
        return VideoFrame(self.frame_id, self.timestamp, data)
Example #8
0
 def read_from_list(self,
                    data: typing.List[int],
                    video_cap: cv2.VideoCapture = None,
                    *_,
                    **__):
     """Read the frames for the given ids, then restore the capture position.

     :param data: frame ids to read
     :param video_cap: an opened capture to read from
     :return: list of frames, in the same order as ``data``
     """
     cur_frame_id = toolbox.get_current_frame_id(video_cap)
     # read eagerly: the previous generator was lazy, so the position was
     # "restored" before any frame had been fetched, and after consumption
     # the capture was left wherever the last get_frame moved it
     frame_list = [toolbox.get_frame(video_cap, each) for each in data]
     toolbox.video_jump(video_cap, cur_frame_id)
     return frame_list
Example #9
0
 def get_frames(self, frame_id_list: typing.List[int], *_,
                **__) -> typing.List[toolbox.VideoFrame]:
     """ return a list of VideoFrame, usually works with pick """
     result: typing.List[toolbox.VideoFrame] = []
     with toolbox.video_capture(self.video.path) as cap:
         for frame_id in frame_id_list:
             ts = toolbox.get_frame_time(cap, frame_id)
             data = toolbox.get_frame(cap, frame_id)
             result.append(toolbox.VideoFrame(frame_id, ts, data))
     return result
Example #10
0
def test_get_frame():
    """Check timestamp lookup and position handling of get_frame."""
    with toolbox.video_capture(VIDEO_PATH) as cap:
        first_id, second_id = 5, 8

        # the timestamp lookup leaves the cursor on the requested frame
        frame_time = toolbox.get_frame_time(cap, first_id)
        current_time = toolbox.get_current_frame_time(cap)
        # should be frame 5
        assert frame_time == current_time

        # 5 -> 8 -> 5: the recover flag puts the cursor back afterwards
        recovered = toolbox.get_frame(cap, second_id, True)
        assert recovered is not None
        assert toolbox.get_current_frame_id(cap) == first_id

        # 5 -> 8: no recovery, cursor stays on the target frame
        target = toolbox.get_frame(cap, second_id)
        assert target is not None
        assert toolbox.get_current_frame_id(cap) == second_id
Example #11
0
    def to_video_frame(self, *args, **kwargs) -> VideoFrame:
        """Convert this classifier result into a VideoFrame.

        VideoFrame carries pixel `data`; SingleClassifierResult carries the
        `stage` and only optionally the data, so the frame is re-read from
        the video when no data is cached on this result.
        """
        if self.data is not None:
            # data already cached — no need to touch the video file
            return VideoFrame(self.frame_id, self.timestamp, self.data)
        with toolbox.video_capture(self.video_path) as cap:
            raw = toolbox.get_frame(cap, self.frame_id)
            packed = toolbox.compress_frame(raw, *args, **kwargs)
        return VideoFrame(self.frame_id, self.timestamp, packed)
Example #12
0
    def pick_and_save(self,
                      range_list: typing.List[VideoCutRange],
                      frame_count: int,
                      to_dir: str = None,

                      # in kwargs
                      # compress_rate: float = None,
                      # target_size: typing.Tuple[int, int] = None,
                      # to_grey: bool = None,

                      *args, **kwargs) -> str:
        """
        pick some frames from range, and save them as files

        :param range_list: VideoCutRange list
        :param frame_count: default to 3, and finally you will get 3 frames for each range
        :param to_dir: will saved to this path
        :param args:
        :param kwargs:
        :return:
        """
        stage_list = list()
        for stage_id, cut_range in enumerate(range_list):
            frame_ids = cut_range.pick(frame_count, *args, **kwargs)
            logger.info(f'pick {frame_ids} in range {cut_range}')
            stage_list.append((stage_id, frame_ids))

        # parent dir defaults to a timestamp-based name
        to_dir = to_dir or toolbox.get_timestamp_str()
        os.makedirs(to_dir, exist_ok=True)

        for stage_id, frame_ids in stage_list:
            # one sub dir per stage
            stage_dir = os.path.join(to_dir, str(stage_id))
            os.makedirs(stage_dir, exist_ok=True)

            with toolbox.video_capture(self.video_path) as cap:
                for frame_id in frame_ids:
                    frame_path = os.path.join(stage_dir, f'{uuid.uuid4()}.png')
                    frame = toolbox.get_frame(cap, frame_id - 1)
                    frame = toolbox.compress_frame(frame, **kwargs)
                    cv2.imwrite(frame_path, frame)
                    logger.debug(f'frame [{frame_id}] saved to {frame_path}')

        return to_dir