Пример #1
0
    def classify(self,
                 video_path: str,
                 data_home: str,
                 output_path: str = None,
                 compress_rate: float = 0.2,
                 limit: int = None):
        """
        Classify *video_path* with an SVM trained on frames stored in *data_home*.

        :param video_path: path of the video to classify
        :param data_home: directory containing training frames and, optionally,
            a previously dumped ``cut_result.json``
        :param output_path: directory for the generated report; falls back to data_home
        :param compress_rate: frame compress rate forwarded to the classifier
        :param limit: forwarded to ``get_range``; ignores stable ranges shorter than this
        """
        # TODO model?

        cut_result_json = os.path.join(data_home, 'cut_result.json')

        res = None
        stable = None
        # reuse a previously dumped cut result (if any) so classification
        # is restricted to the stable ranges it recorded
        if os.path.isfile(cut_result_json):
            res = VideoCutResult.load(cut_result_json)
            stable, _ = res.get_range(limit=limit)

        cl = SVMClassifier(compress_rate=compress_rate)
        cl.load(data_home)
        cl.train()
        # NOTE: stable may still be None here (no cut_result.json found);
        # the classifier then gets no range restriction
        classify_result = cl.classify(video_path, stable)

        # --- draw ---
        r = Reporter()
        r.add_dir_link(data_home)
        r.draw(
            classify_result,
            report_path=os.path.join(output_path or data_home, 'report.html'),
            cut_result=res,
        )
Пример #2
0
    def handle(self, video_path: str) -> bool:
        """
        Cut *video_path*, classify it with Keras and render a report.

        Loads a pre-trained model when ``self.model_path`` is set; otherwise
        trains a new model from the stable frames of this very video.
        Always returns True.
        """
        super(KerasHandler, self).handle(video_path)
        video = VideoObject(video_path)
        if self.preload:
            video.load_frames()

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video)
        stable, unstable = res.get_range(threshold=0.98, offset=3)

        # --- classify ---
        cl = KerasClassifier()
        if self.model_path:
            logger.info("load existed pre-train model")
            cl.load_model(self.model_path)
        else:
            # no model supplied: build a training set from this
            # video's own stable ranges and train from scratch
            data_home = res.pick_and_save(stable,
                                          self.frame_count,
                                          to_dir=self.result_path)
            cl.train(data_home)
        self.classifier_result = cl.classify(video, stable)

        # --- draw ---
        r = Reporter()
        r.draw(self.classifier_result, report_path=self.result_report_path)
        return True
Пример #3
0
def test_hook():
    """Smoke-test every hook flavour through the full cut/classify/draw pipeline."""
    # construct the hooks (same order as before, since some of them overwrite frames)
    example_hook = ExampleHook()
    overwrite_hook = ExampleHook(overwrite=True)
    ignore_hook = IgnoreHook(size=(0.5, 0.5), overwrite=True)
    frame_home = os.path.join(PROJECT_PATH, 'frame_save_dir')
    save_hook = FrameSaveHook(frame_home)
    crop_hook = CropHook(
        size=(0.5, 0.5),
        offset=(0., 0.5),
        overwrite=True,
    )
    refine_hook = RefineHook()
    invalid_hook = InvalidFrameDetectHook()
    template_hook = TemplateCompareHook({
        'amazon': IMAGE_PATH,
    })

    # --- cutter ---
    cutter = VideoCutter(compress_rate=0.8)
    for each_hook in (example_hook, overwrite_hook, ignore_hook, save_hook,
                      crop_hook, refine_hook, invalid_hook, template_hook):
        cutter.add_hook(each_hook)

    res = cutter.cut(VIDEO_PATH)
    stable, unstable = res.get_range()
    assert len(stable) == 2, 'count of stable range is not correct'

    data_home = res.pick_and_save(stable, 5)
    assert os.path.isdir(data_home), 'result dir not existed'

    # --- classify ---
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()
    classify_result = cl.classify(VIDEO_PATH, stable)

    # --- draw ---
    report_path = os.path.join(data_home, 'report.html')
    Reporter().draw(
        classify_result,
        report_path=report_path,
        cut_result=res,
    )
    assert os.path.isfile(report_path)

    # hooks with observable side effects / results
    assert os.path.isdir(frame_home)
    assert invalid_hook.result
    assert template_hook.result
Пример #4
0
def test_cut_and_classify():
    """End-to-end: cut, train, classify twice (fresh and from a saved model), draw."""
    model_name = "model1.pkl"
    report_name = "report.html"

    # cut + train
    res, data_home = _cut(VIDEO_PATH)
    _train(data_home, model_name)

    # classify once from scratch and once from the persisted model
    fresh_result = _classify(VIDEO_PATH, data_home)
    loaded_result = _classify(VIDEO_PATH, data_home, model=model_name)

    # --- draw ---
    reporter = Reporter()
    reporter.draw(
        fresh_result,
        report_path=os.path.join(data_home, report_name),
        cut_result=res,
    )
    # report_path may also be given as a directory
    reporter.draw(
        loaded_result,
        report_path=data_home,
        cut_result=res,
    )
Пример #5
0
def _draw_report(res):
    """Render *res* into CUTTER_RESULT_DIR and verify the report file exists."""
    target = os.path.join(CUTTER_RESULT_DIR, 'report.html')
    Reporter().draw(res, report_path=target)
    assert os.path.isfile(target)
Пример #6
0
    def one_step(self,
                 video_path: str,
                 output_path: str = None,
                 threshold: float = 0.95,
                 frame_count: int = 5,
                 compress_rate: float = 0.2,
                 offset: int = 3,
                 limit: int = None):
        """
        one step => cut, classifier, draw

        :param video_path: your video path
        :param output_path: output path (dir)
        :param threshold: float, 0-1, default to 0.95. decides whether a range is stable. larger => more unstable ranges
        :param frame_count: default to 5, and finally you will get 5 frames for each range
        :param compress_rate: before_pic * compress_rate = after_pic. default to 0.2
        :param offset:
            it will change the way to decide whether two ranges can be merged
            before: first_range.end == second_range.start
            after: first_range.end + offset >= second_range.start
        :param limit: ignore some ranges which are too short, 5 means ignore stable ranges which length < 5
        :return:
        """

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video_path, compress_rate=compress_rate)
        stable, unstable = res.get_range(
            threshold=threshold,
            limit=limit,
            offset=offset,
        )

        # persist the cut result next to the picked frames so later runs can reuse it
        data_home = res.pick_and_save(stable, frame_count, to_dir=output_path)
        res_json_path = os.path.join(data_home, 'cut_result.json')
        res.dump(res_json_path)

        # --- classify ---
        cl = SVMClassifier(compress_rate=compress_rate)
        cl.load(data_home)
        cl.train()
        classify_result = cl.classify(video_path, stable)

        # --- draw ---
        r = Reporter()
        r.draw(
            classify_result,
            report_path=os.path.join(data_home, 'report.html'),
            cut_result=res,

            # kwargs of get_range
            # otherwise these thumbnails may become different
            threshold=threshold,
            limit=limit,
            offset=offset,
        )
Пример #7
0
def test_boost():
    """Classify using preloaded frames and render a report."""
    video = VideoObject(VIDEO_PATH)
    video.load_frames()

    # cut, then classify against the picked frames
    cut_res, data_home = _cut(video)
    classify_result = _classify(video, data_home)

    # --- draw ---
    Reporter().draw(
        classify_result,
        report_path=os.path.join(data_home, "report.html"),
        cut_result=cut_res,
    )
Пример #8
0
def test_hook():
    """Smoke-test every hook flavour through the full cut/classify/draw pipeline."""
    # construct the hooks (same order as before, since some of them overwrite frames)
    example_hook = ExampleHook()
    plain_hook = ExampleHook()
    ignore_hook = IgnoreHook(size=(0.5, 0.5))
    frame_home = os.path.join(PROJECT_PATH, "frame_save_dir")
    save_hook = FrameSaveHook(frame_home)
    crop_hook = CropHook(size=(0.5, 0.5), offset=(0.0, 0.5))
    refine_hook = RefineHook()
    interest_hook = InterestPointHook()
    template_hook = TemplateCompareHook({"amazon": IMAGE_PATH})

    # --- cutter ---
    cutter = VideoCutter(compress_rate=0.9)
    for each_hook in (example_hook, plain_hook, ignore_hook, save_hook,
                      crop_hook, refine_hook, interest_hook, template_hook):
        cutter.add_hook(each_hook)

    res = cutter.cut(VIDEO_PATH)
    stable, unstable = res.get_range()
    assert len(stable) == 2, "count of stable range is not correct"

    data_home = res.pick_and_save(stable, 5)
    assert os.path.isdir(data_home), "result dir not existed"

    # --- classify ---
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()
    classify_result = cl.classify(VIDEO_PATH, stable)

    # --- draw ---
    report_path = os.path.join(data_home, "report.html")
    Reporter().draw(classify_result, report_path=report_path, cut_result=res)
    assert os.path.isfile(report_path)

    # hooks with observable side effects / results
    assert os.path.isdir(frame_home)
    assert interest_hook.result
    assert template_hook.result
Пример #9
0
def test_cut_and_classify():
    """Happy path: cut, train a model, classify, render the report."""
    cut_res, data_home = cut(VIDEO_PATH)
    train(data_home, "model1.pkl")
    classify_result = classify(VIDEO_PATH, data_home)

    # --- draw ---
    Reporter().draw(
        classify_result,
        report_path=os.path.join(data_home, "report.html"),
        cut_result=cut_res,
    )
Пример #10
0
    def one_step(self,
                 video_path: str,
                 output_path: str = None,
                 threshold: float = 0.95,
                 frame_count: int = 5,
                 compress_rate: float = 0.2,
                 limit: int = None):
        """
        one step => cut, classifier, draw

        :param video_path: your video path
        :param output_path: output path (dir)
        :param threshold: float, 0-1, default to 0.95. decides whether a range is stable. larger => more unstable ranges
        :param frame_count: default to 5, and finally you will get 5 frames for each range
        :param compress_rate: before_pic * compress_rate = after_pic. default to 0.2
        :param limit: ignore some ranges which are too short, 5 means ignore stable ranges which length < 5
        :return:
        """

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video_path, compress_rate=compress_rate)
        stable, unstable = res.get_range(
            threshold=threshold,
            limit=limit,
        )

        # persist the cut result next to the picked frames so later runs can reuse it
        data_home = res.pick_and_save(stable, frame_count, to_dir=output_path)
        res_json_path = os.path.join(data_home, 'cut_result.json')
        res.dump(res_json_path)

        # --- classify ---
        cl = SVMClassifier(compress_rate=compress_rate)
        cl.load(data_home)
        cl.train()
        classify_result = cl.classify(video_path, stable)

        # --- draw ---
        r = Reporter()
        r.add_dir_link(data_home)
        r.draw(
            classify_result,
            report_path=os.path.join(data_home, 'report.html'),
            cut_result=res,
        )
Пример #11
0
def test_save_and_load():
    """Round-trip a classify result through Reporter.save / Reporter.load."""
    cl = SVMClassifier()
    cl.load_model(MODEL_PATH)
    classify_result = cl.classify(VIDEO_PATH)

    result_file = "save.json"
    reporter = Reporter()
    reporter.add_extra("some_name", "some_value")
    reporter.save(result_file, classify_result)
    assert os.path.isfile(result_file)

    restored = Reporter.load(result_file)
    assert classify_result.get_length() == restored.get_length()
    # frame-by-frame equality of the serialized form
    for before, after in zip(classify_result.data, restored.data):
        assert before.to_dict() == after.to_dict()

    assert isinstance(reporter.get_stable_stage_sample(classify_result), np.ndarray)
    reporter.draw(classify_result)
Пример #12
0
def test_boost():
    """Classify with preloaded frames, then draw with several report options."""
    video = VideoObject(VIDEO_PATH)
    video.load_frames()

    # cut, then classify against the picked frames
    res, data_home = _cut(video)
    classify_result = _classify(video, data_home)

    report_path = os.path.join(data_home, "report.html")
    # default draw, then the compressing / resizing variants —
    # each with a fresh Reporter, each overwriting the same report file
    for extra_kwargs in ({}, {"compress_rate": 0.1}, {"target_size": (600, 800)}):
        Reporter().draw(
            classify_result,
            report_path=report_path,
            cut_result=res,
            **extra_kwargs,
        )
Пример #13
0
def analyse(
    video: typing.Union[str, VideoObject],
    output_path: str,
    pre_load: bool = True,
    threshold: float = 0.98,
    offset: int = 3,
    boost_mode: bool = True,
):
    """
    designed for https://github.com/williamfzc/stagesepx/issues/123

    Cut *video*, train a throw-away SVM on its stable frames (saved in a
    temporary directory that is removed afterwards), classify the video and
    write a report to *output_path*.

    :param video: video path or a prepared VideoObject
    :param output_path: report file path
    :param pre_load: load frames eagerly when *video* is given as a path
    :param threshold: stability threshold forwarded to ``get_range``
    :param offset: range-merge offset forwarded to ``get_range``
    :param boost_mode: forwarded to ``classify``
    """

    if isinstance(video, str):
        video = VideoObject(video, pre_load=pre_load)

    cutter = VideoCutter()
    res = cutter.cut(video)

    stable, unstable = res.get_range(
        threshold=threshold,
        offset=offset,
    )

    # the training set only lives for the duration of this block;
    # classification happens inside it, so nothing outlives the temp dir
    with tempfile.TemporaryDirectory() as temp_dir:
        res.pick_and_save(
            stable,
            5,
            to_dir=temp_dir,
        )

        cl = SVMClassifier()
        cl.load(temp_dir)
        cl.train()
        classify_result = cl.classify(video, stable, boost_mode=boost_mode)

    r = Reporter()
    r.draw(
        classify_result,
        report_path=output_path,
        unstable_ranges=unstable,
        cut_result=res,
    )
Пример #14
0
    def handle(self, video_path: str) -> bool:
        """Run cut -> SVM classify -> report for *video_path*; always returns True."""
        super(NormalHandler, self).handle(video_path)
        video = VideoObject(video_path)
        if self.preload:
            video.load_frames()

        # --- cutter ---
        cutter = VideoCutter()
        cut_res = cutter.cut(video)
        stable, unstable = cut_res.get_range(threshold=0.98, offset=3)
        data_home = cut_res.pick_and_save(
            stable, self.frame_count, to_dir=self.result_path)

        # --- classify ---
        classifier = SVMClassifier()
        classifier.load(data_home)
        classifier.train()
        self.classifier_result = classifier.classify(video, stable)

        # --- draw ---
        Reporter().draw(self.classifier_result, report_path=self.result_report_path)
        return True
Пример #15
0
    cutter = VideoCutter()
    res = cutter.cut(each_video_path)
    stable = res.get_stable_range()
    data_home = res.pick_and_save(stable, 3)
    print(stable)

    # classify
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()

    # NOTE: when a range is passed to the classify method,
    # only frames inside that range are analysed!
    # here only the stable ranges are passed in, so every frame outside
    # them is skipped and tagged as -1
    res = cl.classify(
        each_video_path,
        stable,
        # step size: tune it to balance speed against granularity
        # defaults to 1, i.e. every single frame is inspected
        step=1)

    # for better readability, stagesepx ships a built-in chart renderer,
    # so the analysis result can be drawn into a report directly
    report = Reporter()
    # directory paths can be linked into the report,
    # which makes it easy to browse related artifacts from the report page;
    # mind the relative location between these paths and the final report
    # so that they stay reachable
    report.add_dir_link(data_home)

    report.draw(res)
Пример #16
0
    # 把别的视频也配置在这里即可
]

for each_video_path in video_list:
    cutter = VideoCutter()
    res = cutter.cut(each_video_path)
    stable = res.get_stable_range()
    data_home = res.pick_and_save(stable, 3)
    print(stable)

    # classify
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()

    # NOTE: when a range is passed to the classify method,
    # only frames inside that range are analysed!
    # here only the stable ranges are passed in, so every frame outside
    # them is skipped and tagged as -1
    res = cl.classify(
        each_video_path,
        stable,
        # step size: tune it to balance speed against granularity
        # defaults to 1, i.e. every single frame is inspected
        step=1)

    # draw
    Reporter.draw(
        res,
        data_path=data_home,
    )
Пример #17
0
# after loading the data, the classifier must be trained first
cl.train()

# # after training you may persist the model
# cl.save_model('model.pkl')
# # or load an already trained one instead
# cl.load_model('model.pkl')

# start classifying
res = cl.classify(
    '../test.mp4',
    # step size: tune it to balance speed against granularity
    # defaults to 1, i.e. every single frame is inspected
    step=1,
)

# for better readability, stagesepx ships a built-in chart renderer,
# so the analysis result can be drawn into a report directly
report = Reporter()

# directory paths can be linked into the report,
# which makes it easy to browse related artifacts from the report page;
# mind the relative location between these paths and the final report
# so that they stay reachable
report.add_dir_link(data_home)

report.draw(
    res,
    report_path='report.html',
)
Пример #18
0
print(stage_1_list[-1].timestamp - stage_1_list[0].timestamp)
# see also the implementation of calc_changing_cost in report.py

# --- draw ---
r = Reporter()

# thumbnails can be embedded into the report directly;
# if not set manually, the report adds thumbnails automatically,
# but then draw() must receive the same kwargs as your get_range call —
# otherwise the auto-extracted stages use default args and may differ
# from what you expect (see the implementation in cli.py)
for each in unstable:
    r.add_thumbnail(
        f'{each.start}({each.start_time}) - {each.end}({each.end_time}), '
        f'duration: {each.end_time - each.start_time}', res.thumbnail(each))

# directory paths can be linked into the report,
# which makes it easy to browse related artifacts from the report page;
# mind the relative location between these paths and the final report
# so that they stay reachable
# r.add_dir_link(data_home)

# since 0.3.2 custom content can be added to the report (https://github.com/williamfzc/stagesepx/issues/13)
# r.add_extra('here is title', 'here is content')
r.draw(
    classify_result,
    report_path=os.path.join(data_home, 'report.html'),
    # added in 0.5.3, mostly for debugging:
    # pass the cutter result to append SSIM / MSE / PSNR trend charts
    cut_result=res,
)
Пример #19
0
def one_step(
    video: typing.Union[str, VideoObject],
    output_path: str = None,
    threshold: float = 0.95,
    frame_count: int = 5,
    compress_rate: float = 0.2,
    target_size: typing.Tuple[int, int] = None,
    offset: int = 3,
    limit: int = None,
):
    """
    one step => cut, classifier, draw

    :param video: video path or object
    :param output_path: output path (dir)
    :param threshold: float, 0-1, default to 0.95. decides whether a range is stable. larger => more unstable ranges
    :param frame_count: default to 5, and finally you will get 5 frames for each range
    :param compress_rate: before_pic * compress_rate = after_pic. default to 0.2
    :param target_size: (100, 200)
    :param offset:
        it will change the way to decide whether two ranges can be merged
        before: first_range.end == second_range.start
        after: first_range.end + offset >= second_range.start
    :param limit: ignore some ranges which are too short, 5 means ignore stable ranges which length < 5
    :return:
    """

    if isinstance(video, str):
        video = VideoObject(video)

    # --- cutter ---
    res, data_home = _cut(
        video,
        output_path,
        threshold=threshold,
        frame_count=frame_count,
        compress_rate=compress_rate,
        target_size=target_size,
        offset=offset,
        limit=limit,
    )
    stable, _ = res.get_range(threshold=threshold, limit=limit, offset=offset)

    # --- classify ---
    classify_result = _classify(
        video,
        data_home=data_home,
        compress_rate=compress_rate,
        target_size=target_size,
        limit_range=stable,
    )

    # --- draw ---
    r = Reporter()
    r.draw(
        classify_result,
        report_path=os.path.join(data_home, constants.REPORT_FILE_NAME),
        cut_result=res,
        # kwargs of get_range
        # otherwise these thumbnails may become different
        threshold=threshold,
        limit=limit,
        offset=offset,
    )
Пример #20
0
def run(config: typing.Union[dict, str]):
    """
    run with config

    :param config: config file path, or a preload dict

    Validates the config with pydantic, then executes the full pipeline:
    load video -> cut -> (load or train) classifier -> classify ->
    optional calc output -> draw report.
    :return:
    """
    # pydantic models describing the accepted config schema
    class _VideoUserConfig(BaseModel):
        path: str
        pre_load: bool = True
        fps: int = None

    class _CutterUserConfig(BaseModel):
        threshold: float = None
        frame_count: int = None
        offset: int = None
        limit: int = None
        block: int = None

        # common
        compress_rate: float = None
        target_size: typing.Tuple[int, int] = None

    class _ClassifierType(Enum):
        SVM = "svm"
        KERAS = "keras"

    class _ClassifierUserConfig(BaseModel):
        boost_mode: bool = None
        classifier_type: _ClassifierType = _ClassifierType.SVM
        model: str = None

        # common
        compress_rate: float = None
        target_size: typing.Tuple[int, int] = None

    class _CalcOperatorType(Enum):
        BETWEEN = "between"
        DISPLAY = "display"

    class _CalcOperator(BaseModel):
        name: str
        calc_type: _CalcOperatorType
        args: dict = dict()

    class _CalcUserConfig(BaseModel):
        output: str = None
        ignore_error: bool = None
        operators: typing.List[_CalcOperator] = None

    class _ExtraUserConfig(BaseModel):
        save_train_set: str = None

    class UserConfig(BaseModel):
        output: str
        video: _VideoUserConfig
        cutter: _CutterUserConfig = _CutterUserConfig()
        classifier: _ClassifierUserConfig = _ClassifierUserConfig()
        calc: _CalcUserConfig = _CalcUserConfig()
        extras: _ExtraUserConfig = _ExtraUserConfig()

    if isinstance(config, str):
        # path
        config_path = pathlib.Path(config)
        assert config_path.is_file(), f"no config file found in {config_path}"

        # todo: support different types in the future
        assert config_path.as_posix().endswith(
            ".json"), "config file should be json format"
        with open(config_path, encoding=constants.CHARSET) as f:
            config = json.load(f)

    config = UserConfig(**config)
    logger.info(f"config: {config}")

    # main flow
    video = VideoObject(
        # fmt: off
        path=config.video.path,
        fps=config.video.fps,
    )
    if config.video.pre_load:
        video.load_frames()

    # cut
    cutter = VideoCutter(
        # fmt: off
        compress_rate=config.cutter.compress_rate,
        target_size=config.cutter.target_size,
    )
    res = cutter.cut(
        # fmt: off
        video=video,
        block=config.cutter.block,
    )
    stable, unstable = res.get_range(
        # fmt: off
        threshold=config.cutter.threshold,
        offset=config.cutter.offset,
    )

    # train-set frames default to a temp dir unless extras.save_train_set is set
    with tempfile.TemporaryDirectory() as temp_dir:
        # classify
        if config.classifier.classifier_type is _ClassifierType.SVM:
            cl = SVMClassifier(
                # fmt: off
                compress_rate=config.classifier.compress_rate,
                target_size=config.classifier.target_size,
            )
        elif config.classifier.classifier_type is _ClassifierType.KERAS:
            from stagesepx.classifier.keras import KerasClassifier

            cl = KerasClassifier(
                # fmt: off
                compress_rate=config.classifier.compress_rate,
                target_size=config.classifier.target_size,
            )
        # validation has been applied by pydantic
        # so no `else`

        if config.classifier.model:
            # no need to retrain
            model_path = pathlib.Path(config.classifier.model)
            assert model_path.is_file(), f"file {model_path} not existed"
            cl.load_model(model_path)
        else:
            # train a new model
            train_set_dir = config.extras.save_train_set or temp_dir
            os.makedirs(train_set_dir, exist_ok=True)

            res.pick_and_save(
                # fmt: off
                stable,
                frame_count=config.cutter.frame_count,
                to_dir=train_set_dir,
            )
            cl.train(data_path=train_set_dir)

    # start classifying
    # NOTE: this runs after the temp dir above has been removed;
    # the trained/loaded model is already held in memory by `cl`
    classify_result = cl.classify(
        # fmt: off
        video,
        stable,
        boost_mode=config.classifier.boost_mode,
    )

    # calc
    def _calc_display() -> dict:
        # jsonify
        return json.loads(classify_result.dumps())

    def _calc_between(*, from_stage: str = None, to_stage: str = None) -> dict:
        # time cost between the last frame of from_stage
        # and the first frame of to_stage
        assert classify_result.contain(
            from_stage), f"no stage {from_stage} found in result"
        assert classify_result.contain(
            to_stage), f"no stage {to_stage} found in result"
        from_frame = classify_result.last(from_stage)
        to_frame = classify_result.first(to_stage)
        cost = to_frame.timestamp - from_frame.timestamp
        return {
            "from": from_frame.frame_id,
            "to": to_frame.frame_id,
            "cost": cost,
        }

    _calc_func_dict = {
        _CalcOperatorType.BETWEEN: _calc_between,
        _CalcOperatorType.DISPLAY: _calc_display,
    }
    calc_output = config.calc.output
    if calc_output:
        output_path = pathlib.Path(calc_output)
        assert not output_path.is_file(), f"file {output_path} already existed"
        result = []
        for each_calc in config.calc.operators:
            func = _calc_func_dict[each_calc.calc_type]
            try:
                func_ret = func(**each_calc.args)
            except Exception as e:
                # with ignore_error the traceback is recorded as the result
                # instead of aborting the whole run
                if not config.calc.ignore_error:
                    raise
                logger.warning(e)
                func_ret = traceback.format_exc()
            calc_ret = {
                "name": each_calc.name,
                "type": each_calc.calc_type.value,
                "result": func_ret,
            }
            result.append(calc_ret)
        with open(output_path, "w", encoding=constants.CHARSET) as f:
            json.dump(result, f)

    # draw
    r = Reporter()
    r.draw(
        # fmt: off
        classify_result,
        report_path=config.output,
    )
Пример #21
0
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SSIMClassifier
from stagesepx.reporter import Reporter

# cut
video_path = '../test.mp4'
cutter = VideoCutter()
res = cutter.cut(video_path)
stable = res.get_stable_range()

# classify
# the SSIM classifier consumes the stable ranges directly — no training step
cl = SSIMClassifier()
cl.load(stable)

res = cl.classify(
    video_path,
    stable,
)

# draw
r = Reporter()
r.draw(res)
Пример #22
0
cl = SVMClassifier(feature_type='hog')

# mostly the same flow as the SSIM classifier,
# but its data requirements differ a little — see the notes in cut.py
data_home = './cut_result'
cl.load(data_home)

# after loading the data, the classifier must be trained first
cl.train()

# # after training you may persist the model
# cl.save_model('model.pkl')
# # or load an already trained one instead
# cl.load_model('model.pkl')

# start classifying
res = cl.classify(
    '../test.mp4',
    # step size: tune it to balance speed against granularity
    # defaults to 1, i.e. every single frame is inspected
    step=1
)

Reporter.draw(
    res,
    report_path='report.html',

    # show the pictures of each stage in the final report
    data_path=data_home,
)
Пример #23
0
# the ClassifyResult object can also be inspected directly
cr_dict = classify_result.to_dict()
print(cr_dict)

# contain_image (>=0.9.1)
# template matching lets you compare the final result with your expectation,
# telling you whether a stage looks the way you expect it to;
# fully automated verification can be built on top of this
# res = data_list[0].contain_image(image_path="path/to/your/template/path")
# print(res)
# you would get something like:
# {'target_point': [550, 915], 'target_sim': 0.9867244362831116, 'ok': True}

# --- draw ---
r = Reporter()

# custom data can be inserted into the report
r.add_extra("data_home", data_home)

# since 0.3.2 custom content can be added to the report (https://github.com/williamfzc/stagesepx/issues/13)
# r.add_extra('here is title', 'here is content')
r.draw(
    classify_result,
    report_path=os.path.join(data_home, "report.html"),
    # passing unstable marks the matching parts as unstable;
    # this affects the final analysis result
    unstable_ranges=unstable,
    # added in 0.5.3, mostly for debugging:
    # pass the cutter result to append SSIM / MSE / PSNR trend charts
    cut_result=res,
)
Пример #24
0
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SSIMClassifier
from stagesepx.reporter import Reporter

# cut
video_path = '../test.mp4'
cutter = VideoCutter()
res = cutter.cut(video_path)
stable = res.get_stable_range()

# classify
# the SSIM classifier consumes the stable ranges directly — no training step
cl = SSIMClassifier()
cl.load(stable)

res = cl.classify(
    video_path,
    stable,
)

# draw
Reporter.draw(res)
Пример #25
0
from stagesepx.classifier import SVMClassifier
from stagesepx.cutter import VideoCutter
from stagesepx.reporter import Reporter

TARGET_VIDEO = '../../demo.mp4'

# cut
# cutting is still used here, mainly to handle the transition phases nicely;
# pick_and_save is not needed this time, because the classifier
# will not consume the cutter's data
cutter = VideoCutter()
res = cutter.cut(TARGET_VIDEO)
stable, _ = res.get_range()

# classify
# these args must stay consistent with train.py, if you changed them there
cl = SVMClassifier()
cl.load_model('./model.pkl')

classify_result = cl.classify(
    TARGET_VIDEO,
    stable,
)

r = Reporter()
r.draw(
    classify_result,
    report_path='report.html',
    cut_result=res,
)
Пример #26
0
    print(each.frame_id)
    # 它的时间戳
    print(each.timestamp)
    # 它被划分为什么类型
    print(each.stage)
    break

# --- draw ---
r = Reporter()
r.add_dir_link(data_home)

# thumbnails can be embedded into the report directly
for each in unstable:
    r.add_thumbnail(
        f'{each.start}({each.start_time}) - {each.end}({each.end_time})',
        res.thumbnail(each))

# directory paths can be linked into the report,
# which makes it easy to browse related artifacts from the report page;
# mind the relative location between these paths and the final report
# so that they stay reachable
# r.add_dir_link(data_home)

# since 0.3.2 custom content can be added to the report (https://github.com/williamfzc/stagesepx/issues/13)
# r.add_extra('here is title', 'here is content')
r.draw(
    classify_result,
    # added in 0.5.3, mostly for debugging:
    # pass the cutter result to append SSIM / MSE / PSNR trend charts
    cut_result=res,
)
Пример #27
0
# cut
video_path = '../test.mp4'
cutter = VideoCutter()
res = cutter.cut(video_path)
stable = res.get_stable_range()
data_home = res.pick_and_save(stable, 3)
print(stable)

# classify
cl = SVMClassifier()
cl.load(data_home)
cl.train()

# NOTE: when a range is passed to the classify method,
# only frames inside that range are analysed!
# here only the stable ranges are passed in, so every frame outside
# them is skipped and tagged as -1
res = cl.classify(
    video_path,
    stable,
    # step size: tune it to balance speed against granularity
    # defaults to 1, i.e. every single frame is inspected
    step=1)

# draw
Reporter.draw(
    res,
    report_path=f'{data_home}/report.html',
    data_path=data_home,
)
Пример #28
0
def write_result_to_local(_video_file_name, _from_movie_2_picture,
                          _result_dict, _classify_result):
    """
    Persist the raw stage dict and an HTML report for one video, and build
    the CSV row describing when key stages were reached.

    :param _video_file_name: path containing ``\\<name>.mp4`` (Windows-style)
    :param _from_movie_2_picture: base output directory
    :param _result_dict: stage name -> list of frame groups; each entry's
        repr is expected to contain ``frame_id=... timestamp=...>``
    :param _classify_result: classify result passed straight to Reporter.draw
    :return: list of CSV cell values:
        [file name, then (timestamp, frame_id) per stage '-3', '1', '4',
        with 'None', 'None' for missing stages]
    """
    result_row = []
    mp4_filename = re.search(r'\\(.*).mp4', str(_video_file_name),
                             re.M | re.I).group(1)

    # dump the raw result dict next to the report, one entry per line
    txt_html_path = _from_movie_2_picture + '/forecast_stable_' + mp4_filename + '/' + mp4_filename
    # fix: the original leaked this file handle (open without close)
    with open(txt_html_path + '.txt', 'a+') as f:
        f.write(str(_result_dict).replace(', ', ',\n'))

    result_row.append(mp4_filename + '.mp4')

    # --- draw ---
    r = Reporter()
    r.draw(_classify_result, txt_html_path + '.html')

    # stage '-3': user click; stage '1': alternative click marker;
    # stage '4': target page reached (same extraction for each)
    for stage_key in ('-3', '1', '4'):
        _append_stage_times(_result_dict, stage_key, result_row)

    return result_row


def _append_stage_times(_result_dict, stage_key, result_row):
    """Append (timestamp, frame_id) of the first frame of *stage_key*, or two 'None' cells."""
    if stage_key in _result_dict and len(_result_dict[stage_key]) > 0:
        match = re.search(r'frame_id=(.*) timestamp=(.*)>',
                          str(_result_dict[stage_key][0][0]), re.M | re.I)
        result_row.append(str(match.group(2)))
        result_row.append(str(match.group(1)))
    else:
        result_row.append('None')
        result_row.append('None')
Пример #29
0
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SVMClassifier
from stagesepx.reporter import Reporter
from stagesepx.video import VideoObject


video_path = "../videos/long.mp4"
video = VideoObject(video_path)
# preload all frames so the later steps can reuse them
video.load_frames()

# --- cutter ---
cutter = VideoCutter()
res = cutter.cut(video)
stable, unstable = res.get_range()
data_home = res.pick_and_save(stable, 5)

# --- classify ---
cl = SVMClassifier(compress_rate=0.4)
cl.load(data_home)
cl.train()
# keep_data=True — presumably retains per-frame data on the result items;
# confirm against the classifier documentation
classify_result = cl.classify(video, stable, keep_data=True)
result_dict = classify_result.to_dict()

# --- draw ---
r = Reporter()
r.draw(classify_result)
Пример #30
0
video = VideoObject(file_name)

# build the frame objects: total frame count and ms per frame
video.load_frames()

# compress the video
cutter = VideoCutter()

# compute ssim and psnr for every block of every frame
res = cutter.cut(video)

# decide whether each span between frame A and frame B is stable or not
stable, unstable = res.get_range()

# save the classified stable-stage pictures locally
res.pick_and_save(stable, 20, to_dir='./forecast_frame', meaningful_name=True)

# classify the stable ranges that were split out
classify_result = cl.classify(file_name, stable, keep_data=True)
result_dict = classify_result.to_dict()

# print the result
print(result_dict)
with open('./result.txt', 'w') as f:
    f.write(str(result_dict))

# emit the HTML report
r = Reporter()
r.draw(classify_result, './result.html')
# TODO: time calc = 3[0]-0[-1]