Example #1
    def classify(self,
                 video_path: str,
                 data_home: str,
                 output_path: str = None,
                 compress_rate: float = 0.2,
                 offset: int = 3,
                 limit: int = None):
        # TODO model?

        cut_result_json = os.path.join(data_home, 'cut_result.json')

        res = None
        stable = None
        if os.path.isfile(cut_result_json):
            res = VideoCutResult.load(cut_result_json)
            stable, _ = res.get_range(offset=offset, limit=limit)

        cl = SVMClassifier(compress_rate=compress_rate)
        cl.load(data_home)
        cl.train()
        classify_result = cl.classify(video_path, stable)

        # --- draw ---
        r = Reporter()
        r.draw(
            classify_result,
            report_path=os.path.join(output_path or data_home, 'report.html'),
            cut_result=res,
        )
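
This classify helper reuses a previously dumped cut_result.json when one exists under data_home. A minimal sketch of producing that file with the dump/load calls used elsewhere on this page (the video path is a placeholder):

from stagesepx.cutter import VideoCutter

cutter = VideoCutter()
cut_res = cutter.cut('demo.mp4')  # placeholder video path
# persist the cut result so a later classify run can reload it via VideoCutResult.load(...)
cut_res.dump('data_home/cut_result.json')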
Example #2
def test_hook():
    # init hook
    hook = ExampleHook()
    hook1 = ExampleHook(overwrite=True)
    hook2 = IgnoreHook(size=(0.5, 0.5), overwrite=True)
    frame_home = os.path.join(PROJECT_PATH, 'frame_save_dir')
    hook3 = FrameSaveHook(frame_home)
    hook4 = CropHook(
        size=(0.5, 0.5),
        offset=(0., 0.5),
        overwrite=True,
    )
    hook5 = RefineHook()
    hook6 = InvalidFrameDetectHook()
    hook7 = TemplateCompareHook({
        'amazon': IMAGE_PATH,
    })

    # --- cutter ---
    cutter = VideoCutter(compress_rate=0.8)
    # add hook
    cutter.add_hook(hook)
    cutter.add_hook(hook1)
    cutter.add_hook(hook2)
    cutter.add_hook(hook3)
    cutter.add_hook(hook4)
    cutter.add_hook(hook5)
    cutter.add_hook(hook6)
    cutter.add_hook(hook7)

    res = cutter.cut(VIDEO_PATH)
    stable, unstable = res.get_range()
    assert len(stable) == 2, 'count of stable range is not correct'

    data_home = res.pick_and_save(
        stable,
        5,
    )
    assert os.path.isdir(data_home), 'result dir does not exist'

    # --- classify ---
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()
    classify_result = cl.classify(VIDEO_PATH, stable)

    # --- draw ---
    r = Reporter()
    report_path = os.path.join(data_home, 'report.html')
    r.draw(
        classify_result,
        report_path=report_path,
        cut_result=res,
    )
    assert os.path.isfile(report_path)

    # hook check
    assert os.path.isdir(frame_home)
    assert hook6.result
    assert hook7.result
Example #3
    def handle(self, video_path: str) -> bool:
        super(KerasHandler, self).handle(video_path)
        video = VideoObject(video_path)
        if self.preload:
            video.load_frames()

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video)
        stable, unstable = res.get_range(threshold=0.98, offset=3)

        # --- classify ---
        cl = KerasClassifier()
        if self.model_path:
            logger.info("load existing pre-trained model")
            cl.load_model(self.model_path)
        else:
            data_home = res.pick_and_save(stable,
                                          self.frame_count,
                                          to_dir=self.result_path)
            cl.train(data_home)
        self.classifier_result = cl.classify(video, stable)

        # --- draw ---
        r = Reporter()
        r.draw(self.classifier_result, report_path=self.result_report_path)
        return True
Example #4
def _draw_report(res):
    r = Reporter()
    report_path = os.path.join(CUTTER_RESULT_DIR, 'report.html')
    r.draw(
        res,
        report_path=report_path,
    )
    assert os.path.isfile(report_path)
Example #5
    def one_step(self,
                 video_path: str,
                 output_path: str = None,
                 threshold: float = 0.95,
                 frame_count: int = 5,
                 compress_rate: float = 0.2,
                 offset: int = 3,
                 limit: int = None):
        """
        one step => cut, classify, draw

        :param video_path: path to your video
        :param output_path: output directory
        :param threshold: float, 0-1, defaults to 0.95. decides whether a range is stable. larger => more unstable ranges
        :param frame_count: defaults to 5, i.e. 5 frames will be picked from each range
        :param compress_rate: before_pic * compress_rate = after_pic. defaults to 0.2
        :param offset:
            changes how two ranges are judged mergeable
            before: first_range.end == second_range.start
            after: first_range.end + offset >= second_range.start
        :param limit: ignore ranges which are too short; 5 means ignore stable ranges whose length is < 5
        :return:
        """

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video_path, compress_rate=compress_rate)
        stable, unstable = res.get_range(
            threshold=threshold,
            limit=limit,
            offset=offset,
        )

        data_home = res.pick_and_save(stable, frame_count, to_dir=output_path)
        res_json_path = os.path.join(data_home, 'cut_result.json')
        res.dump(res_json_path)

        # --- classify ---
        cl = SVMClassifier(compress_rate=compress_rate)
        cl.load(data_home)
        cl.train()
        classify_result = cl.classify(video_path, stable)

        # --- draw ---
        r = Reporter()
        r.draw(
            classify_result,
            report_path=os.path.join(data_home, 'report.html'),
            cut_result=res,

            # kwargs of get_range
            # otherwise these thumbnails may become different
            threshold=threshold,
            limit=limit,
            offset=offset,
        )
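
For reference, a minimal sketch of how the threshold / offset / limit knobs documented above feed into get_range (the video path is a placeholder):

from stagesepx.cutter import VideoCutter

cutter = VideoCutter()
res = cutter.cut('demo.mp4', compress_rate=0.2)
stable, unstable = res.get_range(
    threshold=0.95,  # higher threshold => more ranges judged unstable
    offset=3,        # lets first_range.end + 3 >= second_range.start count as mergeable
    limit=None,      # e.g. limit=5 would drop stable ranges shorter than 5 frames
)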
def test_save_and_load():
    cl = SVMClassifier()
    cl.load_model(MODEL_PATH)
    classify_result = cl.classify(VIDEO_PATH)

    result_file = "save.json"
    reporter = Reporter()
    reporter.save(result_file, classify_result)
    assert os.path.isfile(result_file)
    classify_result_after = Reporter.load(result_file)

    assert len(classify_result) == len(classify_result_after)
    for i, j in zip(classify_result, classify_result_after):
        assert i.to_dict() == j.to_dict()
Example #7
def test_boost():
    video = VideoObject(VIDEO_PATH)
    video.load_frames()
    # test cut
    res, data_home = _cut(video)

    # test classify
    classify_result = _classify(video, data_home)

    # --- draw ---
    r = Reporter()
    r.draw(
        classify_result,
        report_path=os.path.join(data_home, "report.html"),
        cut_result=res,
    )
def test_hook():
    # init hook
    hook = ExampleHook()
    hook1 = ExampleHook()
    hook2 = IgnoreHook(size=(0.5, 0.5))
    frame_home = os.path.join(PROJECT_PATH, "frame_save_dir")
    hook3 = FrameSaveHook(frame_home)
    hook4 = CropHook(size=(0.5, 0.5), offset=(0.0, 0.5))
    hook5 = RefineHook()
    hook6 = InterestPointHook()
    hook7 = TemplateCompareHook({"amazon": IMAGE_PATH})

    # --- cutter ---
    cutter = VideoCutter(compress_rate=0.9)
    # add hook
    cutter.add_hook(hook)
    cutter.add_hook(hook1)
    cutter.add_hook(hook2)
    cutter.add_hook(hook3)
    cutter.add_hook(hook4)
    cutter.add_hook(hook5)
    cutter.add_hook(hook6)
    cutter.add_hook(hook7)

    res = cutter.cut(VIDEO_PATH)
    stable, unstable = res.get_range()
    assert len(stable) == 2, "count of stable range is not correct"

    data_home = res.pick_and_save(stable, 5)
    assert os.path.isdir(data_home), "result dir does not exist"

    # --- classify ---
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()
    classify_result = cl.classify(VIDEO_PATH, stable)

    # --- draw ---
    r = Reporter()
    report_path = os.path.join(data_home, "report.html")
    r.draw(classify_result, report_path=report_path, cut_result=res)
    assert os.path.isfile(report_path)

    # hook check
    assert os.path.isdir(frame_home)
    assert hook6.result
    assert hook7.result
Example #9
def test_cut_and_classify():
    # test cut
    res, data_home = cut(VIDEO_PATH)

    # test train
    train(data_home, "model1.pkl")

    # test classify
    classify_result = classify(VIDEO_PATH, data_home)

    # --- draw ---
    r = Reporter()
    r.draw(
        classify_result,
        report_path=os.path.join(data_home, "report.html"),
        cut_result=res,
    )
Example #10
def test_save_and_load():
    cl = SVMClassifier()
    cl.load_model(MODEL_PATH)
    classify_result = cl.classify(VIDEO_PATH)

    result_file = "save.json"
    reporter = Reporter()
    reporter.add_extra("some_name", "some_value")
    reporter.save(result_file, classify_result)
    assert os.path.isfile(result_file)
    classify_result_after = Reporter.load(result_file)

    assert classify_result.get_length() == classify_result_after.get_length()
    for i, j in zip(classify_result.data, classify_result_after.data):
        assert i.to_dict() == j.to_dict()

    assert isinstance(reporter.get_stable_stage_sample(classify_result), np.ndarray)
    reporter.draw(classify_result)
Example #11
def test_cut_and_classify():
    model_name = "model1.pkl"
    report_name = "report.html"

    # test cut
    res, data_home = _cut(VIDEO_PATH)

    # test train
    _train(data_home, model_name)

    # test classify
    classify_result = _classify(VIDEO_PATH, data_home)
    classify_result_1 = _classify(VIDEO_PATH, data_home, model=model_name)

    # --- draw ---
    r = Reporter()
    r.draw(
        classify_result,
        report_path=os.path.join(data_home, report_name),
        cut_result=res,
    )
    r.draw(
        classify_result_1,
        report_path=data_home,
        cut_result=res,
    )
Example #12
def analyse(
    video: typing.Union[str, VideoObject],
    output_path: str,
    pre_load: bool = True,
    threshold: float = 0.98,
    offset: int = 3,
    boost_mode: bool = True,
):
    """ designed for https://github.com/williamfzc/stagesepx/issues/123 """

    if isinstance(video, str):
        video = VideoObject(video, pre_load=pre_load)

    cutter = VideoCutter()
    res = cutter.cut(video)

    stable, unstable = res.get_range(
        threshold=threshold,
        offset=offset,
    )

    with tempfile.TemporaryDirectory() as temp_dir:
        res.pick_and_save(
            stable,
            5,
            to_dir=temp_dir,
        )

        cl = SVMClassifier()
        cl.load(temp_dir)
        cl.train()
        classify_result = cl.classify(video, stable, boost_mode=boost_mode)

    r = Reporter()
    r.draw(
        classify_result,
        report_path=output_path,
        unstable_ranges=unstable,
        cut_result=res,
    )
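
A sketch of calling the analyse helper above directly, with placeholder paths:

analyse('demo.mp4', 'analyse_report.html')                       # plain path
analyse(VideoObject('demo.mp4', pre_load=True), 'report.html')   # or a preloaded VideoObject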
Example #13
    def handle(self, video_path: str) -> bool:
        super(NormalHandler, self).handle(video_path)
        video = VideoObject(video_path)
        if self.preload:
            video.load_frames()

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video)
        stable, unstable = res.get_range(threshold=0.98, offset=3)
        data_home = res.pick_and_save(stable,
                                      self.frame_count,
                                      to_dir=self.result_path)

        # --- classify ---
        cl = SVMClassifier()
        cl.load(data_home)
        cl.train()
        self.classifier_result = cl.classify(video, stable)

        # --- draw ---
        r = Reporter()
        r.draw(self.classifier_result, report_path=self.result_report_path)
        return True
Example #14
    def one_step(self,
                 video_path: str,
                 output_path: str = None,
                 threshold: float = 0.95,
                 frame_count: int = 5,
                 compress_rate: float = 0.2,
                 limit: int = None):
        """
        one step => cut, classify, draw

        :param video_path: path to your video
        :param output_path: output directory
        :param threshold: float, 0-1, defaults to 0.95. decides whether a range is stable. larger => more unstable ranges
        :param frame_count: defaults to 5, i.e. 5 frames will be picked from each range
        :param compress_rate: before_pic * compress_rate = after_pic. defaults to 0.2
        :param limit: ignore ranges which are too short; 5 means ignore stable ranges whose length is < 5
        :return:
        """

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video_path, compress_rate=compress_rate)
        stable, unstable = res.get_range(
            threshold=threshold,
            limit=limit,
        )

        data_home = res.pick_and_save(stable, frame_count, to_dir=output_path)
        res_json_path = os.path.join(data_home, 'cut_result.json')
        res.dump(res_json_path)

        # --- classify ---
        cl = SVMClassifier(compress_rate=compress_rate)
        cl.load(data_home)
        cl.train()
        classify_result = cl.classify(video_path, stable)

        # --- draw ---
        r = Reporter()
        r.add_dir_link(data_home)
        r.draw(
            classify_result,
            report_path=os.path.join(data_home, 'report.html'),
            cut_result=res,
        )
Example #15
# after the data has been loaded, the model must be trained first
cl.train()

# # after training you can save the model
# cl.save_model('model.pkl')
# # or load a model that has already been trained
# cl.load_model('model.pkl')

# start classifying
res = cl.classify(
    '../test.mp4',
    # step size; tune it to balance speed against granularity
    # defaults to 1, i.e. every frame is checked
    step=1,
)

# for readability, stagesepx ships with built-in chart drawing,
# so you can render the analysis result as a report directly
report = Reporter()

# you can insert directory paths into the report
# so that the related files are easy to reach from the report itself;
# just make sure these paths stay reachable relative to where the report will finally live
report.add_dir_link(data_home)

report.draw(
    res,
    report_path='report.html',
)
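
The commented-out lines above hint at persisting the trained model; a minimal sketch of that round trip, assuming the same 'model.pkl' path:

from stagesepx.classifier import SVMClassifier

cl.save_model('model.pkl')        # persist right after cl.train()
cl2 = SVMClassifier()
cl2.load_model('model.pkl')       # a later run can then skip training entirely
res2 = cl2.classify('../test.mp4', step=1)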
Example #16
data_home = res.pick_and_save(stable, 3)

# classify
cl = SVMClassifier()
cl.load(data_home)
cl.train()

# note: if ranges are passed to the classify method,
# only frames inside those ranges will be analysed!
# here only the stable ranges are passed in, so frames outside them are
# skipped and marked as stage -1 (see the short sketch after this snippet)
classify_result = cl.classify(
    video_path,
    stable,
    # step size; tune it to balance speed against granularity
    # defaults to 1, i.e. every frame is checked
    step=1)

# draw
r = Reporter()
r.add_dir_link(data_home)

# thumbnails can be embedded directly into the report
for each in unstable:
    r.add_thumbnail(
        f'{each.start}({each.start_time}) - {each.end}({each.end_time})',
        res.thumbnail(each))

# since 0.3.2 you can add custom content to the report (https://github.com/williamfzc/stagesepx/issues/13)
# r.add_extra('here is title', 'here is content')
r.draw(classify_result)
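
As noted above, frames outside the ranges handed to classify are tagged with stage '-1'; a small sketch of filtering them out of the result:

# hypothetical post-processing: keep only the frames that were actually classified
classified = [each for each in classify_result if each.stage != '-1']
print(f'{len(classified)} of {len(classify_result)} frames fell inside the stable ranges')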
Example #17
for each in classify_result:
    # its frame number
    print(each.frame_id)
    # its timestamp
    print(each.timestamp)
    # which stage it was classified into
    print(each.stage)
    break
# for example, extract the frames belonging to stage 1
stage_1_list = [each for each in classify_result if each.stage == '1']
# and work out how long that stage lasts
print(stage_1_list[-1].timestamp - stage_1_list[0].timestamp)
# you can also look at how calc_changing_cost is implemented in report.py

# --- draw ---
r = Reporter()

# thumbnails can be embedded directly into the report
# if you do not add them manually, the report will add thumbnails automatically,
# but then you must pass draw() the same arguments you gave get_range,
# otherwise the automatically extracted stages use default parameters
# and may differ from what you expect (see cli.py, and the sketch after this snippet)
for each in unstable:
    r.add_thumbnail(
        f'{each.start}({each.start_time}) - {each.end}({each.end_time}), '
        f'duration: {each.end_time - each.start_time}', res.thumbnail(each))

# you can insert directory paths into the report
# so that the related files are easy to reach from the report itself;
# just make sure these paths stay reachable relative to where the report will finally live
# r.add_dir_link(data_home)
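
As the comments above explain, when you rely on automatically extracted thumbnails you should hand draw() the same keyword arguments you gave get_range; a sketch with placeholder values, mirroring the one_step examples on this page:

r.draw(
    classify_result,
    report_path='report.html',
    cut_result=res,
    # same kwargs as the earlier get_range call, so the auto-extracted stages line up
    threshold=0.95,
    limit=None,
    offset=3,
)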
Example #18
def one_step(
    video: typing.Union[str, VideoObject],
    output_path: str = None,
    threshold: float = 0.95,
    frame_count: int = 5,
    compress_rate: float = 0.2,
    target_size: typing.Tuple[int, int] = None,
    offset: int = 3,
    limit: int = None,
):
    """
    one step => cut, classify, draw

    :param video: video path or object
    :param output_path: output directory
    :param threshold: float, 0-1, defaults to 0.95. decides whether a range is stable. larger => more unstable ranges
    :param frame_count: defaults to 5, i.e. 5 frames will be picked from each range
    :param compress_rate: before_pic * compress_rate = after_pic. defaults to 0.2
    :param target_size: e.g. (100, 200)
    :param offset:
        changes how two ranges are judged mergeable
        before: first_range.end == second_range.start
        after: first_range.end + offset >= second_range.start
    :param limit: ignore ranges which are too short; 5 means ignore stable ranges whose length is < 5
    :return:
    """

    if isinstance(video, str):
        video = VideoObject(video)

    # --- cutter ---
    res, data_home = _cut(
        video,
        output_path,
        threshold=threshold,
        frame_count=frame_count,
        compress_rate=compress_rate,
        target_size=target_size,
        offset=offset,
        limit=limit,
    )
    stable, _ = res.get_range(threshold=threshold, limit=limit, offset=offset)

    # --- classify ---
    classify_result = _classify(
        video,
        data_home=data_home,
        compress_rate=compress_rate,
        target_size=target_size,
        limit_range=stable,
    )

    # --- draw ---
    r = Reporter()
    r.draw(
        classify_result,
        report_path=os.path.join(data_home, constants.REPORT_FILE_NAME),
        cut_result=res,
        # kwargs of get_range
        # otherwise these thumbnails may become different
        threshold=threshold,
        limit=limit,
        offset=offset,
    )
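
A sketch of invoking this one_step helper, with a placeholder path:

one_step('demo.mp4', threshold=0.95, frame_count=5, offset=3)
# or hand it a VideoObject you have built yourself
one_step(VideoObject('demo.mp4'), output_path='./output')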
Example #19
from stagesepx.classifier import SVMClassifier
from stagesepx.cutter import VideoCutter
from stagesepx.reporter import Reporter

TARGET_VIDEO = '../../demo.mp4'

# cut
# cut is still used here, mainly to handle the transitional (changing) parts properly,
# but this time pick_and_save is not needed, because the classifier will not use the cutter's data
cutter = VideoCutter()
res = cutter.cut(TARGET_VIDEO)
stable, _ = res.get_range()

# classify
# keep these parameters consistent with train.py if you changed anything there
cl = SVMClassifier()
cl.load_model('./model.pkl')

classify_result = cl.classify(
    TARGET_VIDEO,
    stable,
)

r = Reporter()
r.draw(
    classify_result,
    report_path='report.html',
    cut_result=res,
)
Example #20
    cutter = VideoCutter()
    res = cutter.cut(each_video_path)
    stable = res.get_stable_range()
    data_home = res.pick_and_save(stable, 3)
    print(stable)

    # classify
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()

    # note: if ranges are passed to the classify method,
    # only frames inside those ranges will be analysed!
    # here only the stable ranges are passed in, so frames outside them are skipped and marked as stage -1
    res = cl.classify(
        each_video_path,
        stable,
        # step size; tune it to balance speed against granularity
        # defaults to 1, i.e. every frame is checked
        step=1)

    # for readability, stagesepx ships with built-in chart drawing,
    # so you can render the analysis result as a report directly
    report = Reporter()
    # you can insert directory paths into the report
    # so that the related files are easy to reach from the report itself;
    # just make sure these paths stay reachable relative to where the report will finally live
    report.add_dir_link(data_home)

    report.draw(res)
Example #21
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SVMClassifier
from stagesepx.reporter import Reporter

# cut
video_path = '../test.mp4'
cutter = VideoCutter()
res = cutter.cut(video_path)
stable = res.get_stable_range()
data_home = res.pick_and_save(stable, 3)
print(stable)

# classify
cl = SVMClassifier()
cl.load(data_home)
cl.train()

# note: if ranges are passed to the classify method,
# only frames inside those ranges will be analysed!
# here only the stable ranges are passed in, so frames outside them are skipped and marked as stage -1
res = cl.classify(
    video_path,
    stable,
    # step size; tune it to balance speed against granularity
    # defaults to 1, i.e. every frame is checked
    step=1)

# draw
Reporter.draw(
    res,
    report_path=f'{data_home}/report.html',
    data_path=data_home,
)
Example #22
    step=1,
)

# the classification result is a list of ClassifierResult objects,
# which you can build your own tooling on top of
for each in classify_result:
    # its frame number
    print(each.frame_id)
    # its timestamp
    print(each.timestamp)
    # which stage it was classified into
    print(each.stage)
    break

# --- draw ---
r = Reporter()
r.add_dir_link(data_home)

# thumbnails can be embedded directly into the report
for each in unstable:
    r.add_thumbnail(
        f'{each.start}({each.start_time}) - {each.end}({each.end_time})',
        res.thumbnail(each))

# you can insert directory paths into the report
# so that the related files are easy to reach from the report itself;
# just make sure these paths stay reachable relative to where the report will finally live
# r.add_dir_link(data_home)

# since 0.3.2 you can add custom content to the report (https://github.com/williamfzc/stagesepx/issues/13)
# r.add_extra('here is title', 'here is content')
Example #23
def write_result_to_local(_video_file_name, _from_movie_2_picture,
                          _result_dict, _classify_result):
    # one row of data to be written to csv
    result_row = []
    # print(re.search(r'\\(.*).mp4', str(i), re.M | re.I).group(1))
    mp4_filename = re.search(r'\\(.*).mp4', str(_video_file_name),
                             re.M | re.I).group(1)

    # print the result
    # print(result_dict.keys())
    # print(result_dict['0'][-1][-1])
    # <ClassifierResult stage=0 frame_id=99 timestamp=3.2666666666666666>
    # pprint.pprint(result_dict)

    # write the result to a local file
    txt_html_path = _from_movie_2_picture + '/forecast_stable_' + mp4_filename + '/' + mp4_filename
    f = open(txt_html_path + '.txt', 'a+')
    f.write(str(_result_dict).replace(', ', ',\n'))
    f.close()

    # build the result row
    result_row.append(mp4_filename + '.mp4')

    # --- draw ---
    r = Reporter()
    r.draw(_classify_result, txt_html_path + '.html')

    # compute the result
    # ['-3'][0][0] marks the user's tap action
    if '-3' in _result_dict.keys() and len(_result_dict['-3']) > 0:
        search_obj1 = re.search(r'frame_id=(.*) timestamp=(.*)>',
                                str(_result_dict['-3'][0][0]), re.M | re.I)
        # print('icon tap finished at frame %s, at %s seconds' % (search_obj1.group(1), str(search_obj1.group(2))))
        result_row.append(str(search_obj1.group(2)))
        result_row.append(str(search_obj1.group(1)))
    else:
        # print("no timestamp found for the finished icon tap")
        result_row.append('None')
        result_row.append('None')

    # sometimes ['1'][0][0] marks the user's tap action instead
    if '1' in _result_dict.keys() and len(_result_dict['1']) > 0:
        search_obj2 = re.search(r'frame_id=(.*) timestamp=(.*)>',
                                str(_result_dict['1'][0][0]), re.M | re.I)
        # print('tap started at frame %s, at %s seconds' % (search_obj2.group(1), str(search_obj2.group(2))))
        result_row.append(str(search_obj2.group(2)))
        result_row.append(str(search_obj2.group(1)))
    else:
        # print("no timestamp found for the start of the tap")
        result_row.append('None')
        result_row.append('None')

    # target page reached
    if '4' in _result_dict.keys() and len(_result_dict['4']) > 0:
        search_obj3 = re.search(r'frame_id=(.*) timestamp=(.*)>',
                                str(_result_dict['4'][0][0]), re.M | re.I)
        # print('buffering finished, target page reached at frame %s, at %s seconds' % (search_obj3.group(1), search_obj3.group(2)))
        result_row.append(str(search_obj3.group(2)))
        result_row.append(str(search_obj3.group(1)))
    else:
        # print("no timestamp found for reaching the target page")
        result_row.append('None')
        result_row.append('None')

    return result_row
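
Note that the code above regex-parses the repr strings of the result objects; as Example #17 shows, each result also exposes frame_id and timestamp as attributes, so a hypothetical helper could read them directly (assuming _result_dict holds the ClassifierResult objects shown in the comment above):

def first_frame_info(result_dict, stage):
    # pull timestamp and frame_id straight from the first result object of a stage
    if stage in result_dict and len(result_dict[stage]) > 0:
        hit = result_dict[stage][0][0]
        return str(hit.timestamp), str(hit.frame_id)
    return 'None', 'None'

# e.g. result_row.extend(first_frame_info(_result_dict, '-3'))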
Example #24
def test_boost():
    video = VideoObject(VIDEO_PATH)
    video.load_frames()
    # test cut
    res, data_home = _cut(video)

    # test classify
    classify_result = _classify(video, data_home)

    # --- draw ---
    r = Reporter()
    r.draw(
        classify_result,
        report_path=os.path.join(data_home, "report.html"),
        cut_result=res,
    )

    # test compressing
    r = Reporter()
    r.draw(
        classify_result,
        report_path=os.path.join(data_home, "report.html"),
        cut_result=res,
        compress_rate=0.1,
    )

    r = Reporter()
    r.draw(
        classify_result,
        report_path=os.path.join(data_home, "report.html"),
        cut_result=res,
        target_size=(600, 800),
    )
Example #25
    # just add any other videos here as well
]

for each_video_path in video_list:
    cutter = VideoCutter()
    res = cutter.cut(each_video_path)
    stable = res.get_stable_range()
    data_home = res.pick_and_save(stable, 3)
    print(stable)

    # classify
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()

    # 注意,如果在classify方法指定了范围
    # 那么分析时只会分析处于范围内的帧!
    # 例如,这里只传入了stable的范围,那么非stable范围内的帧都会被忽略掉,标记为 -1
    res = cl.classify(
        each_video_path,
        stable,
        # 步长,可以自行设置用于平衡效率与颗粒度
        # 默认为1,即每帧都检测
        step=1)

    # draw
    Reporter.draw(
        res,
        data_path=data_home,
    )
cl = SVMClassifier(feature_type='hog')

# the workflow is basically the same as with the SSIM classifier,
# but its requirements on the data may differ; see the notes in cut.py
data_home = './cut_result'
cl.load(data_home)

# after the data has been loaded, the model must be trained first
cl.train()

# # after training you can save the model
# cl.save_model('model.pkl')
# # or load a model that has already been trained
# cl.load_model('model.pkl')

# start classifying
res = cl.classify(
    '../test.mp4',
    # step size; tune it to balance speed against granularity
    # defaults to 1, i.e. every frame is checked
    step=1
)

Reporter.draw(
    res,
    report_path='report.html',

    # show the pictures corresponding to each stage in the report
    data_path=data_home,
)
Example #27
print(data_list)
# classify_result already offers many methods for reshaping the data;
# have a look inside the ClassifierResult object for details
cr_dict = classify_result.to_dict()
print(cr_dict)

# contain_image (>=0.9.1)
# you can use template matching to compare the final result with your expectation
# and check whether each stage looks the way you expected;
# fully automated verification can be built on top of this
# res = data_list[0].contain_image(image_path="path/to/your/template/path")
# print(res)
# you will get something like:
# {'target_point': [550, 915], 'target_sim': 0.9867244362831116, 'ok': True}

# --- draw ---
r = Reporter()

# you can insert custom data into the report
r.add_extra("data_home", data_home)

# since 0.3.2 you can add custom content to the report (https://github.com/williamfzc/stagesepx/issues/13)
# r.add_extra('here is title', 'here is content')
r.draw(
    classify_result,
    report_path=os.path.join(data_home, "report.html"),
    # passing unstable marks the corresponding parts as unstable,
    # which affects the final analysis result
    unstable_ranges=unstable,
    # added in 0.5.3, mostly for debugging:
    # passing the cutter's result appends SSIM / MSE / PSNR trend charts to the end of the report
    cut_result=res,
Example #28
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SVMClassifier
from stagesepx.reporter import Reporter

video_path = '../test.mp4'
cutter = VideoCutter()
res = cutter.cut(video_path)
stable, unstable = res.get_range()
data_home = res.pick_and_save(stable, 5)

# classify
cl = SVMClassifier()
cl.load(data_home)
cl.train()

# note: if ranges are passed to the classify method,
# only frames inside those ranges will be analysed!
# here only the stable ranges are passed in, so frames outside them are skipped and marked as stage -1
classify_result = cl.classify(
    video_path,
    stable,
    # step size; tune it to balance speed against granularity
    # defaults to 1, i.e. every frame is checked
    step=1)

# draw
r = Reporter()
r.add_dir_link(data_home)

# thumbnails can be embedded directly into the report
for index, each in enumerate(unstable):
    r.add_thumbnail(f'unstable stage {index}', res.thumbnail(each))

r.draw(classify_result)
Example #29
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SVMClassifier
from stagesepx.reporter import Reporter
from stagesepx.video import VideoObject


video_path = "../videos/long.mp4"
video = VideoObject(video_path)
video.load_frames()

# --- cutter ---
cutter = VideoCutter()
res = cutter.cut(video)
stable, unstable = res.get_range()
data_home = res.pick_and_save(stable, 5)

# --- classify ---
cl = SVMClassifier(compress_rate=0.4)
cl.load(data_home)
cl.train()
classify_result = cl.classify(video, stable, keep_data=True)
result_dict = classify_result.to_dict()

# --- draw ---
r = Reporter()
r.draw(classify_result)
Example #30
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SSIMClassifier
from stagesepx.reporter import Reporter

# cut
video_path = '../test.mp4'
cutter = VideoCutter()
res = cutter.cut(video_path)
stable = res.get_stable_range()

# classify
cl = SSIMClassifier()
cl.load(stable)

res = cl.classify(
    video_path,
    stable,
)

# draw
Reporter.draw(res)