Example #1
    def cut(self,
            video_path: str,
            output_path: str = None,
            threshold: float = 0.95,
            frame_count: int = 5,
            compress_rate: float = 0.2,
            limit: int = None):
        cutter = VideoCutter()
        res = cutter.cut(video_path, compress_rate=compress_rate)
        stable, unstable = res.get_range(threshold=threshold, limit=limit)

        data_home = res.pick_and_save(stable, frame_count, to_dir=output_path)
        res_json_path = os.path.join(output_path or data_home,
                                     'cut_result.json')
        res.dump(res_json_path)
Example #2
def test_diff_with_hook():
    cutter = VideoCutter()
    res = cutter.cut(VIDEO_PATH)
    res1 = cutter.cut(ANOTHER_VIDEO_PATH)

    for each in (res, res1):
        stable, _ = each.get_range()
        res.pick_and_save(stable, 3)

    hook = [CropHook(size=(0.5, 0.5))]

    diff = res.diff(res1, pre_hooks=hook, frame_count=5)
    pprint.pprint(diff.data)
    assert diff.data
    assert not diff.any_stage_lost()
Example #3
    def one_step(self,
                 video_path: str,
                 output_path: str = None,
                 threshold: float = 0.95,
                 frame_count: int = 5,
                 compress_rate: float = 0.2,
                 limit: int = None):
        """
        one step => cut, classifier, draw

        :param video_path: your video path
        :param output_path: output path (dir)
        :param threshold: float, 0-1, default 0.95; decides whether a range is stable. larger => more unstable ranges
        :param frame_count: default 5; you will get 5 frames for each stable range
        :param compress_rate: before_pic * compress_rate = after_pic. default 0.2
        :param limit: ignore ranges that are too short; 5 means ignore stable ranges whose length < 5
        :return:
        """

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video_path, compress_rate=compress_rate)
        stable, unstable = res.get_range(
            threshold=threshold,
            limit=limit,
        )

        data_home = res.pick_and_save(stable, frame_count, to_dir=output_path)
        res_json_path = os.path.join(data_home, 'cut_result.json')
        res.dump(res_json_path)

        # --- classify ---
        cl = SVMClassifier(compress_rate=compress_rate)
        cl.load(data_home)
        cl.train()
        classify_result = cl.classify(video_path, stable)

        # --- draw ---
        r = Reporter()
        r.add_dir_link(data_home)
        r.draw(
            classify_result,
            report_path=os.path.join(data_home, 'report.html'),
            cut_result=res,
        )
Example #4
def _diff(
    video_before: typing.Union[str, VideoObject],
    video_after: typing.Union[str, VideoObject],
    pre_hooks: typing.List[BaseHook] = None,
    *args,
    **kwargs,
):
    cutter = VideoCutter()
    if isinstance(video_before, str):
        video_before = VideoObject(video_before)
        video_before.load_frames()
    if isinstance(video_after, str):
        video_after = VideoObject(video_after)
        video_after.load_frames()

    res = cutter.cut(video_before)
    res1 = cutter.cut(video_after)
    return res.diff(res1, pre_hooks, *args, **kwargs)
Example #5
def _cut(
    video: typing.Union[str, VideoObject],
    output_path: str = None,
    threshold: float = constants.DEFAULT_THRESHOLD,
    frame_count: int = 5,
    compress_rate: float = 0.2,
    target_size: typing.Tuple[int, int] = None,
    offset: int = 3,
    limit: int = None,
) -> typing.Tuple[VideoCutResult, str]:
    """
    cut the video, and get series of pictures (with tag)

    :param video: video path or object
    :param output_path: output path (dir)
    :param threshold: float, 0-1, default 0.98; decides whether a range is stable. larger => more unstable ranges
    :param frame_count: default 5; you will get 5 frames for each stable range
    :param compress_rate: before_pic * compress_rate = after_pic. default 0.2
    :param target_size: e.g. (100, 200)
    :param offset:
        changes how the cutter decides whether two ranges can be merged
        before: first_range.end == second_range.start
        after: first_range.end + offset >= second_range.start
    :param limit: ignore ranges that are too short; 5 means ignore stable ranges whose length < 5

    :return: tuple, (VideoCutResult, data_home)
    """
    if isinstance(video, str):
        video = VideoObject(video)

    cutter = VideoCutter()
    res = cutter.cut(video,
                     compress_rate=compress_rate,
                     target_size=target_size)
    stable, unstable = res.get_range(threshold=threshold,
                                     limit=limit,
                                     offset=offset)

    data_home = res.pick_and_save(stable, frame_count, to_dir=output_path)
    res_json_path = os.path.join(output_path or data_home,
                                 constants.CUT_RESULT_FILE_NAME)
    res.dump(res_json_path)
    return res, data_home
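
A possible call to the helper above, purely for reference; the video path and output directory are placeholders, not values from the original snippet:

# hypothetical usage of _cut; 'demo.mp4' and './cut_output' are placeholders
cut_res, data_home = _cut(
    'demo.mp4',
    output_path='./cut_output',
    threshold=0.98,
    offset=3,
)
print(data_home)  # directory containing the picked frames; the cut result json is dumped alongside them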
Example #6
def test_default():
    cutter = VideoCutter()
    res = cutter.cut(VIDEO_PATH)
    stable, unstable = res.get_range()
    assert len(stable) == 3, "count of stable range is not correct"

    if os.path.exists(RESULT_DIR):
        shutil.rmtree(RESULT_DIR)

    data_home = res.pick_and_save(stable, 5, to_dir=RESULT_DIR)
    assert data_home == RESULT_DIR
    assert os.path.isdir(data_home), "result dir not existed"

    # run again to test covering result
    data_home = res.pick_and_save(stable, 5, to_dir=RESULT_DIR)
    assert data_home == RESULT_DIR
    assert os.path.isdir(data_home), "result dir not existed"

    sub_dir_0 = os.path.join(data_home, "0")
    assert len(os.listdir(sub_dir_0)) == 10
    return res
Example #7
def analyse(
    video: typing.Union[str, VideoObject],
    output_path: str,
    pre_load: bool = True,
    threshold: float = 0.98,
    offset: int = 3,
    boost_mode: bool = True,
):
    """ designed for https://github.com/williamfzc/stagesepx/issues/123 """

    if isinstance(video, str):
        video = VideoObject(video, pre_load=pre_load)

    cutter = VideoCutter()
    res = cutter.cut(video)

    stable, unstable = res.get_range(
        threshold=threshold,
        offset=offset,
    )

    with tempfile.TemporaryDirectory() as temp_dir:
        res.pick_and_save(
            stable,
            5,
            to_dir=temp_dir,
        )

        cl = SVMClassifier()
        cl.load(temp_dir)
        cl.train()
        classify_result = cl.classify(video, stable, boost_mode=boost_mode)

    r = Reporter()
    r.draw(
        classify_result,
        report_path=output_path,
        unstable_ranges=unstable,
        cut_result=res,
    )
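
For reference, a possible call to analyse, following its signature above; the file names are placeholders rather than values from the original code:

# hypothetical call; 'demo.mp4' and 'report.html' are placeholder paths
analyse('demo.mp4', 'report.html', threshold=0.98, offset=3)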
Example #8
    def handle(self, video_path: str) -> bool:
        super(NormalHandler, self).handle(video_path)
        video = VideoObject(video_path)
        if self.preload:
            video.load_frames()

        # --- cutter ---
        cutter = VideoCutter()
        res = cutter.cut(video)
        stable, unstable = res.get_range(threshold=0.98, offset=3)
        data_home = res.pick_and_save(stable,
                                      self.frame_count,
                                      to_dir=self.result_path)

        # --- classify ---
        cl = SVMClassifier()
        cl.load(data_home)
        cl.train()
        self.classifier_result = cl.classify(video, stable)

        # --- draw ---
        r = Reporter()
        r.draw(self.classifier_result, report_path=self.result_report_path)
        return True
Example #9
def test_step():
    cutter = VideoCutter(step=2)
    res = cutter.cut(VIDEO_PATH)
    stable, unstable = res.get_range()
    # with step=2 the video is traversed two frames at a time, so fewer stable ranges are detected
    assert len(stable) == 1, "count of stable range is not correct"
Example #10
def test_hook():
    # init hook
    hook = ExampleHook()
    hook1 = ExampleHook()
    hook2 = IgnoreHook(size=(0.5, 0.5))
    frame_home = os.path.join(PROJECT_PATH, "frame_save_dir")
    hook3 = FrameSaveHook(frame_home)
    hook4 = CropHook(size=(0.5, 0.5), offset=(0.0, 0.5))
    hook5 = RefineHook()
    hook6 = InterestPointHook()
    hook7 = TemplateCompareHook({"amazon": IMAGE_PATH})

    # --- cutter ---
    cutter = VideoCutter(compress_rate=0.9)
    # add hook
    cutter.add_hook(hook)
    cutter.add_hook(hook1)
    cutter.add_hook(hook2)
    cutter.add_hook(hook3)
    cutter.add_hook(hook4)
    cutter.add_hook(hook5)
    cutter.add_hook(hook6)
    cutter.add_hook(hook7)

    res = cutter.cut(VIDEO_PATH)
    stable, unstable = res.get_range()
    assert len(stable) == 2, "count of stable range is not correct"

    data_home = res.pick_and_save(stable, 5)
    assert os.path.isdir(data_home), "result dir not existed"

    # --- classify ---
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()
    classify_result = cl.classify(VIDEO_PATH, stable)

    # --- draw ---
    r = Reporter()
    report_path = os.path.join(data_home, "report.html")
    r.draw(classify_result, report_path=report_path, cut_result=res)
    assert os.path.isfile(report_path)

    # hook check
    assert os.path.isdir(frame_home)
    assert hook6.result
    assert hook7.result
Example #11
def test_window():
    cutter = VideoCutter()
    v = VideoObject(VIDEO_PATH)
    res = cutter.cut(v, window_size=2, window_coefficient=2)
    assert res
Example #12
from stagesepx.cutter import VideoCutter

import pprint

video_path = '../test1.mp4'
another_video_path = '../test2.mp4'

cutter = VideoCutter()
res = cutter.cut(video_path, compress_rate=0.1)
res1 = cutter.cut(another_video_path, compress_rate=0.1)

# version >= 0.4.3
pprint.pprint(res.diff(res1, frame_count=3))
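
In newer versions the object returned by diff can be inspected further, as Example #2 above demonstrates; a sketch along those lines (CropHook, diff.data and any_stage_lost all come from that example):

# sketch, following the diff API used in Example #2 (newer stagesepx versions)
from stagesepx.hook import CropHook

diff = res.diff(res1, pre_hooks=[CropHook(size=(0.5, 0.5))], frame_count=3)
pprint.pprint(diff.data)
print(diff.any_stage_lost())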
Example #13
from stagesepx.cutter import VideoCutter

# change this to your video path
video_path = '../test.mp4'

cutter = VideoCutter(
    # step size (default 2); use it to balance efficiency against granularity
    # with step=2 the video is traversed two frames at a time
    step=2,
    # default 0.2, i.e. frames are scaled to 0.2x
    # mainly to speed up computation
    compress_rate=0.2)

# start cutting
res = cutter.cut(video_path)

# you can read the cut result from res
# for example, the ranges that correspond to stable states
# limit filters out stages that are too short (e.g. if you do not want very short stages to count as stable); no filtering by default
stable = res.get_stable_range(
    # threshold used to decide whether a stage is stable
    # higher is stricter (fewer ranges are judged stable)
    # default 0.95 (range 0-1)
    threshold=0.95)
# unstable (still changing) ranges
unstable = res.get_unstable_range()

# because all stages are detected automatically, one situation can occur:
# you record the same scene several times, but due to recording quality and the environment each video may yield a different number of stages
# based on findit, you can check a stage directly to make sure its content matches your expectation
# for example, if the frames in the second stable stage must contain a certain image (path a.png), you can do it as shown below:
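
A sketch of that check, using the contain_image call that Examples #19 and #22 below demonstrate; 'a.png' is the placeholder path from the comment, and index 1 selects the second stable stage:

# sketch: check the second stable stage against a.png (see Examples #19 and #22 for contain_image)
match_result = stable[1].contain_image('a.png')
print(match_result)  # e.g. {'target_point': [...], 'target_sim': 0.98, 'ok': True}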
Example #14
def test_hook():
    # init hook
    hook = ExampleHook()
    hook1 = ExampleHook(overwrite=True)
    hook2 = IgnoreHook(size=(0.5, 0.5), overwrite=True)
    frame_home = os.path.join(PROJECT_PATH, 'frame_save_dir')
    hook3 = FrameSaveHook(frame_home)
    hook4 = CropHook(
        size=(0.5, 0.5),
        offset=(0., 0.5),
        overwrite=True,
    )
    hook5 = RefineHook()
    hook6 = InvalidFrameDetectHook()
    hook7 = TemplateCompareHook({
        'amazon': IMAGE_PATH,
    })

    # --- cutter ---
    cutter = VideoCutter(compress_rate=0.8)
    # add hook
    cutter.add_hook(hook)
    cutter.add_hook(hook1)
    cutter.add_hook(hook2)
    cutter.add_hook(hook3)
    cutter.add_hook(hook4)
    cutter.add_hook(hook5)
    cutter.add_hook(hook6)
    cutter.add_hook(hook7)

    res = cutter.cut(VIDEO_PATH)
    stable, unstable = res.get_range()
    assert len(stable) == 2, 'count of stable range is not correct'

    data_home = res.pick_and_save(
        stable,
        5,
    )
    assert os.path.isdir(data_home), 'result dir not existed'

    # --- classify ---
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()
    classify_result = cl.classify(VIDEO_PATH, stable)

    # --- draw ---
    r = Reporter()
    report_path = os.path.join(data_home, 'report.html')
    r.draw(
        classify_result,
        report_path=report_path,
        cut_result=res,
    )
    assert os.path.isfile(report_path)

    # hook check
    assert os.path.isdir(frame_home)
    assert hook6.result
    assert hook7.result
Example #15
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SSIMClassifier
from stagesepx.reporter import Reporter

# cut
video_path = 'test.mp4'
cutter = VideoCutter(step=2)
res = cutter.cut(video_path)
stable = res.get_stable_range()
data_home = res.pick_and_save(stable, 3)

# classify
cl = SSIMClassifier()
cl.load(data_home)
res = cl.classify(video_path)

# draw
r = Reporter()
r.draw(res, report_path=f'{data_home}/report.html')
Example #16
"""
利用训练好的模型,建立长期的视频分析工作流

在 train.py 之后,你应该能得到一个 model.pkl 模型
"""

from stagesepx.classifier import SVMClassifier
from stagesepx.cutter import VideoCutter
from stagesepx.reporter import Reporter

TARGET_VIDEO = '../../demo.mp4'

# cut
# cut is still used here, mainly so that the changing (unstable) parts are handled properly
# but pick_and_save is not needed this time, because the classifier will not use the cutter's data
cutter = VideoCutter()
res = cutter.cut(TARGET_VIDEO)
stable, _ = res.get_range()

# classify
# keep these parameters consistent with train.py if you changed them there
cl = SVMClassifier()
cl.load_model('./model.pkl')

classify_result = cl.classify(
    TARGET_VIDEO,
    stable,
)

r = Reporter()
r.draw(
    classify_result,
    # the original snippet is truncated here; the report path below is illustrative
    report_path='report.html',
)
Example #17
def run(config: typing.Union[dict, str]):
    """
    run with config

    :param config: config file path, or a preload dict
    :return:
    """
    class _VideoUserConfig(BaseModel):
        path: str
        pre_load: bool = True
        fps: int = None

    class _CutterUserConfig(BaseModel):
        threshold: float = None
        frame_count: int = None
        offset: int = None
        limit: int = None
        block: int = None

        # common
        compress_rate: float = None
        target_size: typing.Tuple[int, int] = None

    class _ClassifierType(Enum):
        SVM = "svm"
        KERAS = "keras"

    class _ClassifierUserConfig(BaseModel):
        boost_mode: bool = None
        classifier_type: _ClassifierType = _ClassifierType.SVM
        model: str = None

        # common
        compress_rate: float = None
        target_size: typing.Tuple[int, int] = None

    class _CalcOperatorType(Enum):
        BETWEEN = "between"
        DISPLAY = "display"

    class _CalcOperator(BaseModel):
        name: str
        calc_type: _CalcOperatorType
        args: dict = dict()

    class _CalcUserConfig(BaseModel):
        output: str = None
        ignore_error: bool = None
        operators: typing.List[_CalcOperator] = None

    class _ExtraUserConfig(BaseModel):
        save_train_set: str = None

    class UserConfig(BaseModel):
        output: str
        video: _VideoUserConfig
        cutter: _CutterUserConfig = _CutterUserConfig()
        classifier: _ClassifierUserConfig = _ClassifierUserConfig()
        calc: _CalcUserConfig = _CalcUserConfig()
        extras: _ExtraUserConfig = _ExtraUserConfig()

    if isinstance(config, str):
        # path
        config_path = pathlib.Path(config)
        assert config_path.is_file(), f"no config file found in {config_path}"

        # todo: support different types in the future
        assert config_path.as_posix().endswith(
            ".json"), "config file should be json format"
        with open(config_path, encoding=constants.CHARSET) as f:
            config = json.load(f)

    config = UserConfig(**config)
    logger.info(f"config: {config}")

    # main flow
    video = VideoObject(
        # fmt: off
        path=config.video.path,
        fps=config.video.fps,
    )
    if config.video.pre_load:
        video.load_frames()

    # cut
    cutter = VideoCutter(
        # fmt: off
        compress_rate=config.cutter.compress_rate,
        target_size=config.cutter.target_size,
    )
    res = cutter.cut(
        # fmt: off
        video=video,
        block=config.cutter.block,
    )
    stable, unstable = res.get_range(
        # fmt: off
        threshold=config.cutter.threshold,
        offset=config.cutter.offset,
    )

    with tempfile.TemporaryDirectory() as temp_dir:
        # classify
        if config.classifier.classifier_type is _ClassifierType.SVM:
            cl = SVMClassifier(
                # fmt: off
                compress_rate=config.classifier.compress_rate,
                target_size=config.classifier.target_size,
            )
        elif config.classifier.classifier_type is _ClassifierType.KERAS:
            from stagesepx.classifier.keras import KerasClassifier

            cl = KerasClassifier(
                # fmt: off
                compress_rate=config.classifier.compress_rate,
                target_size=config.classifier.target_size,
            )
        # validation has been applied by pydantic
        # so no `else`

        if config.classifier.model:
            # no need to retrain
            model_path = pathlib.Path(config.classifier.model)
            assert model_path.is_file(), f"file {model_path} not existed"
            cl.load_model(model_path)
        else:
            # train a new model
            train_set_dir = config.extras.save_train_set or temp_dir
            os.makedirs(train_set_dir, exist_ok=True)

            res.pick_and_save(
                # fmt: off
                stable,
                frame_count=config.cutter.frame_count,
                to_dir=train_set_dir,
            )
            cl.train(data_path=train_set_dir)

    # start classifying
    classify_result = cl.classify(
        # fmt: off
        video,
        stable,
        boost_mode=config.classifier.boost_mode,
    )

    # calc
    def _calc_display() -> dict:
        # jsonify
        return json.loads(classify_result.dumps())

    def _calc_between(*, from_stage: str = None, to_stage: str = None) -> dict:
        assert classify_result.contain(
            from_stage), f"no stage {from_stage} found in result"
        assert classify_result.contain(
            to_stage), f"no stage {to_stage} found in result"
        from_frame = classify_result.last(from_stage)
        to_frame = classify_result.first(to_stage)
        cost = to_frame.timestamp - from_frame.timestamp
        return {
            "from": from_frame.frame_id,
            "to": to_frame.frame_id,
            "cost": cost,
        }

    _calc_func_dict = {
        _CalcOperatorType.BETWEEN: _calc_between,
        _CalcOperatorType.DISPLAY: _calc_display,
    }
    calc_output = config.calc.output
    if calc_output:
        output_path = pathlib.Path(calc_output)
        assert not output_path.is_file(), f"file {output_path} already existed"
        result = []
        for each_calc in config.calc.operators:
            func = _calc_func_dict[each_calc.calc_type]
            try:
                func_ret = func(**each_calc.args)
            except Exception as e:
                if not config.calc.ignore_error:
                    raise
                logger.warning(e)
                func_ret = traceback.format_exc()
            calc_ret = {
                "name": each_calc.name,
                "type": each_calc.calc_type.value,
                "result": func_ret,
            }
            result.append(calc_ret)
        with open(output_path, "w", encoding=constants.CHARSET) as f:
            json.dump(result, f)

    # draw
    r = Reporter()
    r.draw(
        # fmt: off
        classify_result,
        report_path=config.output,
    )
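
run also accepts a preloaded dict (see its docstring); a minimal sketch that mirrors the UserConfig model defined above — output and video.path are the only required fields, and the file names here are placeholders:

# hypothetical config dict; the keys mirror UserConfig / _VideoUserConfig / _CutterUserConfig above
run({
    "output": "report.html",
    "video": {"path": "demo.mp4", "pre_load": True},
    "cutter": {"threshold": 0.98, "frame_count": 5},
})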
Example #18
from stagesepx.cutter import VideoCutter, VideoCutResult
from stagesepx.classifier import SVMClassifier
from stagesepx.reporter import Reporter
from stagesepx.hook import ExampleHook, CropHook, IgnoreHook
import os

video_path = '../demo.mp4'

# --- cut ---
cutter = VideoCutter(
    # step size, default 1; use it to balance efficiency against granularity
    # with step=2 the video is traversed two frames at a time
    # i.e. one frame is skipped each time
    step=1,
    # default 0.2, i.e. frames are scaled to 0.2x
    # mainly to speed up computation
    # raise it if you are worried about the impact on analysis quality
    compress_rate=0.2,
    # or specify a target size directly
    # when both compress_rate and target_size are given, target_size takes precedence
    # target_size=(200, 400),
)

# since 0.4.2 the hook feature is officially supported: https://williamfzc.github.io/stagesepx/#/pages/3_how_it_works?id=hook
# usage is very simple: just initialize a hook
hook = ExampleHook()
# then add the hook to the cutter (or to a classifier)
cutter.add_hook(hook)
# multiple hooks are supported; they run in order
# when overwrite is set to True, a hook's changes keep affecting the rest of the analysis
# otherwise each hook works on a copy of the frame
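
The original snippet stops right after this explanation; a sketch of how the flow typically continues, reusing CropHook (imported at the top of this example) and the cut/save pattern from the other examples — the parameter values are illustrative:

# sketch: a CropHook with overwrite=True keeps only the chosen region for all later analysis
crop = CropHook(size=(0.5, 0.5), offset=(0.0, 0.5), overwrite=True)
cutter.add_hook(crop)

res = cutter.cut(video_path)
stable, unstable = res.get_range()
data_home = res.pick_and_save(stable, 5)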
Example #19
def test_cut_range():
    cutter = VideoCutter()
    res = cutter.cut(VIDEO_PATH)
    stable, _ = res.get_range()
    stable[0].contain_image(IMAGE_PATH)
    stable[0].is_loop(0.95)
Example #20
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SVMClassifier
from stagesepx.reporter import Reporter

video_list = [
    '../test.mp4',
    # add your other videos here
]

for each_video_path in video_list:
    cutter = VideoCutter()
    res = cutter.cut(each_video_path)
    stable = res.get_stable_range()
    data_home = res.pick_and_save(stable, 3)
    print(stable)

    # classify
    cl = SVMClassifier()
    cl.load(data_home)
    cl.train()

    # note: if a range is passed to the classify method,
    # only frames inside that range are analysed!
    # here only the stable ranges are passed, so frames outside them are ignored and tagged -1
    res = cl.classify(
        each_video_path,
        stable,
        # step size, tune it to balance efficiency against granularity
        # default 1, i.e. every frame is checked
        step=1)
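
Reporter is imported at the top of this example but the snippet ends before it is used; the usual final step, following the other examples (the report path is illustrative), would be:

    # sketch: render a report for this video; the path is a placeholder
    r = Reporter()
    r.draw(res, report_path=f'{data_home}/report.html')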
Example #21
def get_range(_train_or_forecast, _forecast_file_name, _param, _picture_path):
    file_name = re.search(r'\\(.*).mp4', str(_forecast_file_name),
                          re.M | re.I).group(1)
    video = VideoObject(_forecast_file_name, pre_load=True)
    # load frames; work out how many frames the video has and how many ms each frame lasts
    video.load_frames()
    # cut the video directly
    # compress the video
    cutter = VideoCutter(compress_rate=_param[0])

    # add hooks
    '''
    convert_size_and_offset - origin size: ((468, 216)) is the size of the whole frame; the origin is at the top-left corner.
    468 is the height, 216 the width; (468, 216) is the bottom-right corner of the frame.
    stagesepx.hook:convert_size_and_offset:153 - size: (23.4, 216) is the height and width of the masked or selected region: height 23.4, width 216.
    convert_size_and_offset:160 - final range h: (0, 23), w: (0, 216) is the masked or selected region:
    counting from the top-left corner, the band from height 0 to 23 across the full width is masked.

    To measure how long switching from portrait to landscape takes, record the screen in landscape,
    so that the "five-day chart" content shown after the rotation is not shrunk too much in the recording.
    In a landscape recording, a frame whose app content is still in portrait shows a black band on the left,
    the portrait page in the middle, and another black band on the right.
    When masking or selecting a region in such a frame, the black bands must be included in the size calculation.
    If the region is computed only from the size of the phone content in the middle, the selected region will not match what you expect.

    # mask the order-book data: size=(0.1, 1)
    # mask the phone navigation bar: size=(0.05, 1)
    '''
    '''
    # mask the right side of the frame
    hook_ignore1 = IgnoreHook(
        size=(1, 1),
        offset=(0, 0.55),
        overwrite=True,
    )
    # mask the left side of the frame
    hook_ignore2 = IgnoreHook(
        size=(1, 0.45),
        overwrite=True,
    )
    # mask the top of the frame
    hook_ignore3 = IgnoreHook(
        size=(0.35, 1),
        overwrite=True,
    )
    # mask the bottom of the frame
    hook_ignore4 = IgnoreHook(
        size=(1, 1),
        offset=(0.6, 0),
        overwrite=True,
    )
    hook_crop = CropHook(
        size=(0.2, 0.2),
        overwrite=True,
    )
    '''

    # cutter.add_hook(hook_ignore1)
    # cutter.add_hook(hook_ignore2)
    # cutter.add_hook(hook_ignore3)
    # cutter.add_hook(hook_ignore4)
    # cutter.add_hook(hook_crop)
    # hook_save_frame = FrameSaveHook('../frame_save_dir')
    # cutter.add_hook(hook_save_frame)

    # compute ssim and psnr for every block of every frame; block=4 means 16 parts are scored
    # res = cutter.cut(video)
    res = cutter.cut(video, block=_param[3])
    # work out which ranges are stable and which are unstable, i.e. whether frames A..B form a stable span
    # presumably this is also where stable frames A..B get grouped into one folder; a larger offset widens the A..B interval
    stable, unstable = res.get_range(threshold=_param[1], offset=_param[2])
    # stable, unstable = res.get_range(threshold=_param[1], offset=_param[2], limit=5,)
    # stable, unstable = res.get_range(threshold=_param[1], offset=_param[2], psnr_threshold=0.85)
    # stable, unstable = res.get_range(threshold=_param[1])
    if _train_or_forecast == 'train':
        print("pick_and_save")
        # save the frames of the classified stable stages locally
        res.pick_and_save(stable,
                          20,
                          to_dir=_picture_path + '/train_stable_' + file_name,
                          meaningful_name=True)
        # save the frames of the classified unstable stages locally
        res.pick_and_save(unstable,
                          40,
                          to_dir=_picture_path + '/train_unstable_' +
                          file_name,
                          meaningful_name=True)
    else:
        res.pick_and_save(stable,
                          20,
                          to_dir=_picture_path + '/forecast_stable_' +
                          file_name,
                          meaningful_name=True)
        # save the frames of the classified unstable stages locally
        res.pick_and_save(unstable,
                          40,
                          to_dir=_picture_path + '/forecast_unstable_' +
                          file_name,
                          meaningful_name=True)

    return stable
Example #22
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SVMClassifier

video_path = "../demo.mp4"
amazon_image_path = "../amazon.png"
phone_image_path = "../phone.png"
message_image_path = "../message.png"

cutter = VideoCutter()
res = cutter.cut(video_path)
stable, _ = res.get_range()

# check whether the last stage contains the given image (amazon.png here)
# this picks a frame from the middle of the stage and runs template matching
# of course, this is not used that often; it is mostly for checking the final result rather than intermediate states
# note that the template matching here is affected by the compression rate
# resolution fitting is applied, but a very high compression rate can still make the image hard to recognise and cause false results
# normally this is not a problem
match_result = stable[-1].contain_image(amazon_image_path,
                                        engine_template_scale=(0.5, 2, 5))
print(match_result)
# the output gives: the most likely location, the similarity score, and whether the computation finished normally
# {'target_point': [550, 915], 'target_sim': 0.9867244362831116, 'ok': True}

data_home = res.pick_and_save(stable, 5)
cl = SVMClassifier()
cl.load(data_home)
cl.train()
classify_result = cl.classify(video_path, stable, keep_data=True)
result_dict = classify_result.to_dict()
Example #23
from stagesepx.classifier import SVMClassifier
from stagesepx.cutter import VideoCutter
from stagesepx.hook import IgnoreHook
from stagesepx.reporter import Reporter
from stagesepx.video import VideoObject
import pprint
from stagesepx.classifier.keras import KerasClassifier

# split the video into frames
file_name = './video_for_train.mp4'
video = VideoObject(file_name)
# load frames; work out how many frames the video has and how many ms each frame lasts
video.load_frames()
# compress the video
cutter = VideoCutter()
# compute ssim and psnr for every block of every frame
res = cutter.cut(video, block=6)
# work out which ranges are stable and which are unstable
stable, unstable = res.get_range(threshold=0.97, offset=2)
# save the frames of the classified stable stages locally
res.pick_and_save(stable,
                  100,
                  to_dir='./picture/train_stable_frame',
                  meaningful_name=True)

# train and save a model file
cl = KerasClassifier(
    # number of training epochs
    epochs=10)
cl.train('./picture/train_stable_frame')
cl.save_model('./model.h5', overwrite=True)
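
After training, the saved model can be reused on new videos without retraining, mirroring the SVM workflow in Example #16; this sketch assumes KerasClassifier exposes the same load_model call shown there for SVMClassifier, and the target video path is a placeholder:

# sketch: reuse the trained model on another video (assumes KerasClassifier.load_model, cf. Example #16)
target_video = VideoObject('./video_for_test.mp4')
target_video.load_frames()

cl = KerasClassifier()
cl.load_model('./model.h5')

res = cutter.cut(target_video, block=6)
stable, unstable = res.get_range(threshold=0.97, offset=2)
classify_result = cl.classify(target_video, stable)

r = Reporter()
r.draw(classify_result, report_path='./report.html')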