Code Example #1
def get_video_annotation():
    # Run a battery of dvt annotators over the test clip, sampling one frame
    # in every 128 and stopping after two 256-frame batches (max_batch=2).
    setup_tensorflow()

    output_dir = mkdtemp()
    png_output = join(output_dir, "png")
    oflow_output = join(output_dir, "oflow")

    de = DataExtraction(FrameInput(
        input_path="test-data/video-clip.mp4", bsize=256
    ))

    freq = 128
    de.run_annotators([
        ColorHistogramAnnotator(freq=freq),
        DominantColorAnnotator(freq=freq),
        DiffAnnotator(quantiles=[40]),
        EmbedAnnotator(embedding=EmbedFrameKerasResNet50(), freq=freq),
        FaceAnnotator(
            detector=FaceDetectMtcnn(),
            embedding=FaceEmbedVgg2(),
            freq=freq
        ),
        HOFMAnnotator(freq=freq),
        ObjectAnnotator(detector=ObjectDetectRetinaNet(), freq=freq),
        OpticalFlowAnnotator(output_dir=oflow_output, freq=freq),
        PngAnnotator(output_dir=png_output, freq=freq)
    ], max_batch=2)

    return de, output_dir
Code Example #2
def get_video_frame_annotation():
    # Like get_video_annotation(), but annotate an explicit list of frame
    # indices rather than sampling at a fixed frequency.
    setup_tensorflow()

    output_dir = mkdtemp()
    png_output = join(output_dir, "png")
    oflow_output = join(output_dir, "oflow")

    de = DataExtraction(FrameInput(
        input_path="test-data/video-clip.mp4", bsize=128
    ))

    frames = [1, 3, 310]   # make sure there is an empty batch: 128-255
    de.run_annotators([
        ColorHistogramAnnotator(frames=frames, colorspace="lab"),
        DominantColorAnnotator(frames=frames),
        DiffAnnotator(quantiles=[40]),
        EmbedAnnotator(embedding=EmbedFrameKerasResNet50(), frames=frames),
        FaceAnnotator(
            detector=FaceDetectMtcnn(),
            embedding=FaceEmbedVgg2(),
            frames=frames
        ),
        HOFMAnnotator(frames=frames),
        ObjectAnnotator(detector=ObjectDetectRetinaNet(), frames=frames),
        OpticalFlowAnnotator(output_dir=oflow_output, frames=frames),
        PngAnnotator(output_dir=png_output, frames=frames),
        ImgAnnotator()
    ])

    return de, output_dir
Code Example #3
    def test_max_batch(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=3)

        obj_diff = fpobj.collect("diff")
        assert obj_diff.shape[0] == (3 * 8)
Code Example #4
    def test_process_full_output(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=128)
        fpobj.process(finput)

        obj_diff = fpobj.collect("diff")
        assert obj_diff.shape[0] == (128 * 3)
Code Example #5
File: test_annotators.py  Project: Nanne/dvt
    def test_no_quantiles(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("diff")

        assert set(obj_out.keys()) == set(["video", "frame", "avg_value"])
        assert issubclass(type(obj_out["avg_value"]), np.ndarray)
Code Example #6
    def test_collect_all(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(FrameAnnotator())
        fpobj.load_annotator(DiffAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)

        output = fpobj.collect_all()

        assert output["base"] == DictFrame()
        assert output["diff"].shape[0] == (2 * 8)
Code Example #7
File: test_aggregators.py  Project: Nanne/dvt
    def test_cutoff_empty(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=128)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect_all()

        ca = CutAggregator()
        agg = ca.aggregate(obj_out)

        assert agg["frame_start"] == list(range(256))
Code Example #8
File: test_aggregators.py  Project: Nanne/dvt
    def test_cutoff(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=128)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect_all()

        ca = CutAggregator(cut_vals={"h40": 0.2, "q40": 3})
        agg = ca.aggregate(obj_out)

        assert set(agg.keys()) == set(["video", "frame_start", "frame_end"])
Code Example #9
File: test_annotators.py  Project: Nanne/dvt
    def test_quantiles(self):

        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40, 50]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("diff")
        keys = list(obj_out.keys())

        assert issubclass(type(obj_out["q40"]), np.ndarray)
        assert issubclass(type(obj_out["q50"]), np.ndarray)
        assert issubclass(type(obj_out["h40"]), np.ndarray)
        assert issubclass(type(obj_out["h50"]), np.ndarray)
Code Example #10
File: test_aggregators.py  Project: Nanne/dvt
    def test_cutoff_ignore(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=128)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect_all()

        ca = CutAggregator(
            cut_vals={"h40": 0.2, "q40": 3}, ignore_vals={"avg_value": 70}
        )
        agg = ca.aggregate(obj_out)

        assert agg == DictFrame()
Code Example #11
import getopt
import logging
import os
import sys

# dvt imports below assume the FrameProcessor-era (roughly 0.2.x) module
# layout; paths may differ in other releases of the toolkit.
from dvt.annotate.core import FrameProcessor, FrameInput
from dvt.annotate.diff import DiffAnnotator
from dvt.annotate.meta import MetaAnnotator
from dvt.aggregate.cut import CutAggregator


def main(argv):
    # Detect shot cuts with DiffAnnotator + CutAggregator and write the cut
    # ranges (frames and seconds) to <basename>.csv.
    try:
        # -h is reserved for help, so the histogram threshold is only
        # settable through the long option --histdiff.
        opts, args = getopt.getopt(
            argv, "hf:q:c:",
            ["file=", "quantiles=", "cutvals=", "histdiff="])

    except getopt.GetoptError:
        print(
            'dvt_to_sat.py -f <filename> -q <quantiles> -c <cutvals> --histdiff <histdiff>'
        )
        sys.exit(2)

    filename = ''
    quantiles = 40
    cutvals = 3
    histdiff = 0

    for opt, arg in opts:
        if opt == '-h':
            print(
                'dvt_to_sat.py -f <filename> -q <quantiles> -c <cutvals> --histdiff <histdiff>'
            )
            sys.exit()
        elif opt in ("-f", "--file"):
            filename = arg
        elif opt in ("-q", "--quantiles"):
            quantiles = arg
        elif opt in ("-c", "--cutvals"):
            cutvals = arg
        elif opt in ("-h", "--histdiff"):
            cutvals = arg

    if (filename == ''):
        print('missing param: filename is required')
        sys.exit(2)

    basename = os.path.basename(filename)

    logging.basicConfig(level='INFO')
    finput = FrameInput(filename, bsize=128)
    fpobj = FrameProcessor()
    fpobj.load_annotator(MetaAnnotator())
    fpobj.load_annotator(DiffAnnotator(quantiles=[quantiles]))
    fpobj.process(finput)
    #fpobj.process(finput, max_batch=5)

    obj = fpobj.collect_all()
    meta = obj['meta'].todf()

    # threshold keys follow the requested quantile (e.g. h40/q40 for -q 40)
    cagg = CutAggregator(cut_vals={
        'h' + str(quantiles): histdiff,
        'q' + str(quantiles): cutvals
    })
    fps = meta['fps'][0]
    print(fps)
    df1 = cagg.aggregate(obj).todf()
    df1['seconds_start'] = df1['frame_start'] / fps
    df1['seconds_end'] = df1['frame_end'] / fps

    out = df1.to_csv()

    with open(basename + ".csv", "w") as outfile:
        outfile.write(out)
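For context, a hypothetical driver for the entry point above, passing an explicit argument list instead of sys.argv; the clip path reuses the test video from the earlier examples and the flag values are illustrative only:

# Hypothetical invocation, not part of the original script: this call would
# write video-clip.mp4.csv with the detected cut ranges in frames and seconds.
if __name__ == "__main__":
    main(["-f", "test-data/video-clip.mp4", "-q", "40", "-c", "3", "--histdiff", "0.1"])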