Example No. 1
    def test_batch_size(self):
        fobj = FrameInput("test-data/video-clip.mp4", bsize=4)

        batch = fobj.next_batch()
        bframes = batch.get_batch()
        assert bframes.shape[0] == 4
        assert batch.bsize == 4
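A note on setup: these snippets omit their imports. As a rough sketch, assuming the legacy distant-viewing-toolkit (dvt) module layout these class names come from (exact paths vary by version, so treat them as placeholders), the test module would start with:

import os
import tempfile

import numpy as np

from dvt.annotate.core import FrameAnnotator, FrameInput, FrameProcessor
from dvt.annotate.diff import DiffAnnotator
from dvt.aggregate.cut import CutAggregator

plus the corresponding imports for the face, object, embedding, and color annotators used below.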
Example No. 2
    def test_get_frame_names(self):
        fobj = FrameInput("test-data/video-clip.mp4", bsize=4)

        batch = fobj.next_batch()
        batch = fobj.next_batch()
        fnames = batch.get_frame_names()

        assert fnames == [4, 5, 6, 7]
Example No. 3
    def test_batch_bsize(self):
        fobj = FrameInput("test-data/video-clip.mp4", bsize=16)
        batch = fobj.next_batch()
        frames = batch.get_batch()

        assert batch.bnum == 0
        assert fobj.continue_read
        assert frames.shape[0] == 16
Example No. 4
    def test_continue_bnum(self):
        fobj = FrameInput("test-data/video-clip.mp4", bsize=256)

        batch = fobj.next_batch()
        assert batch.bnum == 0
        assert fobj.continue_read

        batch = fobj.next_batch()
        assert batch.bnum == 1
        assert not fobj.continue_read

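        # the clip is shorter than two 256-frame batches, so the tail of the
        # final buffer is presumably zero-padded; the slice below checks that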
        frames = batch.get_frames()
        bwidth = frames[batch.bsize :, :, :, :]
        assert np.max(bwidth) == 0
Example No. 5
    def test_fixed_frames(self):
        # only grab these frames
        frames = [0, 3, 17, 18, 21]

        # create processor pipeline
        fpobj = FrameProcessor()
        fpobj.load_annotator(CIElabAnnotator(frames=frames))
        fpobj.load_annotator(ClutterAnnotator(frames=frames))
        fpobj.load_annotator(
            EmbedAnnotator(embedding=EmbedFrameKerasResNet50(), frames=frames))
        fpobj.load_annotator(
            FaceAnnotator(detector=FaceDetectDlib(), frames=frames))
        fpobj.load_annotator(
            ObjectAnnotator(detector=ObjectDetectRetinaNet(), frames=frames))

        # run over the input, making sure to include a batch (the middle one)
        # that does not have any data
        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=3)

        # check that the output is the correct size
        assert fpobj.collect("clutter")["frame"].tolist() == frames
        assert fpobj.collect("cielab")["frame"].tolist() == frames
        assert fpobj.collect("embed")["frame"].tolist() == frames
        assert set(fpobj.collect("face")["frame"]) == set(frames)
        assert set(fpobj.collect("object")["frame"]) == set(frames)
Example No. 6
    def test_fprint(self):
        face_anno = FaceAnnotator(
            detector=FaceDetectDlib(), embedding=FaceEmbedVgg2(), freq=4
        )

        fpobj = FrameProcessor()
        fpobj.load_annotator(face_anno)

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect_all()

        pa = PeopleAggregator(
            face_names=["person 1", "person 2"],
            fprint=obj_out["face"]["embed"][[0, 1]],
        )
        agg = pa.aggregate(obj_out).todf()

        assert set(agg.keys()) == set(
            [
                "video",
                "frame",
                "top",
                "bottom",
                "right",
                "left",
                "confidence",
                "person",
                "person-dist",
            ]
        )
Example No. 7
    def test_process_empty_output(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(FrameAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=4)
        fpobj.process(finput)

        assert fpobj.collect("base") == DictFrame()
Example No. 8
    def test_sub_image_reshape(self):

        finput = FrameInput("test-data/video-clip.mp4", bsize=1)
        batch = get_batch(finput, batch_num=0)
        img = batch.get_frames()[0, :, :, :]

        simg = sub_image(img, 32, 64, 64, 32, output_shape=(100, 100))
        assert simg.shape == (100, 100, 3)
Example No. 9
    def test_sub_image_fct(self):

        finput = FrameInput("test-data/video-clip.mp4", bsize=1)
        batch = get_batch(finput, batch_num=0)
        img = batch.get_frames()[0, :, :, :]

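        # fct=1.5 appears to scale the 64x128 crop about its center; the top
        # and left edges clip at the frame boundary, so the result is 80x160
        # rather than the full 96x192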
        simg = sub_image(img, top=0, right=128, bottom=64, left=0, fct=1.5)
        assert simg.shape == (80, 160, 3)
Example No. 10
    def test_get_batch(self):

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        batch = get_batch(finput, batch_num=3)

        assert issubclass(type(batch), FrameBatch)
        assert batch.bnum == 3
        assert batch.bsize == 8
Example No. 11
    def test_max_batch(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=3)

        obj_diff = fpobj.collect("diff")
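        # DiffAnnotator appears to emit one row per frame, so three batches of
        # eight frames yield 24 rows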
        assert obj_diff.shape[0] == (3 * 8)
Example No. 12
    def test_metadata(self):
        fobj = FrameInput("test-data/video-clip.mp4")

        assert fobj.meta["type"] == "video"
        assert fobj.meta["fps"] == 29.97002997002997
        assert fobj.meta["frames"] == 379
        assert fobj.meta["height"] == 480
        assert fobj.meta["width"] == 708
        assert fobj.vname == "video-clip.mp4"
Example No. 13
    def test_process_full_output(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=128)
        fpobj.process(finput)

        obj_diff = fpobj.collect("diff")
        assert obj_diff.shape[0] == (128 * 3)
Example No. 14
    def test_no_quantiles(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("diff")

        assert set(obj_out.keys()) == set(["video", "frame", "avg_value"])
        assert issubclass(type(obj_out["avg_value"]), np.ndarray)
Example No. 15
    def test_object_detection_cutoff(self):
        anno = ObjectAnnotator(freq=4,
                               detector=ObjectDetectRetinaNet(cutoff=0.6))
        fpobj = FrameProcessor()
        fpobj.load_annotator(anno)

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("object")

        assert obj_out.shape == (12, 8)
Example No. 16
    def test_histogram_only(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(CIElabAnnotator(num_dominant=0))

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("cielab")

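        # 16 rows = 2 batches x bsize 8; the 4096 columns are presumably a
        # 16x16x16 histogram over the CIELAB color space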
        assert set(obj_out.keys()) == set(["video", "frame", "cielab"])
        assert issubclass(type(obj_out["cielab"]), np.ndarray)
        assert obj_out["cielab"].shape == (16, 4096)
Example No. 17
    def test_clutter(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(ClutterAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("clutter")

        assert set(obj_out.keys()) == set(["video", "frame", "clutter"])
        assert issubclass(type(obj_out["clutter"]), np.ndarray)
        assert obj_out["clutter"].shape == (16, 1)
Example No. 18
    def test_cutoff(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=128)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect_all()

        ca = CutAggregator(cut_vals={"h40": 0.2, "q40": 3})
        agg = ca.aggregate(obj_out)

        assert set(agg.keys()) == set(["video", "frame_start", "frame_end"])
Example No. 19
    def test_embed_resnet(self, setup_tensorflow):
        anno = EmbedAnnotator(freq=4, embedding=EmbedFrameKerasResNet50())
        fpobj = FrameProcessor()
        fpobj.load_annotator(anno)

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("embed")

        assert set(obj_out.keys()) == set(["frame", "video", "embed"])
        assert issubclass(type(obj_out["embed"]), np.ndarray)
        assert obj_out["embed"].shape == (4, 2048)
Example No. 20
    def test_collect_all(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(FrameAnnotator())
        fpobj.load_annotator(DiffAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)

        output = fpobj.collect_all()

        assert output["base"] == DictFrame()
        assert output["diff"].shape[0] == (2 * 8)
Example No. 21
    def test_cutoff_empty(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=128)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect_all()

        ca = CutAggregator()
        agg = ca.aggregate(obj_out)

        assert agg["frame_start"] == list(range(256))
Example No. 22
    def test_png_resize(self):
        dname = tempfile.mkdtemp()  # creates directory

        fpobj = FrameProcessor()
        fpobj.load_annotator(PngAnnotator(output_dir=dname, size=(32, 64)))

        finput = FrameInput("test-data/video-clip.mp4", bsize=4)
        fpobj.process(finput, max_batch=2)

        expected_files = set(["frame-{0:06d}.png".format(x) for x in range(8)])
        obj_out = fpobj.collect("png")
        assert obj_out == DictFrame()
        assert set(os.listdir(dname)) == expected_files
Example No. 23
    def test_quantiles(self):

        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40, 50]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("diff")

        assert issubclass(type(obj_out["q40"]), np.ndarray)
        assert issubclass(type(obj_out["q50"]), np.ndarray)
        assert issubclass(type(obj_out["h40"]), np.ndarray)
        assert issubclass(type(obj_out["h50"]), np.ndarray)
Example No. 24
    def test_cutoff_ignore(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=128)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect_all()

        ca = CutAggregator(
            cut_vals={"h40": 0.2, "q40": 3}, ignore_vals={"avg_value": 70}
        )
        agg = ca.aggregate(obj_out)

        assert agg == DictFrame()
Example No. 25
    def test_channels(self):

        finput = FrameInput("test-data/video-clip.mp4", bsize=1)
        batch = get_batch(finput, batch_num=0)
        img = batch.get_frames()[0, :, :, :]
        face = DictFrame({"top": 0, "bottom": 96, "left": 0, "right": 96})

        femb = FaceEmbedVgg2()

        femb._iformat = "channels_first"
        emb1 = femb.embed(img, face)
        femb._iformat = "channels_last"
        emb2 = femb.embed(img, face)

        assert (emb1 != emb2).any()
Example No. 26
    def test_meta_output_video(self):
        fpobj = FrameProcessor()
        fpobj.load_annotator(MetaAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("meta")

        expected_keys = ["width", "type", "vname", "fps", "height", "frames"]
        assert set(obj_out.keys()) == set(expected_keys)
        assert obj_out["width"] == [708]
        assert obj_out["height"] == [480]
        assert obj_out["type"] == ["video"]
        assert obj_out["vname"] == ["video-clip.mp4"]
        assert obj_out["fps"] == [29.97002997002997]
        assert obj_out["frames"] == [379]
Example No. 27
    def test_face_detector_cutoff_mtcnn(self):
        anno = FaceAnnotator(detector=FaceDetectMtcnn(cutoff=0.99997), freq=4)
        fpobj = FrameProcessor()
        fpobj.load_annotator(anno)

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("face")

        expected_keys = [
            "video",
            "frame",
            "confidence",
            "top",
            "bottom",
            "left",
            "right",
        ]
        assert set(obj_out.keys()) == set(expected_keys)
        assert obj_out.shape == (4, 7)
Example No. 28
    def test_object_detection(self):
        anno = ObjectAnnotator(freq=4, detector=ObjectDetectRetinaNet())
        fpobj = FrameProcessor()
        fpobj.load_annotator(anno)

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("object")

        expected_keys = [
            "video",
            "frame",
            "score",
            "top",
            "bottom",
            "left",
            "right",
            "class",
        ]
        assert set(obj_out.keys()) == set(expected_keys)
        assert obj_out.shape == (17, 8)
Example No. 29
    def test_face_vgg2_embed(self):
        anno = FaceAnnotator(detector=FaceDetectDlib(),
                             embedding=FaceEmbedVgg2(),
                             freq=4)
        fpobj = FrameProcessor()
        fpobj.load_annotator(anno)

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("face")

        expected_keys = [
            "video",
            "frame",
            "confidence",
            "top",
            "bottom",
            "left",
            "right",
            "embed",
        ]
        assert set(obj_out.keys()) == set(expected_keys)
        assert issubclass(type(obj_out["embed"]), np.ndarray)
        assert obj_out["embed"].shape == (8, 2048)
Example No. 30
import getopt
import logging
import os
import sys

# These dvt import paths are a guess at the legacy distant-viewing-toolkit
# layout the class names come from; adjust them to the installed version.
from dvt.annotate.core import FrameInput, FrameProcessor
from dvt.annotate.diff import DiffAnnotator
from dvt.annotate.meta import MetaAnnotator
from dvt.aggregate.cut import CutAggregator


def main(argv):
    try:
        opts, args = getopt.getopt(
            argv, "hf:q:c:d:",
            ["file=", "quantiles=", "cutvals=", "histdiff="])

    except getopt.GetoptError:
        print(
            'dvt_to_sat.py -f <filename> -q <quantiles> -c <cutvals> -d <histdiff>'
        )
        sys.exit(2)

    filename = ''
    quantiles = 40
    cutvals = 3
    histdiff = 0

    for opt, arg in opts:
        if opt == '-h':
            print(
                'dvt_to_sat.py -f <filename> -q <quantiles> -c <cutvals> -d <histdiff>'
            )
            sys.exit()
        elif opt in ("-f", "--file"):
            filename = arg
        elif opt in ("-q", "--quantiles"):
            quantiles = int(arg)
        elif opt in ("-c", "--cutvals"):
            cutvals = float(arg)
        elif opt in ("-d", "--histdiff"):
            histdiff = float(arg)

    if filename == '':
        print('missing param: filename is required')
        sys.exit(2)

    basename = os.path.basename(filename)

    logging.basicConfig(level='INFO')
    finput = FrameInput(filename, bsize=128)
    fpobj = FrameProcessor()
    fpobj.load_annotator(MetaAnnotator())
    fpobj.load_annotator(DiffAnnotator(quantiles=[quantiles]))
    fpobj.process(finput)
    # fpobj.process(finput, max_batch=5)  # uncomment to test on a few batches

    obj = fpobj.collect_all()
    meta = obj['meta'].todf()

    cagg = CutAggregator(cut_vals={
        'h' + str(quantiles): histdiff,
        'q' + str(quantiles): cutvals
    })
    fps = meta['fps'][0]
    print(fps)
    df1 = cagg.aggregate(obj).todf()
    df1['seconds_start'] = df1['frame_start'] / fps
    df1['seconds_end'] = df1['frame_end'] / fps

    out = df1.to_csv()

    with open(basename + ".csv", "w") as f:
        f.write(out)


if __name__ == "__main__":
    main(sys.argv[1:])
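A quick way to exercise main() directly, using the same test clip as the examples above (this writes video-clip.mp4.csv to the working directory):

main(["-f", "test-data/video-clip.mp4", "-q", "40", "-c", "3", "-d", "0"])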