Пример #1
0
    def test_fixed_frames(self):
        """Annotators given an explicit frame list only emit those frames."""
        wanted = [0, 3, 17, 18, 21]

        # build a pipeline where every annotator is restricted to `wanted`
        processor = FrameProcessor()
        processor.load_annotator(CIElabAnnotator(frames=wanted))
        processor.load_annotator(ClutterAnnotator(frames=wanted))
        processor.load_annotator(
            EmbedAnnotator(embedding=EmbedFrameKerasResNet50(), frames=wanted))
        processor.load_annotator(
            FaceAnnotator(detector=FaceDetectDlib(), frames=wanted))
        processor.load_annotator(
            ObjectAnnotator(detector=ObjectDetectRetinaNet(), frames=wanted))

        # batch size chosen so the middle batch contains none of the
        # requested frames, exercising the empty-batch path
        video_input = FrameInput("test-data/video-clip.mp4", bsize=8)
        processor.process(video_input, max_batch=3)

        # frame-level outputs preserve order; face/object detections are
        # compared as sets since row counts per frame can vary
        assert processor.collect("clutter")["frame"].tolist() == wanted
        assert processor.collect("cielab")["frame"].tolist() == wanted
        assert processor.collect("embed")["frame"].tolist() == wanted
        assert set(processor.collect("face")["frame"]) == set(wanted)
        assert set(processor.collect("object")["frame"]) == set(wanted)
Пример #2
0
    def test_process_empty_output(self):
        """The base FrameAnnotator produces an empty DictFrame."""
        processor = FrameProcessor()
        processor.load_annotator(FrameAnnotator())

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=4))

        assert processor.collect("base") == DictFrame()
Пример #3
0
    def test_process_full_output(self):
        """Processing the full clip yields one diff row per frame."""
        processor = FrameProcessor()
        processor.load_annotator(DiffAnnotator())

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=128))

        # the clip spans three full batches of 128 frames
        diff_output = processor.collect("diff")
        assert diff_output.shape[0] == 128 * 3
Пример #4
0
    def test_max_batch(self):
        """max_batch halts processing after the requested number of batches."""
        processor = FrameProcessor()
        processor.load_annotator(DiffAnnotator())

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=8),
                          max_batch=3)

        # exactly three batches of eight frames were processed
        assert processor.collect("diff").shape[0] == 3 * 8
Пример #5
0
    def test_no_quantiles(self):
        """Without quantiles, DiffAnnotator emits only the average value."""
        processor = FrameProcessor()
        processor.load_annotator(DiffAnnotator())

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=8),
                          max_batch=2)
        result = processor.collect("diff")

        assert set(result.keys()) == {"video", "frame", "avg_value"}
        assert isinstance(result["avg_value"], np.ndarray)
Пример #6
0
    def test_object_detection_cutoff(self):
        """A 0.6 score cutoff reduces the detection table to 12 rows."""
        annotator = ObjectAnnotator(
            freq=4, detector=ObjectDetectRetinaNet(cutoff=0.6))
        processor = FrameProcessor()
        processor.load_annotator(annotator)

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=8),
                          max_batch=2)
        detections = processor.collect("object")

        # 12 detections survive the cutoff, 8 columns each
        assert detections.shape == (12, 8)
Пример #7
0
    def test_histogram_only(self):
        """With num_dominant=0 the CIElab output has only the histogram column.

        Fix: removed the unused local ``keys`` — the assertion already calls
        ``obj_out.keys()`` directly.
        """
        fpobj = FrameProcessor()
        fpobj.load_annotator(CIElabAnnotator(num_dominant=0))

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("cielab")

        # no dominant-color columns: just video/frame ids and the histogram
        assert set(obj_out.keys()) == set(["video", "frame", "cielab"])
        assert issubclass(type(obj_out["cielab"]), np.ndarray)
        # 2 batches x 8 frames = 16 rows of 4096 histogram bins
        assert obj_out["cielab"].shape == (16, 4096)
Пример #8
0
    def test_clutter(self):
        """ClutterAnnotator emits one scalar clutter score per frame.

        Fix: removed the unused local ``keys`` — the assertion already calls
        ``obj_out.keys()`` directly.
        """
        fpobj = FrameProcessor()
        fpobj.load_annotator(ClutterAnnotator())

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("clutter")

        assert set(obj_out.keys()) == set(["video", "frame", "clutter"])
        assert issubclass(type(obj_out["clutter"]), np.ndarray)
        # 2 batches x 8 frames = 16 rows, one clutter value each
        assert obj_out["clutter"].shape == (16, 1)
Пример #9
0
    def test_embed_resnet(self, setup_tensorflow):
        """ResNet50 embedding yields one 2048-d vector per sampled frame."""
        annotator = EmbedAnnotator(freq=4, embedding=EmbedFrameKerasResNet50())
        processor = FrameProcessor()
        processor.load_annotator(annotator)

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=8),
                          max_batch=2)
        result = processor.collect("embed")

        assert set(result.keys()) == {"frame", "video", "embed"}
        assert isinstance(result["embed"], np.ndarray)
        # 16 frames sampled every 4th frame -> 4 embeddings of length 2048
        assert result["embed"].shape == (4, 2048)
Пример #10
0
    def test_png_resize(self):
        """PngAnnotator writes resized frames to disk and collects nothing."""
        output_dir = tempfile.mkdtemp()  # fresh scratch directory

        processor = FrameProcessor()
        processor.load_annotator(
            PngAnnotator(output_dir=output_dir, size=(32, 64)))

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=4),
                          max_batch=2)

        # two batches of four frames -> frame-000000.png .. frame-000007.png
        expected_files = {"frame-{0:06d}.png".format(i) for i in range(8)}
        assert processor.collect("png") == DictFrame()
        assert set(os.listdir(output_dir)) == expected_files
Пример #11
0
    def test_quantiles(self):
        """Requested quantiles produce q{NN} and h{NN} output columns.

        Fix: removed the unused local ``keys`` and the stray blank line after
        the def.
        """
        fpobj = FrameProcessor()
        fpobj.load_annotator(DiffAnnotator(quantiles=[40, 50]))

        finput = FrameInput("test-data/video-clip.mp4", bsize=8)
        fpobj.process(finput, max_batch=2)
        obj_out = fpobj.collect("diff")

        # each requested quantile yields a q-column and an h-column
        # (h presumably histogram-based — see DiffAnnotator)
        assert issubclass(type(obj_out["q40"]), np.ndarray)
        assert issubclass(type(obj_out["q50"]), np.ndarray)
        assert issubclass(type(obj_out["h40"]), np.ndarray)
        assert issubclass(type(obj_out["h50"]), np.ndarray)
Пример #12
0
    def test_meta_output_video(self):
        """Video metadata matches the known properties of the test clip."""
        processor = FrameProcessor()
        processor.load_annotator(MetaAnnotator())

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=8),
                          max_batch=2)
        meta = processor.collect("meta")

        assert set(meta.keys()) == {"width", "type", "vname", "fps",
                                    "height", "frames"}
        assert meta["width"] == [708]
        assert meta["height"] == [480]
        assert meta["type"] == ["video"]
        assert meta["vname"] == ["video-clip.mp4"]
        assert meta["fps"] == [29.97002997002997]
        assert meta["frames"] == [379]
Пример #13
0
    def test_meta_output_images(self):
        """Image-input metadata carries placeholder dimensions and type."""
        processor = FrameProcessor()
        processor.load_annotator(MetaAnnotator())

        image_input = ImageInput(input_paths=[
            "test-data/img/frame-000076.png",
            "test-data/img/frame-000506.png",
        ])
        processor.process(image_input, max_batch=2)
        meta = processor.collect("meta")

        assert set(meta.keys()) == {"width", "type", "vname", "height"}
        # -1 / "" act as sentinels for image batches (no single video source)
        assert meta["width"] == [-1]
        assert meta["height"] == [-1]
        assert meta["type"] == ["image"]
        assert meta["vname"] == [""]
Пример #14
0
    def test_face_detector_cutoff_mtcnn(self):
        """MTCNN with a strict confidence cutoff finds exactly four faces."""
        annotator = FaceAnnotator(detector=FaceDetectMtcnn(cutoff=0.99997),
                                  freq=4)
        processor = FrameProcessor()
        processor.load_annotator(annotator)

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=8),
                          max_batch=2)
        faces = processor.collect("face")

        assert set(faces.keys()) == {
            "video",
            "frame",
            "confidence",
            "top",
            "bottom",
            "left",
            "right",
        }
        # four detections survive the cutoff, seven columns each
        assert faces.shape == (4, 7)
Пример #15
0
    def test_object_detection(self):
        """RetinaNet with default settings returns 17 detections."""
        annotator = ObjectAnnotator(freq=4, detector=ObjectDetectRetinaNet())
        processor = FrameProcessor()
        processor.load_annotator(annotator)

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=8),
                          max_batch=2)
        detections = processor.collect("object")

        assert set(detections.keys()) == {
            "video",
            "frame",
            "score",
            "top",
            "bottom",
            "left",
            "right",
            "class",
        }
        # 17 detections, 8 columns each
        assert detections.shape == (17, 8)
Пример #16
0
    def test_png_image(self):
        """PngAnnotator copies image inputs to the output dir unchanged."""
        output_dir = tempfile.mkdtemp()  # fresh scratch directory

        processor = FrameProcessor()
        processor.load_annotator(PngAnnotator(output_dir=output_dir))

        image_input = ImageInput(input_paths=[
            "test-data/img/frame-000076.png",
            "test-data/img/frame-000506.png",
        ])
        processor.process(image_input, max_batch=2)

        # every input image must appear in the output directory, and the
        # collected result stays empty (output goes to disk only)
        expected_files = {"frame-000076.png", "frame-000506.png"}
        assert processor.collect("png") == DictFrame()
        assert set(os.listdir(output_dir)) == expected_files

        # the written file is pixel-identical to the source image
        original = cv2.imread(os.path.join("test-data/img/frame-000076.png"))
        copied = cv2.imread(os.path.join(output_dir, "frame-000076.png"))
        assert np.all(original == copied)
Пример #17
0
    def test_face_vgg2_embed(self):
        """VGGFace2 adds a 2048-d embedding column for each detected face."""
        annotator = FaceAnnotator(detector=FaceDetectDlib(),
                                  embedding=FaceEmbedVgg2(),
                                  freq=4)
        processor = FrameProcessor()
        processor.load_annotator(annotator)

        processor.process(FrameInput("test-data/video-clip.mp4", bsize=8),
                          max_batch=2)
        faces = processor.collect("face")

        assert set(faces.keys()) == {
            "video",
            "frame",
            "confidence",
            "top",
            "bottom",
            "left",
            "right",
            "embed",
        }
        assert isinstance(faces["embed"], np.ndarray)
        # eight detected faces, each with a 2048-d embedding
        assert faces["embed"].shape == (8, 2048)