def test_can_filter_on_key_frames_and_interval(self):
        """FRAME_INTERVAL applied on top of USE_KEY_FRAMES skips key frames by the interval."""
        cases = ((2, (0, 10, 20)),
                 (3, (0, 15)))
        for interval, expected_frames in cases:
            properties = dict(USE_KEY_FRAMES='true', FRAME_INTERVAL=str(interval))
            job = mpf.VideoJob('Test', FRAME_FILTER_TEST_VIDEO, 0, 1000, properties, {}, None)
            self.assert_expected_frames_shown(mpf_util.VideoCapture(job), expected_frames)
Beispiel #2
0
    def test_feed_forward_superset_region(self):
        """A SUPERSET_REGION feed-forward job should show (almost) only the blue
        track pixels regardless of the ROTATION job property.

        The expected blue-pixel bounds depend only on the track's detections,
        not on the job rotation, so they are computed once before the loop
        instead of being rebuilt on every iteration (loop-invariant hoist).
        """
        ff_track = mpf.VideoTrack(
            0, 2, -1, {
                0: mpf.ImageLocation(60, 300, 100, 40, -1,
                                     dict(ROTATION='260')),
                1: mpf.ImageLocation(160, 350, 130, 20, -1,
                                     dict(ROTATION='60')),
                2: mpf.ImageLocation(260, 340, 60, 60, -1, dict(ROTATION='20'))
            }, {})

        # Bounds allow one pixel of blending error along each region's border.
        expected_min_num_blue = 0
        expected_max_num_blue = 0
        for il in ff_track.frame_locations.values():
            area = il.width * il.height
            perimeter = 2 * il.width + 2 * il.height
            expected_min_num_blue += area - perimeter
            expected_max_num_blue += area + perimeter

        for rotation in range(0, 361, 20):
            job = mpf.VideoJob(
                'Test',
                test_util.get_data_file_path(
                    'rotation/feed-forward-rotation-test.png'),
                ff_track.start_frame, ff_track.stop_frame,
                dict(FEED_FORWARD_TYPE='SUPERSET_REGION',
                     ROTATION=str(rotation)), {}, ff_track)

            frame = next(mpf_util.VideoCapture(job))
            actual_num_blue = count_matching_pixels(frame, (255, 0, 0))
            # Color of pixels along edges gets blended with nearby pixels during interpolation.
            self.assertLessEqual(actual_num_blue, expected_max_num_blue)
            self.assertGreaterEqual(actual_num_blue, expected_min_num_blue)
Beispiel #3
0
    def test_feed_forward_exact_region(self):
        """FEED_FORWARD_TYPE=REGION crops each frame to exactly the detection's
        region, and reverse_transform maps a full-frame detection back onto it.
        """
        frame_locations = {
            0: mpf.ImageLocation(60, 300, 100, 40, -1, dict(ROTATION='260')),
            1: mpf.ImageLocation(160, 350, 130, 20, -1, dict(ROTATION='60')),
            2: mpf.ImageLocation(260, 340, 60, 60, -1, dict(ROTATION='20'))}
        ff_track = mpf.VideoTrack(0, 2, -1, frame_locations, {})
        job = mpf.VideoJob(
            'Test',
            test_util.get_data_file_path(
                'rotation/feed-forward-rotation-test.png'),
            ff_track.start_frame, ff_track.stop_frame,
            dict(FEED_FORWARD_TYPE='REGION'), {}, ff_track)

        test_img = cv2.imread(job.data_uri)
        transformer = frame_transformer_factory.get_transformer(
            job, mpf_util.Size.from_frame(test_img))

        for frame_number, ff_detection in ff_track.frame_locations.items():
            frame = transformer.transform_frame(test_img, frame_number)
            frame_size = mpf_util.Size.from_frame(frame)
            # Each cropped frame must match its detection's dimensions and be
            # entirely blue (BGR color ordering).
            self.assertEqual(frame_size,
                             (ff_detection.width, ff_detection.height))
            self.assert_image_color(frame, (255, 0, 0))

            # A detection covering the whole cropped frame should reverse-map
            # to the original feed-forward region.
            new_detection = mpf.ImageLocation(
                0, 0, frame_size.width, frame_size.height)
            transformer.reverse_transform(new_detection, frame_number)
            self.assert_detections_same_location(new_detection, ff_detection)
Beispiel #4
0
def create_video_job(start_frame, stop_frame, frame_interval=None):
    """Build a test VideoJob over [start_frame, stop_frame].

    When frame_interval is given, it is passed through as the FRAME_INTERVAL
    job property (stringified); otherwise no job properties are set.
    """
    job_properties = (
        {} if frame_interval is None
        else {'FRAME_INTERVAL': str(frame_interval)})
    return mpf.VideoJob('Test', FRAME_FILTER_TEST_VIDEO, start_frame,
                        stop_frame, job_properties, {}, None)
Beispiel #5
0
    def test_video_capture_mixin(self):
        """The VideoCapture mixin applies rotation, flip, and search-region
        transforms, and reverse-transforms the resulting detections."""
        job_properties = {
            'ROTATION': '270',
            'HORIZONTAL_FLIP': 'True',
            'SEARCH_REGION_ENABLE_DETECTION': 'true',
            'SEARCH_REGION_BOTTOM_RIGHT_X_DETECTION': '80',
            'SEARCH_REGION_BOTTOM_RIGHT_Y_DETECTION': '100'
        }

        # Image is treated like a single frame video.
        job = mpf.VideoJob('Test',
                           test_util.get_data_file_path('test_img.png'), 0, 0,
                           job_properties, dict(FRAME_COUNT='1'), None)

        component = VideoCaptureMixinComponent(self)
        results = list(component.get_detections_from_video(job))

        expected_rects = ((319, 199, 30, 20),
                          (239, 149, 30, 20),
                          (319, 149, 30, 20),
                          (239, 199, 30, 20))
        self.assertEqual(len(expected_rects), len(results))
        for expected_rect, result in zip(expected_rects, results):
            self.assertEqual(
                expected_rect,
                mpf_util.Rect.from_image_location(result.frame_locations[0]))
Beispiel #6
0
    def test_reverse_transform_no_feed_forward_with_search_region(self):
        """reverse_transform maps detections from search-region coordinates
        back to full-frame coordinates."""
        search_region_properties = dict(
            SEARCH_REGION_ENABLE_DETECTION='true',
            SEARCH_REGION_TOP_LEFT_X_DETECTION='3',
            SEARCH_REGION_TOP_LEFT_Y_DETECTION='4',
            SEARCH_REGION_BOTTOM_RIGHT_X_DETECTION='40',
            SEARCH_REGION_BOTTOM_RIGHT_Y_DETECTION='50')
        job = mpf.VideoJob('Test', FRAME_FILTER_TEST_VIDEO, 0, 30,
                           search_region_properties, {}, None)
        cap = mpf_util.VideoCapture(job)

        # Region (3, 4)-(40, 50) yields 37x46 frames.
        self.assertEqual((37, 46), cap.frame_size)

        track = create_test_track()
        cap.reverse_transform(track)

        self.assertEqual(5, track.start_frame)
        self.assertEqual(10, track.stop_frame)
        self.assertEqual(3, len(track.frame_locations))
        self.assert_dict_contains_keys(track.frame_locations, (5, 7, 10))

        # Detection shifted by the search region's top-left offset (3, 4);
        # width and height are unchanged.
        first_location = track.frame_locations[5]
        self.assertEqual(23, first_location.x_left_upper)
        self.assertEqual(34, first_location.y_left_upper)
        self.assertEqual(15, first_location.width)
        self.assertEqual(5, first_location.height)
    def test_vfr_handling(self):
        """Without HAS_CONSTANT_FRAME_RATE, mpf_util.VideoCapture reads the
        problematic video sequentially (grab up to the target frame) rather
        than seeking, so it lands on the correct frame.

        Fix: the raw cv2.VideoCapture used for the reference read was never
        released; it now releases its OS handle in a finally block.
        """
        target_frame = 40
        # NOTE(review): these VideoJob calls pass 6 positional args while other
        # tests in this file pass 7 (trailing None track) — presumably the last
        # parameter has a default; confirm against the mpf API.
        vfr_cap = mpf_util.VideoCapture(mpf.VideoJob(
            'Test', VIDEO_WITH_SET_FRAME_ISSUE, target_frame, 82, {}, {}))
        cfr_cap = mpf_util.VideoCapture(mpf.VideoJob(
            'Test', VIDEO_WITH_SET_FRAME_ISSUE, target_frame, 82, {},
            {'HAS_CONSTANT_FRAME_RATE': 'true'}))

        vfr_frame = next(vfr_cap)
        cfr_frame = next(cfr_cap)
        # The seek-based (CFR) path lands on a different frame than the
        # sequential (VFR) path on this video.
        self.assertFalse(np.array_equal(vfr_frame, cfr_frame))

        sequential_cap = cv2.VideoCapture(VIDEO_WITH_SET_FRAME_ISSUE)
        try:
            # Reference: grab frames 0..target_frame-1, then read the target.
            for _ in range(target_frame):
                sequential_cap.grab()
            sequential_frame = sequential_cap.read()[1]
        finally:
            # cv2.VideoCapture holds an OS file handle; release explicitly
            # instead of relying on garbage collection.
            sequential_cap.release()
        self.assertTrue(np.array_equal(vfr_frame, sequential_frame))
    def test_mpf_video_capture_does_not_have_set_frame_position_issue(self):
        """mpf_util.VideoCapture can seek near the end of the problematic video
        and still successfully read a frame.

        This guards against the raw cv2.VideoCapture behavior demonstrated in
        the test_cv_video_capture_set_frame_position_issue test case.
        """
        job = mpf.VideoJob('Test', VIDEO_WITH_SET_FRAME_ISSUE, 0, 1000, {}, {}, None)
        capture = mpf_util.VideoCapture(job, False, False)
        capture.set_frame_position(capture.frame_count - 5)

        was_read = capture.read()[0]
        self.assertTrue(was_read)
Beispiel #9
0
    def test_can_handle_feed_forward_track(self):
        """A SUPERSET_REGION feed-forward job exposes exactly the track's
        frames, sized to the union of all detections, and reverse_transform
        maps positional indices back to original frame numbers."""
        detections = {
            1: mpf.ImageLocation(5, 5, 5, 10),
            3: mpf.ImageLocation(4, 4, 5, 6),
            7: mpf.ImageLocation(5, 5, 8, 9),
            11: mpf.ImageLocation(4, 5, 5, 6),
            12: mpf.ImageLocation(4, 4, 1, 2),
            20: mpf.ImageLocation(5, 5, 5, 5),
            25: mpf.ImageLocation(4, 4, 5, 5)}
        ff_track = mpf.VideoTrack(0, 29, frame_locations=detections)

        job = mpf.VideoJob('TEST', FRAME_FILTER_TEST_VIDEO, 0, -1,
                           dict(FEED_FORWARD_TYPE='SUPERSET_REGION'), {},
                           ff_track)

        cap = mpf_util.VideoCapture(job)
        # Only the 7 frames with detections are exposed; no initialization
        # frames are available for a feed-forward job.
        self.assertEqual(7, cap.frame_count)
        self.assertFalse(cap.get_initialization_frames_if_available(100))

        # Superset region spans the bounding box of all detections.
        min_x = detections[3].x_left_upper
        max_x = detections[7].x_left_upper + detections[7].width
        min_y = detections[3].y_left_upper
        max_y = detections[1].y_left_upper + detections[1].height
        expected_size = mpf_util.Size(max_x - min_x, max_y - min_y)
        self.assertEqual(expected_size, cap.frame_size)

        # Frames come back in track order; position ratio advances in 1/7 steps.
        for position, frame_number in enumerate((1, 3, 7, 11, 12, 20, 25)):
            self.assert_frame_read(cap, frame_number, expected_size, position / 7)

        self.assertAlmostEqual(1, cap.frame_position_ratio)
        self.assert_read_fails(cap)

        # Positional indices 1, 2, 4, 5 reverse-map to frames 3, 7, 12, 20.
        il = mpf.ImageLocation(0, 1, 2, 3)
        track = mpf.VideoTrack(
            0, 6, frame_locations={index: il for index in (1, 2, 4, 5)})
        cap.reverse_transform(track)
        self.assertEqual(1, track.start_frame)
        self.assertEqual(25, track.stop_frame)
        self.assert_dict_contains_keys(track.frame_locations, (3, 7, 12, 20))
    def test_mpf_video_capture_does_not_have_get_frame_position_issue(self):
        """current_frame_position stays accurate after an explicit seek.

        This guards against the raw cv2.VideoCapture behavior demonstrated in
        the test_cv_video_capture_get_frame_position_issue test case.
        """
        job = mpf.VideoJob('Test', VIDEO_WITH_SET_FRAME_ISSUE, 0, 1000, {}, {}, None)
        capture = mpf_util.VideoCapture(job, False, False)

        capture.read()
        capture.set_frame_position(10)
        capture.read()

        # One read at position 10 must advance the reported position to 11.
        self.assertEqual(11, capture.current_frame_position)
Beispiel #11
0
    def test_feed_forward_cropper_crop_to_exact_region(self):
        """FEED_FORWARD_TYPE=REGION crops each frame to its detection, and
        reverse_transform maps detections back to original coordinates."""
        ff_track = mpf.VideoTrack(
            4, 29,
            frame_locations={
                4: mpf.ImageLocation(10, 60, 65, 125),
                15: mpf.ImageLocation(60, 20, 100, 200),
                29: mpf.ImageLocation(70, 0, 30, 240)})
        job = mpf.VideoJob('Test', FRAME_FILTER_TEST_VIDEO, 4, 29,
                           dict(FEED_FORWARD_TYPE='REGION'), {}, ff_track)
        cap = mpf_util.VideoCapture(job)
        output_track = mpf.VideoTrack(0, 2)

        # First two detections: report a region covering the whole cropped frame.
        for expected_frame_number, expected_dims in ((4, (65, 125)),
                                                     (15, (100, 200))):
            frame_pos = cap.current_frame_position
            frame = next(cap)
            self.assertEqual(expected_frame_number, get_frame_number(frame))
            self.assertEqual(expected_dims, mpf_util.Size.from_frame(frame))
            output_track.frame_locations[frame_pos] = mpf.ImageLocation(
                0, 0, frame.shape[1], frame.shape[0])

        # Last detection: report a region covering only part of the cropped frame.
        frame_pos = cap.current_frame_position
        frame = next(cap)
        self.assertEqual(29, get_frame_number(frame))
        self.assertEqual((30, 240), mpf_util.Size.from_frame(frame))
        output_track.frame_locations[frame_pos] = mpf.ImageLocation(
            5, 40, 15, 60)

        self.assert_read_fails(cap)

        cap.reverse_transform(output_track)
        self.assertEqual(len(ff_track.frame_locations),
                         len(output_track.frame_locations))

        # Full-frame detections reverse-map to the original feed-forward regions.
        self.assertEqual(ff_track.frame_locations[4],
                         output_track.frame_locations[4])
        self.assertEqual(ff_track.frame_locations[15],
                         output_track.frame_locations[15])

        # The partial detection is offset by its region's top-left corner
        # (70, 0); width and height are unchanged.
        last_detection = output_track.frame_locations[29]
        self.assertEqual(75, last_detection.x_left_upper)
        self.assertEqual(40, last_detection.y_left_upper)
        self.assertEqual(15, last_detection.width)
        self.assertEqual(60, last_detection.height)
    def test_reverse_transform_no_feed_forward_no_search_region(self):
        """With no feed-forward track and no search region configured,
        reverse_transform leaves the track completely unchanged."""
        job = mpf.VideoJob('Test', FRAME_FILTER_TEST_VIDEO, 0, 30, {}, {}, None)
        capture = mpf_util.VideoCapture(job)

        track = create_test_track()
        capture.reverse_transform(track)

        self.assertEqual(5, track.start_frame)
        self.assertEqual(10, track.stop_frame)
        self.assertEqual(3, len(track.frame_locations))
        self.assert_dict_contains_keys(track.frame_locations, (5, 7, 10))

        # The first detection keeps its original position and size.
        first_location = track.frame_locations[5]
        self.assertEqual(20, first_location.x_left_upper)
        self.assertEqual(30, first_location.y_left_upper)
        self.assertEqual(15, first_location.width)
        self.assertEqual(5, first_location.height)
    def test_can_use_search_region_with_feed_forward_frame_type(self):
        """With FEED_FORWARD_TYPE=FRAME a search region still applies, so
        frames are cropped to the configured region."""
        ff_track = mpf.VideoTrack(0, 15, frame_locations={1: mpf.ImageLocation(5, 5, 5, 5)})
        job_properties = dict(
            FEED_FORWARD_TYPE='FRAME',
            SEARCH_REGION_ENABLE_DETECTION='True',
            SEARCH_REGION_TOP_LEFT_X_DETECTION='3',
            SEARCH_REGION_TOP_LEFT_Y_DETECTION='3',
            SEARCH_REGION_BOTTOM_RIGHT_X_DETECTION='6',
            SEARCH_REGION_BOTTOM_RIGHT_Y_DETECTION='8')

        job = mpf.VideoJob('TEST', FRAME_FILTER_TEST_VIDEO, 0, -1, job_properties, {}, ff_track)
        capture = mpf_util.VideoCapture(job)

        # Region (3, 3)-(6, 8) yields 3x5 frames.
        expected_size = mpf_util.Size(3, 5)
        self.assertEqual(expected_size, capture.frame_size)

        frame = next(capture)
        frame_height, frame_width = frame.shape[0], frame.shape[1]
        self.assertEqual(expected_size.width, frame_width)
        self.assertEqual(expected_size.height, frame_height)
 def _(self, video_path: str):
     """Re-initialize this object to read *video_path* from the beginning.

     Builds a fresh VideoJob spanning the whole video (start 0, stop -1)
     with empty property maps and re-runs __init__ with it.

     NOTE(review): VideoJob here gets 6 positional args while other calls in
     this file pass 7 (trailing None) — presumably the last parameter has a
     default; confirm against the mpf API.
     """
     self.__init__(mpf.VideoJob('', video_path, 0, -1, {}, {}), False,
                   False)
Beispiel #15
0
 def test_can_filter_on_key_frames_and_start_stop_frame(self):
     """Key-frame filtering respects the job's start and stop frame bounds."""
     key_frame_properties = dict(USE_KEY_FRAMES='true')
     job = mpf.VideoJob('Test', FRAME_FILTER_TEST_VIDEO, 6, 21,
                        key_frame_properties, {}, None)
     self.assert_expected_frames_shown(mpf_util.VideoCapture(job), (10, 15, 20))