def get_detections_from_video_capture(self, video_job, video_capture):
    test = self._test
    for frame_index, frame in enumerate(video_capture):
        top_left_corner = frame[:20, :30]
        test.assertTrue(test_util.is_all_black(top_left_corner))

        bottom_right_corner = frame[80:, 50:]
        test.assertTrue(test_util.is_all_black(bottom_right_corner))

        top_right_corner = frame[:20, 50:]
        test.assertTrue(test_util.is_all_white(top_right_corner))

        bottom_left_corner = frame[80:, :30]
        test.assertTrue(test_util.is_all_white(bottom_left_corner))

        for corner in (top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner):
            test.assertEqual(mpf_util.Size(30, 20), mpf_util.Size.from_frame(corner))

        yield mpf.VideoTrack(frame_index, frame_index,
                             frame_locations={frame_index: mpf.ImageLocation(0, 0, 30, 20)})
        yield mpf.VideoTrack(frame_index, frame_index,
                             frame_locations={frame_index: mpf.ImageLocation(50, 80, 30, 20)})
        yield mpf.VideoTrack(frame_index, frame_index,
                             frame_locations={frame_index: mpf.ImageLocation(50, 0, 30, 20)})
        yield mpf.VideoTrack(frame_index, frame_index,
                             frame_locations={frame_index: mpf.ImageLocation(0, 80, 30, 20)})
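# test_util.is_all_black and test_util.is_all_white are helpers defined in the shared test
# utilities module. A minimal sketch of what they might look like, assuming 8-bit BGR numpy
# frames, is shown below; the real implementations may differ.
def is_all_black(frame):
    # True when every channel of every pixel is 0.
    return not frame.any()

def is_all_white(frame):
    # True when every channel of every pixel is 255.
    return bool(np.all(frame == 255))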
def test_rect_intersection(self):
    rect1 = mpf_util.Rect(0, 0, 8, 10)
    rect2 = mpf_util.Rect(2, 2, 4, 3)
    intersection = rect1.intersection(rect2)
    self.assertEqual(intersection, rect2.intersection(rect1))
    # rect1 encloses rect2
    self.assertEqual(rect2, intersection)

    # Partial overlap, with the second rect given as a pair of corner points.
    rect1 = mpf_util.Rect(2, 6, 5, 10)
    rect2_args: Tuple = (mpf_util.Point(4, 3), mpf_util.Point(10, 12))
    intersection = rect1.intersection(rect2_args)
    self.assertEqual(intersection, mpf_util.Rect.from_corners(*rect2_args).intersection(rect1))
    self.assertEqual((4, 6, 3, 6), intersection)

    # Partial overlap, with the second rect given as a corner and a size.
    rect1 = mpf_util.Rect(1, 3, 8, 4)
    rect2_args = ((6, 5), mpf_util.Size(9, 5))
    intersection = rect1.intersection(rect2_args)
    self.assertEqual(intersection, mpf_util.Rect.from_corner_and_size(*rect2_args).intersection(rect1))
    self.assertEqual((6, 5, 3, 2), intersection)

    # Rects with no overlap produce an empty intersection.
    rect1 = mpf_util.Rect(0, 0, 5, 5)
    rect2 = mpf_util.Rect(8, 8, 4, 4)
    intersection = rect1.intersection(rect2)
    self.assertEqual(intersection, rect2.intersection(rect1))
    self.assertEqual((0, 0, 0, 0), intersection)

    # Identical rects intersect in themselves.
    rect1 = mpf_util.Rect(0, 0, 5, 5)
    rect2 = mpf_util.Rect(0, 0, 5, 5)
    intersection = rect1.intersection(rect2)
    self.assertEqual(intersection, rect2.intersection(rect1))
    self.assertEqual(intersection, rect1)
def test_rect_info_methods(self):
    test_rect = mpf_util.Rect(2, 3, 4, 5)
    self.assertEqual(20, test_rect.area)
    self.assertFalse(test_rect.empty)
    self.assertEqual(mpf_util.Point(2, 3), test_rect.tl)
    self.assertEqual(mpf_util.Point(6, 8), test_rect.br)
    self.assertEqual(mpf_util.Size(4, 5), test_rect.size)
def test_can_handle_feed_forward_track(self):
    ff_track = mpf.VideoTrack(
        0, 29,
        frame_locations={
            1: mpf.ImageLocation(5, 5, 5, 10),
            3: mpf.ImageLocation(4, 4, 5, 6),
            7: mpf.ImageLocation(5, 5, 8, 9),
            11: mpf.ImageLocation(4, 5, 5, 6),
            12: mpf.ImageLocation(4, 4, 1, 2),
            20: mpf.ImageLocation(5, 5, 5, 5),
            25: mpf.ImageLocation(4, 4, 5, 5)
        })
    job = mpf.VideoJob('TEST', FRAME_FILTER_TEST_VIDEO, 0, -1,
                       dict(FEED_FORWARD_TYPE='SUPERSET_REGION'), {}, ff_track)
    cap = mpf_util.VideoCapture(job)

    # Only the frames present in the feed-forward track should be readable.
    self.assertEqual(7, cap.frame_count)
    self.assertFalse(cap.get_initialization_frames_if_available(100))

    # The superset region is the smallest rectangle containing every detection in the track.
    min_x = ff_track.frame_locations[3].x_left_upper
    max_x = ff_track.frame_locations[7].x_left_upper + ff_track.frame_locations[7].width
    min_y = ff_track.frame_locations[3].y_left_upper
    max_y = ff_track.frame_locations[1].y_left_upper + ff_track.frame_locations[1].height
    expected_size = mpf_util.Size(max_x - min_x, max_y - min_y)
    self.assertEqual(expected_size, cap.frame_size)

    self.assert_frame_read(cap, 1, expected_size, 0)
    self.assert_frame_read(cap, 3, expected_size, 1 / 7)
    self.assert_frame_read(cap, 7, expected_size, 2 / 7)
    self.assert_frame_read(cap, 11, expected_size, 3 / 7)
    self.assert_frame_read(cap, 12, expected_size, 4 / 7)
    self.assert_frame_read(cap, 20, expected_size, 5 / 7)
    self.assert_frame_read(cap, 25, expected_size, 6 / 7)

    self.assertAlmostEqual(1, cap.frame_position_ratio)
    self.assert_read_fails(cap)

    # reverse_transform should map segment frame indices back to the original video's indices.
    il = mpf.ImageLocation(0, 1, 2, 3)
    track = mpf.VideoTrack(0, 6, frame_locations={1: il, 2: il, 4: il, 5: il})
    cap.reverse_transform(track)
    self.assertEqual(1, track.start_frame)
    self.assertEqual(25, track.stop_frame)
    self.assert_dict_contains_keys(track.frame_locations, (3, 7, 12, 20))
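# assert_frame_read is a helper defined elsewhere in this test case. A minimal sketch of the
# idea, written under assumptions, is shown here: the position ratio is checked before reading,
# and the test video is presumed to encode each frame's original index so that a helper such as
# get_frame_number (hypothetical name) can recover it from the pixel data.
def assert_frame_read(self, cap, expected_frame_index, expected_size, expected_ratio):
    # The ratio reflects how far the capture has advanced before this read.
    self.assertAlmostEqual(expected_ratio, cap.frame_position_ratio)
    frame = next(cap)
    self.assertEqual(expected_size, mpf_util.Size.from_frame(frame))
    self.assertEqual(expected_frame_index, get_frame_number(frame))  # hypothetical helper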
def test_can_use_search_region_with_feed_forward_frame_type(self):
    ff_track = mpf.VideoTrack(0, 15, frame_locations={1: mpf.ImageLocation(5, 5, 5, 5)})
    job_properties = dict(
        FEED_FORWARD_TYPE='FRAME',
        SEARCH_REGION_ENABLE_DETECTION='True',
        SEARCH_REGION_TOP_LEFT_X_DETECTION='3',
        SEARCH_REGION_TOP_LEFT_Y_DETECTION='3',
        SEARCH_REGION_BOTTOM_RIGHT_X_DETECTION='6',
        SEARCH_REGION_BOTTOM_RIGHT_Y_DETECTION='8')
    job = mpf.VideoJob('TEST', FRAME_FILTER_TEST_VIDEO, 0, -1, job_properties, {}, ff_track)
    cap = mpf_util.VideoCapture(job)

    # The search region spanning (3, 3) to (6, 8) crops each frame down to 3x5.
    expected_size = mpf_util.Size(3, 5)
    self.assertEqual(expected_size, cap.frame_size)

    frame = next(cap)
    self.assertEqual(expected_size.width, frame.shape[1])
    self.assertEqual(expected_size.height, frame.shape[0])
def test_full_frame_orthogonal_rotation(self):
    size = mpf_util.Size(640, 480)
    # noinspection PyTypeChecker
    img = np.full(shape=(size.height, size.width, 3), fill_value=(255, 255, 255), dtype=np.uint8)

    for rotation in (0, 90, 180, 270):
        transformer = AffineFrameTransformer.rotate_full_frame(
            rotation, False, NoOpTransformer(size))
        transformed_img = transformer.transform_frame(img, 0)

        num_white = count_matching_pixels(transformed_img, (255, 255, 255))
        self.assertEqual(num_white, size.area)

        if rotation in (90, 270):
            self.assertEqual((size.height, size.width), mpf_util.Size.from_frame(transformed_img))
        else:
            self.assertEqual(size, mpf_util.Size.from_frame(transformed_img))
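# count_matching_pixels comes from this module's test helpers. A minimal sketch, assuming frames
# are numpy arrays of shape (height, width, 3), would count the pixels that exactly match the
# requested color; the real helper may be implemented differently.
def count_matching_pixels(frame, color):
    # A pixel matches only when all three channels equal the requested color.
    return int(np.sum(np.all(frame == color, axis=2)))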
def get_detections_from_image_reader(self, image_job, image_reader):
    test = self._test
    image = image_reader.get_image()

    top_left_corner = image[:20, :30]
    test.assertTrue(test_util.is_all_black(top_left_corner))

    bottom_right_corner = image[80:, 50:]
    test.assertTrue(test_util.is_all_black(bottom_right_corner))

    top_right_corner = image[:20, 50:]
    test.assertTrue(test_util.is_all_white(top_right_corner))

    bottom_left_corner = image[80:, :30]
    test.assertTrue(test_util.is_all_white(bottom_left_corner))

    for corner in (top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner):
        test.assertEqual(mpf_util.Size(30, 20), mpf_util.Size.from_frame(corner))

    yield mpf.ImageLocation(0, 0, 30, 20)
    yield mpf.ImageLocation(50, 80, 30, 20)
    yield mpf.ImageLocation(50, 0, 30, 20)
    yield mpf.ImageLocation(0, 80, 30, 20)
def assert_search_region_matches_rect(self, expected_region, search_region):
    self.assertEqual(expected_region, search_region.get_rect(mpf_util.Size(50, 100)))
def test_rect_creation(self):
    test_rect = mpf_util.Rect(2, 3, 4, 5)
    self.assertEqual(test_rect, (2, 3, 4, 5))
    self.assertEqual(test_rect, mpf_util.Rect.from_corner_and_size((2, 3), (4, 5)))
    self.assertEqual(test_rect, mpf_util.Rect.from_corner_and_size(mpf_util.Point(2, 3), mpf_util.Size(4, 5)))
    self.assertEqual(test_rect, mpf_util.Rect.from_corner_and_size(mpf_util.Point(2, 3), (4, 5)))
    self.assertEqual(test_rect, mpf_util.Rect.from_corners((2, 3), (6, 8)))
    self.assertEqual(test_rect, mpf_util.Rect.from_corners(mpf_util.Point(2, 3), mpf_util.Point(6, 8)))