def get_detections_from_video(self, video_job):
    """Echo the job and media properties back to the caller as two test tracks.

    If a feed-forward track is present it is returned unchanged.
    """
    logger.info('[%s] Received video job: %s', video_job.job_name, video_job)
    if video_job.feed_forward_track is not None:
        return [video_job.feed_forward_track]

    echo_job, echo_media = self.get_echo_msgs(video_job)

    first_track = mpf.VideoTrack(0, 1)
    first_track.frame_locations[0] = mpf.ImageLocation(
        1, 2, 3, 4, -1,
        {'METADATA': 'test', 'ECHO_JOB': echo_job, 'ECHO_MEDIA': echo_media})

    second_location = mpf.ImageLocation(5, 6, 7, 8, -1)
    second_location.detection_properties['ECHO_JOB'] = echo_job
    second_location.detection_properties['ECHO_MEDIA'] = echo_media
    first_track.frame_locations[1] = second_location

    first_track.detection_properties.update(video_job.job_properties)
    first_track.detection_properties.update(video_job.media_properties)

    # Make sure regular collections are accepted
    second_track = mpf.VideoTrack(
        3, 4, -1,
        {3: mpf.ImageLocation(
            9, 10, 11, 12, -1,
            [('ECHO_JOB', echo_job), ('ECHO_MEDIA', echo_media)])},
        mpf.Properties(ECHO_JOB=echo_job, ECHO_MEDIA=echo_media))

    return [first_track, second_track]
def get_detections_from_video_capture(self, video_job, video_capture):
    """Check each frame's corner colors, then yield one track per corner region."""
    test = self._test
    for frame_index, frame in enumerate(video_capture):
        # Frame is expected to have black top-left/bottom-right corners and
        # white top-right/bottom-left corners, each 30 wide by 20 tall.
        top_left = frame[:20, :30]
        test.assertTrue(test_util.is_all_black(top_left))
        bottom_right = frame[80:, 50:]
        test.assertTrue(test_util.is_all_black(bottom_right))
        top_right = frame[:20, 50:]
        test.assertTrue(test_util.is_all_white(top_right))
        bottom_left = frame[80:, :30]
        test.assertTrue(test_util.is_all_white(bottom_left))

        for corner in (top_left, bottom_right, top_right, bottom_left):
            test.assertEqual(mpf_util.Size(30, 20),
                             mpf_util.Size.from_frame(corner))

        # Single-frame track at each corner, in the same order as the checks above.
        for x_pos, y_pos in ((0, 0), (50, 80), (50, 0), (0, 80)):
            yield mpf.VideoTrack(
                frame_index, frame_index,
                frame_locations={
                    frame_index: mpf.ImageLocation(x_pos, y_pos, 30, 20)})
def test_can_handle_feed_forward_track(self):
    """VideoCapture over a SUPERSET_REGION feed-forward track exposes only the track's frames."""
    ff_track = mpf.VideoTrack(0, 29, frame_locations={
        1: mpf.ImageLocation(5, 5, 5, 10),
        3: mpf.ImageLocation(4, 4, 5, 6),
        7: mpf.ImageLocation(5, 5, 8, 9),
        11: mpf.ImageLocation(4, 5, 5, 6),
        12: mpf.ImageLocation(4, 4, 1, 2),
        20: mpf.ImageLocation(5, 5, 5, 5),
        25: mpf.ImageLocation(4, 4, 5, 5)})
    job = mpf.VideoJob('TEST', FRAME_FILTER_TEST_VIDEO, 0, -1,
                       dict(FEED_FORWARD_TYPE='SUPERSET_REGION'), {}, ff_track)
    cap = mpf_util.VideoCapture(job)

    self.assertEqual(7, cap.frame_count)
    self.assertFalse(cap.get_initialization_frames_if_available(100))

    # The superset region is the bounding box covering every detection.
    locations = ff_track.frame_locations
    min_x = locations[3].x_left_upper
    max_x = locations[7].x_left_upper + locations[7].width
    min_y = locations[3].y_left_upper
    max_y = locations[1].y_left_upper + locations[1].height
    expected_size = mpf_util.Size(max_x - min_x, max_y - min_y)
    self.assertEqual(expected_size, cap.frame_size)

    # Each read advances the position ratio by 1/7.
    for position, frame_number in enumerate((1, 3, 7, 11, 12, 20, 25)):
        self.assert_frame_read(cap, frame_number, expected_size, position / 7)

    self.assertAlmostEqual(1, cap.frame_position_ratio)
    self.assert_read_fails(cap)

    il = mpf.ImageLocation(0, 1, 2, 3)
    track = mpf.VideoTrack(0, 6, frame_locations={1: il, 2: il, 4: il, 5: il})
    cap.reverse_transform(track)
    self.assertEqual(1, track.start_frame)
    self.assertEqual(25, track.stop_frame)
    self.assert_dict_contains_keys(track.frame_locations, (3, 7, 12, 20))
def test_feed_forward_cropper_crop_to_exact_region(self):
    """REGION feed-forward crops each frame to its detection; reverse_transform restores coords."""
    ff_track = mpf.VideoTrack(4, 29, frame_locations={
        4: mpf.ImageLocation(10, 60, 65, 125),
        15: mpf.ImageLocation(60, 20, 100, 200),
        29: mpf.ImageLocation(70, 0, 30, 240)})
    job = mpf.VideoJob('Test', FRAME_FILTER_TEST_VIDEO, 4, 29,
                       dict(FEED_FORWARD_TYPE='REGION'), {}, ff_track)
    cap = mpf_util.VideoCapture(job)
    output_track = mpf.VideoTrack(0, 2)

    def read_and_check(expected_frame_number, expected_size):
        # Capture the position before reading, since next() advances it.
        position = cap.current_frame_position
        frame = next(cap)
        self.assertEqual(expected_frame_number, get_frame_number(frame))
        self.assertEqual(expected_size, mpf_util.Size.from_frame(frame))
        return position, frame

    pos, frame = read_and_check(4, (65, 125))
    output_track.frame_locations[pos] = mpf.ImageLocation(
        0, 0, frame.shape[1], frame.shape[0])

    pos, frame = read_and_check(15, (100, 200))
    output_track.frame_locations[pos] = mpf.ImageLocation(
        0, 0, frame.shape[1], frame.shape[0])

    pos, frame = read_and_check(29, (30, 240))
    # Deliberately smaller than the cropped frame to exercise the offset math.
    output_track.frame_locations[pos] = mpf.ImageLocation(5, 40, 15, 60)

    self.assert_read_fails(cap)

    cap.reverse_transform(output_track)
    self.assertEqual(len(ff_track.frame_locations),
                     len(output_track.frame_locations))
    self.assertEqual(ff_track.frame_locations[4], output_track.frame_locations[4])
    self.assertEqual(ff_track.frame_locations[15], output_track.frame_locations[15])

    last_detection = output_track.frame_locations[29]
    self.assertEqual(75, last_detection.x_left_upper)
    self.assertEqual(40, last_detection.y_left_upper)
    self.assertEqual(15, last_detection.width)
    self.assertEqual(60, last_detection.height)
def test_feed_forward_superset_region(self):
    """Superset-region crop must contain all detection (blue) pixels at every job rotation."""
    ff_track = mpf.VideoTrack(
        0, 2, -1,
        {
            0: mpf.ImageLocation(60, 300, 100, 40, -1, dict(ROTATION='260')),
            1: mpf.ImageLocation(160, 350, 130, 20, -1, dict(ROTATION='60')),
            2: mpf.ImageLocation(260, 340, 60, 60, -1, dict(ROTATION='20'))
        }, {})

    # The expected pixel-count bounds depend only on the track, not on the job
    # rotation, so compute them once instead of on every loop iteration.
    expected_min_num_blue = 0
    expected_max_num_blue = 0
    for il in ff_track.frame_locations.values():
        area = il.width * il.height
        perimeter = 2 * il.width + 2 * il.height
        # Color of pixels along edges gets blended with nearby pixels during
        # interpolation, so allow a perimeter-wide band of slack on each side.
        expected_min_num_blue += area - perimeter
        expected_max_num_blue += area + perimeter

    for rotation in range(0, 361, 20):
        job = mpf.VideoJob(
            'Test',
            test_util.get_data_file_path('rotation/feed-forward-rotation-test.png'),
            ff_track.start_frame, ff_track.stop_frame,
            dict(FEED_FORWARD_TYPE='SUPERSET_REGION', ROTATION=str(rotation)),
            {}, ff_track)

        frame = next(mpf_util.VideoCapture(job))
        actual_num_blue = count_matching_pixels(frame, (255, 0, 0))
        self.assertLessEqual(actual_num_blue, expected_max_num_blue)
        self.assertGreaterEqual(actual_num_blue, expected_min_num_blue)
def test_feed_forward_exact_region(self):
    """REGION transform crops each frame to exactly the detection; reverse round-trips."""
    ff_track = mpf.VideoTrack(
        0, 2, -1,
        {
            0: mpf.ImageLocation(60, 300, 100, 40, -1, dict(ROTATION='260')),
            1: mpf.ImageLocation(160, 350, 130, 20, -1, dict(ROTATION='60')),
            2: mpf.ImageLocation(260, 340, 60, 60, -1, dict(ROTATION='20'))
        }, {})
    job = mpf.VideoJob(
        'Test',
        test_util.get_data_file_path('rotation/feed-forward-rotation-test.png'),
        ff_track.start_frame, ff_track.stop_frame,
        dict(FEED_FORWARD_TYPE='REGION'), {}, ff_track)

    source_image = cv2.imread(job.data_uri)
    xform = frame_transformer_factory.get_transformer(
        job, mpf_util.Size.from_frame(source_image))

    for frame_number, ff_detection in ff_track.frame_locations.items():
        cropped = xform.transform_frame(source_image, frame_number)
        cropped_size = mpf_util.Size.from_frame(cropped)
        # The crop must match the detection's size exactly and be entirely blue.
        self.assertEqual(cropped_size, (ff_detection.width, ff_detection.height))
        self.assert_image_color(cropped, (255, 0, 0))

        size_as_tuple = typing.cast(typing.Tuple[int, int], cropped_size)
        round_trip = mpf.ImageLocation(0, 0, *size_as_tuple)
        xform.reverse_transform(round_trip, frame_number)
        self.assert_detections_same_location(round_trip, ff_detection)
def create_test_track():
    """Build a small fixed VideoTrack (frames 5-10) used as a shared test fixture."""
    locations = {
        5: mpf.ImageLocation(20, 30, 15, 5),
        7: mpf.ImageLocation(0, 1, 2, 3),
        10: mpf.ImageLocation(4, 5, 6, 7),
    }
    return mpf.VideoTrack(5, 10, frame_locations=locations)
def test_can_fix_frame_pos_in_reverse_transform(self):
    """reverse_transform must map segment frame indices back to original video frames."""
    cap = create_video_capture(5, 19, 2)  # frames 5..19, interval 2
    il = mpf.ImageLocation(0, 1, 2, 3)
    track = mpf.VideoTrack(1, 6, frame_locations={1: il, 2: il, 6: il})

    cap.reverse_transform(track)

    # Segment frame i maps back to 5 + 2*i: 1 -> 7, 2 -> 9, 6 -> 17.
    self.assertEqual(7, track.start_frame)
    self.assertEqual(17, track.stop_frame)
    # Fix: arguments were swapped. The dictionary comes first and the expected
    # keys second, matching assert_dict_contains_keys usage elsewhere in this file.
    self.assert_dict_contains_keys(track.frame_locations, (7, 9, 17))
def to_feed_forward_filter(interval_filter):
    """Build a FeedForwardFrameFilter equivalent to the given interval filter.

    Creates a placeholder detection at each original frame position the
    interval filter would produce.
    """
    frame_count = interval_filter.get_segment_frame_count()
    frame_location_map = {
        interval_filter.segment_to_original_frame_position(i): mpf.ImageLocation(0, 0, 0, 0)
        for i in range(frame_count)}
    ff_track = mpf.VideoTrack(0, frame_count, frame_locations=frame_location_map)
    return FeedForwardFrameFilter(ff_track)
def test_can_use_search_region_with_feed_forward_frame_type(self):
    """Search-region cropping still applies when FEED_FORWARD_TYPE is FRAME."""
    ff_track = mpf.VideoTrack(
        0, 15, frame_locations={1: mpf.ImageLocation(5, 5, 5, 5)})
    job_properties = dict(
        FEED_FORWARD_TYPE='FRAME',
        SEARCH_REGION_ENABLE_DETECTION='True',
        SEARCH_REGION_TOP_LEFT_X_DETECTION='3',
        SEARCH_REGION_TOP_LEFT_Y_DETECTION='3',
        SEARCH_REGION_BOTTOM_RIGHT_X_DETECTION='6',
        SEARCH_REGION_BOTTOM_RIGHT_Y_DETECTION='8')
    job = mpf.VideoJob('TEST', FRAME_FILTER_TEST_VIDEO, 0, -1,
                       job_properties, {}, ff_track)

    cap = mpf_util.VideoCapture(job)
    # Region (3,3)-(6,8) yields a 3-wide by 5-tall crop.
    expected_size = mpf_util.Size(3, 5)
    self.assertEqual(expected_size, cap.frame_size)

    frame = next(cap)
    self.assertEqual(expected_size.width, frame.shape[1])
    self.assertEqual(expected_size.height, frame.shape[0])
def get_detections_from_video_capture(video_job, video_capture):
    """Example component: emit one track whose detection grows each frame.

    The detection's width and height expand by a fixed amount per frame,
    clamped to one less than the frame dimensions.
    """
    logger.info('[%s] Received video job: %s', video_job.job_name, video_job)
    model = get_model(video_job)  # A real component would use the model.

    width, height = video_capture.frame_size
    growth_per_frame = 5
    frame_locations = dict()
    detection = mpf.ImageLocation(0, 0, 1, 1)
    final_frame_index = 0

    for frame_index, _frame in enumerate(video_capture):
        final_frame_index = frame_index
        detection = mpf.ImageLocation(
            0, 0,
            min(width - 1, detection.width + growth_per_frame),
            min(height - 1, detection.height + growth_per_frame))
        frame_locations[frame_index] = detection

    if not frame_locations:
        return ()
    return [mpf.VideoTrack(0, final_frame_index, frame_locations=frame_locations)]