def test_from_camera(self):
    """A camera-backed VideoSource streams live: no pre-computed frames, images on demand."""

    class FakeCapture:
        """Stand-in for cv2.VideoCapture that always yields one blank frame."""

        def __init__(self, *args):
            pass

        def set(self, *args):
            pass

        def read(self):
            # Mimic cv2's (success_flag, frame) return contract.
            return True, np.zeros((2, 3))

    with patch('cv2.VideoCapture', FakeCapture):
        source = VideoSource(camera_id=0)
        # Live capture: frames are not buffered ahead of time ...
        assert source._frames is None
        # ... but an image can still be pulled from the device.
        assert source.get_image() is not None
def __init__(
        self,
        neural_network: RealtimeNeuralNet,
        post_processors: Union[PostProcessor, List[PostProcessor]],
        results_display: DisplayResults,
        callbacks: Optional[List[Callable]] = None,
        camera_id: int = 0,
        path_in: Optional[str] = None,
        path_out: Optional[str] = None,
        use_gpu: bool = True,
        stop_event: Optional[multiprocessing.Event] = None):
    """
    Wire together the inference engine, video source/stream and display.

    :param neural_network: The neural network that produces the predictions for the camera image.
    :param post_processors: Post processors that are applied to the generated predictions to filter
        or manipulate the data. Refer to the PostProcessor class for more information.
    :param results_display: A display window which shows the current camera image as well as the
        prediction with the highest probability
    :param callbacks: A list of functions that are called in each loop iteration once the inference
        is started. The input dict always contains the key 'prediction' under which an np.ndarray
        with the raw predictions is stored. The presence of other keys depend on the choice of
        post processors. The callbacks should return True if the inference should continue,
        False otherwise.
    :param camera_id: The index of the webcam that is used. Default id is 0.
    :param path_in: If provided, use a video file located at the path as the input to the model
    :param path_out: If provided, store the captured video in a file in this location
    :param use_gpu: If True, run the model on the GPU
    :param stop_event: Event for signalling to stop model inference
    """
    self.inference_engine = InferenceEngine(neural_network, use_gpu=use_gpu)

    # The source is sized and paced to match what the model expects.
    source = VideoSource(
        camera_id=camera_id,
        size=self.inference_engine.expected_frame_size,
        filename=path_in,
        target_fps=self.inference_engine.fps,
    )
    self.video_stream = VideoStream(source, self.inference_engine.fps)

    # Normalize to a list so downstream iteration is uniform.
    self.postprocessors = post_processors if isinstance(post_processors, list) else [post_processors]
    self.callbacks = callbacks or []

    self.frame_index = None
    self.clip = None
    self.results_display = results_display
    self.path_out = path_out
    self.video_recorder = None  # created in `display_prediction`
    self.video_recorder_raw = None  # created in `display_prediction`
    self.stop_event = stop_event
class TestVideoStream(unittest.TestCase):
    """Tests for VideoStream, which reads frames from a VideoSource into a queue."""

    def setUp(self) -> None:
        self.video = VideoSource(filename=VIDEO_PATH)
        self.stream = VideoStream(video_source=self.video, fps=12.0)

    def test_stop(self):
        """Calling stop() should set the internal shutdown flag."""
        self.stream.stop()
        self.assertTrue(self.stream._shutdown)

    def test_extract_image(self):
        """Frames placed on the queue should come back as numpy arrays."""
        img_tuple = self.video.get_image()
        self.stream.frames.put(img_tuple, False)
        imgs = self.stream.get_image()
        # Fixed: use assertIsInstance instead of `type(...) == np.ndarray`
        # (idiomatic, accepts subclasses, and consistent with the other
        # unittest-style assertions in this class).
        self.assertIsInstance(imgs[0], np.ndarray)

    def test_frame_conditions(self):
        """run() should fill the frame queue and then shut down."""
        self.stream.run()
        self.assertTrue(self.stream.frames.full())
        self.assertTrue(self.stream._shutdown)
def test_from_file_change_fps(self):
    """When a target fps is requested, frames are pre-computed and resampled."""
    source = VideoSource(filename=self.VIDEO_FILE, target_fps=5)
    # Resampling to a new fps forces eager frame extraction.
    assert source._frames is not None
    assert source.get_image() is not None
def test_from_file(self):
    """Without fps resampling, a file-backed VideoSource reads frames lazily."""
    source = VideoSource(filename=self.VIDEO_FILE)
    # No target fps requested, so no eager frame buffer is built.
    assert source._frames is None
    assert source.get_image() is not None
def setUp(self) -> None:
    """Create a fresh file-backed source and a 12 fps stream for each test."""
    source = VideoSource(filename=VIDEO_PATH)
    self.video = source
    self.stream = VideoStream(video_source=source, fps=12.0)