def __init__(self,
             name: str,
             enable_key: Optional[Union[str, int]] = None,
             max_fps: int = 30,
             input_check_interval: float = 0.01,
             enable: bool = True,
             daemon: bool = False,
             multi_input: bool = False):
    """Initialize the node thread.

    Args:
        name (str): Node name, also used as the thread name.
        enable_key (str | int, optional): Keyboard key (character or key
            code) that toggles the node's enable state at runtime. If set,
            the subclass must override ``bypass()``. Default: ``None``.
        max_fps (int): Maximum processing rate of the node. Default: 30.
        input_check_interval (float): Interval (seconds) between input
            polling attempts. Default: 0.01.
        enable (bool): Initial enable state. Default: ``True``.
        daemon (bool): Whether the node thread is a daemon. Default: ``False``.
        multi_input (bool): Whether the node accepts multiple inputs per
            iteration. Default: ``False``.

    Raises:
        NotImplementedError: If ``enable_key`` is given but the subclass does
            not override ``bypass()``.
    """
    super().__init__(name=name, daemon=daemon)
    self._executor = None
    self._enabled = enable
    self.enable_key = enable_key
    self.max_fps = max_fps
    self.input_check_interval = input_check_interval
    self.multi_input = multi_input

    # A partitioned buffer manager the executor's buffer manager that
    # only accesses the buffers related to the node
    self._buffer_manager = None

    # Input/output buffers are a list of registered buffers' information
    self._input_buffers = []
    self._output_buffers = []

    # Event manager is a copy of assigned executor's event manager
    self._event_manager = None

    # A list of registered event information
    # See register_event() for more information
    # Note that we recommend to handle events in nodes by registering
    # handlers, but one can still access the raw event by _event_manager
    self._registered_events = []

    # A list of (listener_threads, event_info)
    # See set_executor() for more information
    self._event_listener_threads = []

    # A timer to calculate node FPS
    self._timer = StopWatch(window=10)

    # Register enable toggle key.
    # BUGFIX: use an explicit `is not None` check so falsy-but-valid keys
    # (e.g. key code 0) are not silently ignored by a truthiness test.
    if self.enable_key is not None:
        # If the node allows toggling enable, it should override the
        # `bypass` method to define the node behavior when disabled.
        if not is_method_overridden('bypass', Node, self.__class__):
            # BUGFIX: the adjacent string literals lacked separating
            # spaces, yielding "togglingenable" in the error message.
            raise NotImplementedError(
                f'The node {self.__class__} does not support toggling '
                'enable but got argument `enable_key`. To support toggling '
                'enable, please override the `bypass` method of the node.')
        self.register_event(
            event_name=self.enable_key,
            is_keyboard=True,
            handler_func=self._toggle_enable,
        )

    # Logger
    self.logger = logging.getLogger(f'Node "{self.name}"')
def inference_pose():
    """Worker loop: consume detection results and run top-down pose models.

    Pulls ``(ts, frame, timing, mmdet_results)`` tuples from
    ``det_result_queue``, runs every configured pose model with cross-frame
    tracking, and pushes the aggregated results into ``pose_result_queue``.
    Runs forever; signals ``event_inference_done`` after each frame.
    """
    print('Thread "pose" started')
    timer = StopWatch(window=10)

    while True:
        # Poll (with a short sleep) until a detection result is available.
        while len(det_result_queue) < 1:
            time.sleep(0.001)
        with det_result_queue_mutex:
            ts_input, frame, t_info, mmdet_results = det_result_queue.popleft()

        results_per_model = []
        for info, history in zip(pose_model_list, pose_history_list):
            tag = info['name']
            model = info['model']
            prev_results = history['pose_results_last']
            track_next_id = history['next_id']

            with timer.timeit(tag):
                # Keep only the detections of categories this model handles.
                dets = process_mmdet_results(
                    mmdet_results,
                    class_names=det_model.CLASSES,
                    cat_ids=info['cat_ids'])

                # Run the top-down pose model on the selected boxes.
                dataset_name = model.cfg.data['test']['type']
                cur_results, _ = inference_top_down_pose_model(
                    model,
                    frame,
                    dets,
                    bbox_thr=args.det_score_thr,
                    format='xyxy',
                    dataset=dataset_name)

                # Associate poses with tracks from the previous frame.
                cur_results, track_next_id = get_track_id(
                    cur_results,
                    prev_results,
                    track_next_id,
                    use_oks=False,
                    tracking_thr=0.3,
                    use_one_euro=True,
                    fps=None)

            results_per_model.append(cur_results)
            # Remember results so the next frame can be tracked against them.
            history['pose_results_last'] = cur_results
            history['next_id'] = track_next_id

        t_info += timer.report_strings()
        with pose_result_queue_mutex:
            pose_result_queue.append((ts_input, t_info, results_per_model))

        event_inference_done.set()
def test_stopwatch():
    """Smoke test: nested StopWatch timing contexts and report generation."""
    window = 5
    n_iters = 10
    outer_ms = 100
    inner_ms = 100

    watch = StopWatch(window=window)
    for _ in range(n_iters):
        with watch.timeit():
            time.sleep(outer_ms / 1000.)
            with watch.timeit('inner'):
                time.sleep(inner_ms / 1000.)

    # Only verify that report generation runs without raising.
    watch.report()
    watch.report_strings()
def test_stopwatch():
    """Check StopWatch mean timings against the actual sleep durations.

    The outer timing scope covers both sleeps (~200 ms); the nested
    'inner' scope covers only the inner sleep (~100 ms).
    """
    window_size = 5
    test_loop = 10
    outer_time = 100  # ms slept directly in the outer scope
    inner_time = 100  # ms slept in the nested 'inner' scope

    stop_watch = StopWatch(window=window_size)
    for _ in range(test_loop):
        with stop_watch.timeit():
            time.sleep(outer_time / 1000.)
            with stop_watch.timeit('inner'):
                time.sleep(inner_time / 1000.)
    report = stop_watch.report()
    _ = stop_watch.report_strings()

    # BUGFIX (flaky test): time.sleep() only guarantees a *lower* bound on
    # the elapsed time; on a loaded machine the overshoot easily exceeds the
    # former 1% relative tolerance. Assert the guaranteed lower bound and
    # allow a generous upper margin instead of a symmetric rtol.
    assert report['_FPS_'] >= (outer_time + inner_time) * 0.99
    assert report['_FPS_'] < (outer_time + inner_time) * 1.5
    assert report['inner'] >= inner_time * 0.99
    assert report['inner'] < inner_time * 1.5
def inference_detection():
    """Worker loop: run the object detector on frames from the input queue.

    Pops ``(ts, frame)`` pairs from ``input_queue``, runs ``det_model`` on
    each frame, and appends ``(ts, frame, timing, results)`` to
    ``det_result_queue``. Throttled to ``args.inference_fps``.
    """
    print('Thread "det" started')
    timer = StopWatch(window=10)
    interval = 1.0 / args.inference_fps  # minimum seconds between inferences
    last_done = None  # wall-clock time when the previous inference finished

    while True:
        # Poll (with a short sleep) until a frame is available.
        while len(input_queue) < 1:
            time.sleep(0.001)
        with input_queue_mutex:
            ts_input, frame = input_queue.popleft()

        # Run object detection on the frame.
        with timer.timeit('Det'):
            mmdet_results = inference_detector(det_model, frame)

        timing_info = timer.report_strings()
        with det_result_queue_mutex:
            det_result_queue.append((ts_input, frame, timing_info, mmdet_results))

        # Throttle to the configured inference FPS.
        now = time.time()
        if last_done is not None:
            remaining = interval - (now - last_done)
            if remaining > 0:
                time.sleep(remaining)
        last_done = time.time()
def display():
    """Main display loop: visualize results, apply effects, handle keys.

    Consumes frames from ``frame_buffer`` and the latest pose results from
    ``pose_result_queue``, draws pose visualizations, optional fun effects,
    timing and system info onto the frame, optionally records an output
    video, and reacts to keyboard input. Exits when a ``None`` frame (end
    signal) is received or the user presses Esc/q/Q, then sets
    ``event_exit``.
    """
    print('Thread "display" started')
    stop_watch = StopWatch(window=10)

    # initialize result status
    ts_inference = None  # timestamp of the latest inference result
    fps_inference = 0.  # inference FPS
    t_delay_inference = 0.  # inference result time delay
    pose_results_list = None  # latest inference result
    t_info = []  # upstream time information (list[str])

    # initialize visualization and output
    sunglasses_img = None  # resource image for sunglasses effect
    text_color = (228, 183, 61)  # text color to show time/system information
    vid_out = None  # video writer

    # show instructions
    print('Keyboard shortcuts: ')
    print('"v": Toggle the visualization of bounding boxes and poses.')
    print('"s": Toggle the sunglasses effect.')
    print('"b": Toggle the bug-eye effect.')
    print('"Q", "q" or Esc: Exit.')

    while True:
        with stop_watch.timeit('_FPS_'):
            # acquire a frame from buffer
            ts_input, frame = frame_buffer.get()
            # input ending signal
            if ts_input is None:
                break

            img = frame

            # get pose estimation results (non-blocking; keep the previous
            # results if no new ones have arrived yet)
            if len(pose_result_queue) > 0:
                with pose_result_queue_mutex:
                    _result = pose_result_queue.popleft()
                    _ts_input, t_info, pose_results_list = _result
                _ts = time.time()
                if ts_inference is not None:
                    fps_inference = 1.0 / (_ts - ts_inference)
                ts_inference = _ts
                t_delay_inference = (_ts - _ts_input) * 1000

            # visualize detection and pose results
            if pose_results_list is not None:
                for model_info, pose_results in zip(pose_model_list,
                                                    pose_results_list):
                    pose_model = model_info['model']
                    bbox_color = model_info['bbox_color']
                    dataset_name = pose_model.cfg.data['test']['type']

                    # show pose results
                    if args.show_pose:
                        img = vis_pose_result(
                            pose_model,
                            img,
                            pose_results,
                            radius=4,
                            thickness=2,
                            dataset=dataset_name,
                            kpt_score_thr=args.kpt_thr,
                            bbox_color=bbox_color)

                    # sunglasses effect
                    if args.sunglasses:
                        if dataset_name == 'TopDownCocoDataset':
                            left_eye_idx = 1
                            right_eye_idx = 2
                        elif dataset_name == 'AnimalPoseDataset':
                            left_eye_idx = 0
                            right_eye_idx = 1
                        else:
                            # BUGFIX: the concatenated literals lacked a
                            # separating space before the dataset name.
                            raise ValueError(
                                'Sunglasses effect does not support '
                                f'{dataset_name}')
                        if sunglasses_img is None:
                            # The image attributes to:
                            # https://www.vecteezy.com/free-vector/glass
                            # Glass Vectors by Vecteezy
                            sunglasses_img = cv2.imread(
                                'demo/resources/sunglasses.jpg')
                        img = apply_sunglasses_effect(img, pose_results,
                                                      sunglasses_img,
                                                      left_eye_idx,
                                                      right_eye_idx)
                    # bug-eye effect
                    if args.bugeye:
                        if dataset_name == 'TopDownCocoDataset':
                            left_eye_idx = 1
                            right_eye_idx = 2
                        elif dataset_name == 'AnimalPoseDataset':
                            left_eye_idx = 0
                            right_eye_idx = 1
                        else:
                            # BUGFIX: missing separating space, as above.
                            raise ValueError(
                                'Bug-eye effect does not support '
                                f'{dataset_name}')
                        img = apply_bugeye_effect(img, pose_results,
                                                  left_eye_idx,
                                                  right_eye_idx)

            # delay control: optionally hold the frame back so display lags
            # the capture by a fixed amount
            if args.display_delay > 0:
                t_sleep = args.display_delay * 0.001 - (time.time() - ts_input)
                if t_sleep > 0:
                    time.sleep(t_sleep)
            t_delay = (time.time() - ts_input) * 1000

            # show time information
            t_info_display = stop_watch.report_strings()  # display fps
            t_info_display.append(f'Inference FPS: {fps_inference:>5.1f}')
            t_info_display.append(f'Delay: {t_delay:>3.0f}')
            t_info_display.append(
                f'Inference Delay: {t_delay_inference:>3.0f}')
            t_info_str = ' | '.join(t_info_display + t_info)
            cv2.putText(img, t_info_str, (20, 20), cv2.FONT_HERSHEY_DUPLEX,
                        0.3, text_color, 1)

            # collect system information
            sys_info = [
                f'RES: {img.shape[1]}x{img.shape[0]}',
                f'Buffer: {frame_buffer.qsize()}/{frame_buffer.maxsize}'
            ]
            if psutil_proc is not None:
                sys_info += [
                    f'CPU: {psutil_proc.cpu_percent():.1f}%',
                    f'MEM: {psutil_proc.memory_percent():.1f}%'
                ]
            sys_info_str = ' | '.join(sys_info)
            cv2.putText(img, sys_info_str, (20, 40), cv2.FONT_HERSHEY_DUPLEX,
                        0.3, text_color, 1)

            # save the output video frame (writer is created lazily so the
            # frame size is known)
            if args.out_video_file is not None:
                if vid_out is None:
                    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                    fps = args.out_video_fps
                    frame_size = (img.shape[1], img.shape[0])
                    vid_out = cv2.VideoWriter(args.out_video_file, fourcc,
                                              fps, frame_size)
                vid_out.write(img)

            # display
            cv2.imshow('mmpose webcam demo', img)
            keyboard_input = cv2.waitKey(1)
            if keyboard_input in (27, ord('q'), ord('Q')):
                break
            elif keyboard_input == ord('s'):
                args.sunglasses = not args.sunglasses
            elif keyboard_input == ord('b'):
                args.bugeye = not args.bugeye
            elif keyboard_input == ord('v'):
                args.show_pose = not args.show_pose

    cv2.destroyAllWindows()
    if vid_out is not None:
        vid_out.release()
    event_exit.set()