def main():
    """Run the async object-detection demo over a cv2.VideoCapture source.

    Submits frames to an AsyncPipeline as requests free up, renders completed
    results in arrival order, and drains remaining requests on exit.
    """
    args = build_argparser().parse_args()

    log.info('Initializing Inference Engine...')
    ie = IECore()

    plugin_config = get_plugin_configs(args.device, args.num_streams, args.num_threads)

    log.info('Loading network...')
    model = get_model(ie, args)
    # Only the 'retina' architecture produces facial landmarks to draw.
    has_landmarks = args.architecture_type == 'retina'

    detector_pipeline = AsyncPipeline(ie, model, plugin_config,
                                      device=args.device, max_num_requests=args.num_infer_requests)

    # A purely numeric --input is treated as a camera index, otherwise a path/URL.
    try:
        input_stream = int(args.input)
    except ValueError:
        input_stream = args.input
    cap = cv2.VideoCapture(input_stream)
    if not cap.isOpened():
        log.error('OpenCV: Failed to open capture: ' + str(input_stream))
        sys.exit(1)

    next_frame_id = 0          # id of the next frame to submit
    next_frame_id_to_show = 0  # id of the next frame whose result should be displayed

    log.info('Starting inference...')
    print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
    # Fall back to a 100-color palette when the model ships no label list.
    palette = ColorPalette(len(model.labels) if model.labels else 100)
    presenter = monitors.Presenter(args.utilization_monitors, 55,
                                   (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH) / 4),
                                    round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) / 8)))
    metrics = PerformanceMetrics()

    while cap.isOpened():
        # Surface exceptions raised in the pipeline's completion callbacks.
        if detector_pipeline.callback_exceptions:
            raise detector_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = detector_pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(objects) and args.raw_output_message:
                print_raw_results(frame.shape[:2], objects, model.labels, args.prob_threshold)

            presenter.drawGraphs(frame)
            frame = draw_detections(frame, objects, palette, model.labels, args.prob_threshold, has_landmarks)
            # Latency is measured from frame capture to rendered output.
            metrics.update(start_time, frame)
            if not args.no_show:
                cv2.imshow('Detection Results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            next_frame_id_to_show += 1
            # Prefer showing results over grabbing new frames this iteration.
            continue

        if detector_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            ret, frame = cap.read()
            if not ret:
                if args.loop:
                    # Restart the source from the beginning when looping.
                    cap.open(input_stream)
                else:
                    # Releasing the capture makes cap.isOpened() end the loop.
                    cap.release()
                continue
            # Submit for inference
            detector_pipeline.submit_data(frame, next_frame_id, {'frame': frame, 'start_time': start_time})
            next_frame_id += 1
        else:
            # Wait for empty request
            detector_pipeline.await_any()

    detector_pipeline.await_all()
    # Process completed requests — drain whatever is still in flight.
    while detector_pipeline.has_completed_request():
        results = detector_pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(objects) and args.raw_output_message:
                print_raw_results(frame.shape[:2], objects, model.labels, args.prob_threshold)

            presenter.drawGraphs(frame)
            frame = draw_detections(frame, objects, palette, model.labels, args.prob_threshold, has_landmarks)
            metrics.update(start_time, frame)
            if not args.no_show:
                cv2.imshow('Detection Results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            next_frame_id_to_show += 1
        else:
            break

    metrics.print_total()
    print(presenter.reportMeans())
def main():
    """Run the async deblurring demo.

    Reads frames, submits them to an AsyncPipeline, and shows/writes the
    original and deblurred frame side by side (hconcat), so the output video
    is twice the input width.
    """
    args = build_argparser().parse_args()

    log.info('Initializing Inference Engine...')
    ie = IECore()

    plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)

    cap = open_images_capture(args.input, args.loop)
    start_time = perf_counter()
    frame = cap.read()
    if frame is None:
        raise RuntimeError("Can't read an image from the input")

    log.info('Loading network...')
    # The first frame fixes the network input shape.
    model = Deblurring(ie, args.model, frame.shape)

    pipeline = AsyncPipeline(ie, model, plugin_config,
                             device=args.device, max_num_requests=args.num_infer_requests)

    log.info('Starting inference...')
    print(
        "To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"
    )

    # The first frame is submitted eagerly, hence ids start at 1 / 0.
    pipeline.submit_data(frame, 0, {'frame': frame, 'start_time': start_time})
    next_frame_id = 1
    next_frame_id_to_show = 0

    metrics = PerformanceMetrics()
    presenter = monitors.Presenter(
        args.utilization_monitors, 55,
        (round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
    video_writer = cv2.VideoWriter()
    # Output is the side-by-side composite: double width.
    if args.output and not video_writer.open(
            args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(),
            (2 * frame.shape[1], frame.shape[0])):
        raise RuntimeError("Can't open video writer")

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                break
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            result_frame, frame_meta = results
            input_frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if input_frame.shape != result_frame.shape:
                input_frame = cv2.resize(
                    input_frame, (result_frame.shape[1], result_frame.shape[0]))
            final_image = cv2.hconcat([input_frame, result_frame])

            presenter.drawGraphs(final_image)
            metrics.update(start_time, final_image)
            if video_writer.isOpened() and (
                    args.output_limit <= 0 or
                    next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(final_image)

            if not args.no_show:
                cv2.imshow('Deblurring Results', final_image)
                key = cv2.waitKey(1)
                # FIX: cv2.waitKey returns an int keycode; the original compared
                # it to the strings 'q'/'Q', which is always False, so only ESC
                # could quit. Compare against ord() values instead.
                if key in (27, ord('q'), ord('Q')):
                    break
                presenter.handleKey(key)
            next_frame_id_to_show += 1

    pipeline.await_all()
    # Process completed requests — drain everything still in flight.
    while pipeline.has_completed_request():
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            result_frame, frame_meta = results
            input_frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if input_frame.shape != result_frame.shape:
                input_frame = cv2.resize(
                    input_frame, (result_frame.shape[1], result_frame.shape[0]))
            final_image = cv2.hconcat([input_frame, result_frame])

            presenter.drawGraphs(final_image)
            metrics.update(start_time, final_image)
            if video_writer.isOpened() and (
                    args.output_limit <= 0 or
                    next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(final_image)
            if not args.no_show:
                cv2.imshow('Deblurring Results', final_image)
                # No quit check while draining: all results are flushed.
                key = cv2.waitKey(1)
            next_frame_id_to_show += 1
        else:
            break

    metrics.print_total()
    print(presenter.reportMeans())
def main():
    """Run the async object-detection demo over an images/video source.

    Uses open_images_capture for input; the presenter and optional video
    writer are created lazily when the first frame arrives (their sizes
    depend on the frame shape).
    """
    args = build_argparser().parse_args()

    log.info('Initializing Inference Engine...')
    ie = IECore()

    plugin_config = get_plugin_configs(args.device, args.num_streams, args.num_threads)

    log.info('Loading network...')
    model = get_model(ie, args)

    detector_pipeline = AsyncPipeline(ie, model, plugin_config,
                                      device=args.device, max_num_requests=args.num_infer_requests)

    cap = open_images_capture(args.input, args.loop)

    next_frame_id = 0          # id of the next frame to submit
    next_frame_id_to_show = 0  # id of the next frame whose result should be displayed

    log.info('Starting inference...')
    print(
        "To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"
    )
    # Fall back to a 100-color palette when the model ships no label list.
    palette = ColorPalette(len(model.labels) if model.labels else 100)
    metrics = PerformanceMetrics()
    # Created on the first frame — sizes depend on the frame shape.
    presenter = None
    video_writer = cv2.VideoWriter()

    while True:
        # Surface exceptions raised in the pipeline's completion callbacks.
        if detector_pipeline.callback_exceptions:
            raise detector_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = detector_pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(objects) and args.raw_output_message:
                print_raw_results(frame.shape[:2], objects, model.labels, args.prob_threshold)

            # presenter is guaranteed non-None here: results only exist after
            # the first frame was read and submitted.
            presenter.drawGraphs(frame)
            frame = draw_detections(frame, objects, palette, model.labels, args.prob_threshold)
            metrics.update(start_time, frame)
            # --output_limit <= 0 means "write every frame".
            if video_writer.isOpened() and (
                    args.output_limit <= 0 or
                    next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            if not args.no_show:
                cv2.imshow('Detection Results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            next_frame_id_to_show += 1
            # Prefer showing results over grabbing new frames this iteration.
            continue

        if detector_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                # Lazy init: presenter/writer dimensions come from the frame.
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), (frame.shape[1], frame.shape[0])):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            detector_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            detector_pipeline.await_any()

    detector_pipeline.await_all()
    # Process completed requests — drain whatever is still in flight.
    while detector_pipeline.has_completed_request():
        results = detector_pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(objects) and args.raw_output_message:
                print_raw_results(frame.shape[:2], objects, model.labels, args.prob_threshold)

            presenter.drawGraphs(frame)
            frame = draw_detections(frame, objects, palette, model.labels, args.prob_threshold)
            metrics.update(start_time, frame)
            if video_writer.isOpened() and (
                    args.output_limit <= 0 or
                    next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            if not args.no_show:
                cv2.imshow('Detection Results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            next_frame_id_to_show += 1
        else:
            break

    metrics.print_total()
    print(presenter.reportMeans())
def main():
    """Benchmark-instrumented detection demo.

    Runs async detection over an image sequence, accumulates detections in
    COCO result format into ``results.json``, saves annotated frames to disk,
    and records per-stage wall-clock timings.

    NOTE(review): this function relies on module-level globals not visible in
    this chunk — ``images`` (input file names), ``yolo_to_ssd_classes``
    (class-id remap), and the timing lists ``detect_time_list``,
    ``pre_process_list``, ``post_process_list``, ``post_process_list_two`` —
    confirm they are defined at module scope.
    """
    args = build_argparser().parse_args()

    log.info('Initializing Inference Engine...')
    ie = IECore()

    plugin_config = get_plugin_configs(args.device, args.num_streams, args.num_threads)

    log.info('Loading network...')
    model = get_model(ie, args)

    detector_pipeline = AsyncPipeline(ie, model, plugin_config,
                                      device=args.device, max_num_requests=args.num_infer_requests)

    ### READ TIME ###
    read_time_start = time.time()
    cap = open_images_capture(args.input, args.loop)
    read_time_end = time.time()

    next_frame_id = 0          # id of the next frame to submit
    next_frame_id_to_show = 0  # id of the next frame whose result is expected
    image_id = 0               # index into the global `images` list

    log.info('Starting inference...')
    print(
        "To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"
    )
    palette = ColorPalette(len(model.labels) if model.labels else 100)
    metrics = PerformanceMetrics()
    presenter = None
    video_writer = cv2.VideoWriter()

    results_list = []          # COCO-format detection dicts, dumped to results.json
    detection_ids = [1, 3, 4]  # kept for reference; the class filter below is disabled
    all_starts = 0

    while True:
        print('NEXT FRAME ID', next_frame_id)
        # FIX: renamed from `id`, which shadowed the builtin.
        image_name = images[image_id]
        if next_frame_id == 5000:  # hard cap on processed frames
            break
        if detector_pipeline.callback_exceptions:
            raise detector_pipeline.callback_exceptions[0]
        # Process all completed requests
        #### DETECTION TIME ####
        detect_time_start = time.time()
        results = detector_pipeline.get_result(next_frame_id_to_show)
        detect_time_end = time.time()
        detect_time_list.append(detect_time_end - detect_time_start)
        if results:
            objects, frame_meta = results
            # COCO image_id: strip leading zeros and the extension.
            # FIX: computed once per frame instead of re-deriving it from the
            # already-converted value on every detection.
            coco_image_id = str(image_name.lstrip('0').split('.')[0])
            for detection in objects:
                x = float(detection.xmin)
                y = float(detection.ymin)
                w = float(detection.xmax - detection.xmin)
                h = float(detection.ymax - detection.ymin)
                cls = yolo_to_ssd_classes[detection.id]
                conf = detection.score
                # if cls in detection_ids:
                results_list.append({
                    'image_id': int(coco_image_id),
                    'category_id': cls,
                    'bbox': [x, y, w, h],
                    'score': float(conf)
                })
            frame = frame_meta['frame']
            post_process_start = time.time()
            start_time = frame_meta['start_time']
            # FIX: was accidentally accumulated twice per frame.
            all_starts += start_time
            if len(objects) and args.raw_output_message:
                print_raw_results(frame.shape[:2], objects, model.labels,
                                  args.prob_threshold, images[image_id])
            presenter.drawGraphs(frame)
            frame = draw_detections(frame, objects, palette, model.labels,
                                    args.prob_threshold, images[image_id])
            metrics.update(start_time, frame)
            post_process_end = time.time()
            post_process_list.append(post_process_end - post_process_start)
            if video_writer.isOpened() and (
                    args.output_limit <= 0 or
                    next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            if not args.no_show:
                # cv2.imshow('Detection Results', frame)
                cv2.imwrite(
                    f"/home/sovit/my_data/Data_Science/Projects/openvino_experiments/model_quantization/data/images/image_{image_id}.jpg",
                    frame)
                # key = cv2.waitKey(1)
                ESC_KEY = 27
                # Quit.
                #if key in {ord('q'), ord('Q'), ESC_KEY}:
                    #break
                #presenter.handleKey(key)
            next_frame_id_to_show += 1
            image_id += 1
            continue

        if detector_pipeline.is_ready():
            # Get new image/frame
            pre_process_start = time.time()
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                # Lazy init: presenter/writer dimensions come from the frame.
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(frame.shape[1] / 4), round(frame.shape[0] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), (frame.shape[1], frame.shape[0])):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            detector_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            pre_process_end = time.time()
            pre_process_list.append(pre_process_end - pre_process_start)
            next_frame_id += 1
        else:
            # Wait for empty request
            detector_pipeline.await_any()

    # Dump the accumulated COCO-style detections once, after the main loop.
    results_file = 'results.json'
    with open(results_file, 'w') as f:
        f.write(json.dumps(results_list, indent=4))

    detector_pipeline.await_all()
    # Process completed requests — drain whatever is still in flight.
    while detector_pipeline.has_completed_request():
        results = detector_pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            frame = frame_meta['frame']
            post_process_two_start = time.time()
            start_time = frame_meta['start_time']
            if len(objects) and args.raw_output_message:
                print()
                # print_raw_results(frame.shape[:2], objects, model.labels, args.prob_threshold)
            presenter.drawGraphs(frame)
            # frame = draw_detections(frame, objects, palette, model.labels, args.prob_threshold)
            metrics.update(start_time, frame)
            post_process_two_end = time.time()
            post_process_list_two.append(post_process_two_end - post_process_two_start)
            if video_writer.isOpened() and (
                    args.output_limit <= 0 or
                    next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            if not args.no_show:
                # cv2.imshow('Detection Results', frame)
                # FIX: the original f-string referenced an undefined `frame_id`
                # (NameError); index by image_id as in the main loop.
                cv2.imwrite(
                    f"/home/sovit/my_data/Data_Science/Projects/openvino_experiments/model_quantization/data/images/image_{image_id}.jpg",
                    frame)
                # FIX: the original tested `key` here although its cv2.waitKey
                # call was commented out, raising NameError on the first drained
                # frame; the whole key-handling block is disabled to match the
                # main loop above.
                # key = cv2.waitKey(1)
                ESC_KEY = 27
                # Quit.
                #if key in {ord('q'), ord('Q'), ESC_KEY}:
                    #break
                #presenter.handleKey(key)
            next_frame_id_to_show += 1
            image_id += 1
        else:
            break

    metrics.print_total()
    print("Presentor", presenter.reportMeans())
def main():
    """Run the async instance-segmentation demo.

    Submits frames to an AsyncPipeline, renders segmentation results in
    order, and supports toggling masks-only rendering with the Tab key (9).
    """
    metrics = PerformanceMetrics()
    args = build_argparser().parse_args()

    log.info('Initializing Inference Engine...')
    ie = IECore()

    plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)

    log.info('Loading network...')
    model, visualizer = get_model(ie, args)

    pipeline = AsyncPipeline(ie, model, plugin_config,
                             device=args.device, max_num_requests=args.num_infer_requests)

    cap = open_images_capture(args.input, args.loop)

    next_frame_id = 0          # id of the next frame to submit
    next_frame_id_to_show = 0  # id of the next frame whose result is expected

    log.info('Starting inference...')
    print(
        "To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"
    )
    # Created lazily on the first frame — sizes depend on the frame shape.
    presenter = None
    output_transform = None
    video_writer = cv2.VideoWriter()
    only_masks = args.only_masks

    while True:
        if pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                if next_frame_id == 0:
                    raise ValueError("Can't read an image from the input")
                break
            if next_frame_id == 0:
                output_transform = OutputTransform(frame.shape[:2], args.output_resolution)
                if args.output_resolution:
                    output_resolution = output_transform.new_resolution
                else:
                    output_resolution = (frame.shape[1], frame.shape[0])
                presenter = monitors.Presenter(
                    args.utilization_monitors, 55,
                    (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
                if args.output and not video_writer.open(
                        args.output, cv2.VideoWriter_fourcc(*'MJPG'),
                        cap.fps(), output_resolution):
                    raise RuntimeError("Can't open video writer")
            # Submit for inference
            pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            pipeline.await_any()

        if pipeline.callback_exceptions:
            raise pipeline.callback_exceptions[0]
        # Process all completed requests
        results = pipeline.get_result(next_frame_id_to_show)
        if results:
            objects, frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']
            frame = render_segmentation(frame, objects, visualizer,
                                        output_transform, only_masks)
            presenter.drawGraphs(frame)
            metrics.update(start_time, frame)

            if video_writer.isOpened() and (
                    args.output_limit <= 0 or
                    next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1

            if not args.no_show:
                cv2.imshow('Segmentation Results', frame)
                key = cv2.waitKey(1)
                # FIX: cv2.waitKey returns an int keycode; the original compared
                # it to the strings 'q'/'Q', which is always False, so only ESC
                # could quit. Compare against ord() values instead.
                if key in (27, ord('q'), ord('Q')):
                    break
                if key == 9:  # Tab toggles masks-only rendering
                    only_masks = not only_masks
                presenter.handleKey(key)

    pipeline.await_all()
    # Process completed requests — results for every submitted frame id are
    # awaited (busy-wait) and rendered in order.
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = pipeline.get_result(next_frame_id_to_show)
        while results is None:
            results = pipeline.get_result(next_frame_id_to_show)
        objects, frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']
        frame = render_segmentation(frame, objects, visualizer,
                                    output_transform, only_masks)
        presenter.drawGraphs(frame)
        metrics.update(start_time, frame)
        if video_writer.isOpened() and (
                args.output_limit <= 0 or
                next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)
        if not args.no_show:
            cv2.imshow('Segmentation Results', frame)
            # No quit check while draining: all results are flushed.
            key = cv2.waitKey(1)

    metrics.print_total()
    print(presenter.reportMeans())
def main():
    """Run the async human-pose-estimation demo.

    The first frame fixes the model's aspect ratio and the output transform;
    it is submitted eagerly before the main loop starts.
    """
    args = build_argparser().parse_args()
    metrics = PerformanceMetrics()

    log.info('Initializing Inference Engine...')
    ie = IECore()

    plugin_config = get_user_config(args.device, args.num_streams, args.num_threads)

    cap = open_images_capture(args.input, args.loop)
    start_time = perf_counter()
    frame = cap.read()
    if frame is None:
        raise RuntimeError("Can't read an image from the input")

    log.info('Loading network...')
    # The model is built for the input's aspect ratio (width / height).
    model = get_model(ie, args, frame.shape[1] / frame.shape[0])

    hpe_pipeline = AsyncPipeline(ie, model, plugin_config,
                                 device=args.device, max_num_requests=args.num_infer_requests)

    log.info('Starting inference...')
    # First frame submitted eagerly, hence ids start at 1 / 0.
    hpe_pipeline.submit_data(frame, 0, {
        'frame': frame,
        'start_time': start_time
    })
    next_frame_id = 1
    next_frame_id_to_show = 0

    output_transform = models.OutputTransform(frame.shape[:2], args.output_resolution)
    if args.output_resolution:
        output_resolution = output_transform.new_resolution
    else:
        output_resolution = (frame.shape[1], frame.shape[0])
    presenter = monitors.Presenter(
        args.utilization_monitors, 55,
        (round(output_resolution[0] / 4), round(output_resolution[1] / 8)))
    video_writer = cv2.VideoWriter()
    if args.output and not video_writer.open(args.output,
                                             cv2.VideoWriter_fourcc(*'MJPG'),
                                             cap.fps(), output_resolution):
        raise RuntimeError("Can't open video writer")

    print(
        "To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"
    )
    while True:
        # Surface exceptions raised in the pipeline's completion callbacks.
        if hpe_pipeline.callback_exceptions:
            raise hpe_pipeline.callback_exceptions[0]
        # Process all completed requests
        results = hpe_pipeline.get_result(next_frame_id_to_show)
        if results:
            (poses, scores), frame_meta = results
            frame = frame_meta['frame']
            start_time = frame_meta['start_time']

            if len(poses) and args.raw_output_message:
                print_raw_results(poses, scores)

            presenter.drawGraphs(frame)
            frame = draw_poses(frame, poses, args.prob_threshold, output_transform)
            metrics.update(start_time, frame)
            # --output_limit <= 0 means "write every frame".
            if video_writer.isOpened() and (
                    args.output_limit <= 0 or
                    next_frame_id_to_show <= args.output_limit - 1):
                video_writer.write(frame)
            next_frame_id_to_show += 1
            if not args.no_show:
                cv2.imshow('Pose estimation results', frame)
                key = cv2.waitKey(1)

                ESC_KEY = 27
                # Quit.
                if key in {ord('q'), ord('Q'), ESC_KEY}:
                    break
                presenter.handleKey(key)
            # Prefer showing results over grabbing new frames this iteration.
            continue

        if hpe_pipeline.is_ready():
            # Get new image/frame
            start_time = perf_counter()
            frame = cap.read()
            if frame is None:
                break
            # Submit for inference
            hpe_pipeline.submit_data(frame, next_frame_id, {
                'frame': frame,
                'start_time': start_time
            })
            next_frame_id += 1
        else:
            # Wait for empty request
            hpe_pipeline.await_any()

    hpe_pipeline.await_all()
    # Process completed requests — results for every submitted frame id are
    # awaited (busy-wait) and rendered in order.
    for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id):
        results = hpe_pipeline.get_result(next_frame_id_to_show)
        while results is None:
            results = hpe_pipeline.get_result(next_frame_id_to_show)
        (poses, scores), frame_meta = results
        frame = frame_meta['frame']
        start_time = frame_meta['start_time']

        if len(poses) and args.raw_output_message:
            print_raw_results(poses, scores)

        presenter.drawGraphs(frame)
        frame = draw_poses(frame, poses, args.prob_threshold, output_transform)
        metrics.update(start_time, frame)
        if video_writer.isOpened() and (
                args.output_limit <= 0 or
                next_frame_id_to_show <= args.output_limit - 1):
            video_writer.write(frame)
        if not args.no_show:
            cv2.imshow('Pose estimation results', frame)
            key = cv2.waitKey(1)

            ESC_KEY = 27
            # Quit.
            if key in {ord('q'), ord('Q'), ESC_KEY}:
                break
            presenter.handleKey(key)

    metrics.print_total()
    print(presenter.reportMeans())