pass # fill up queue else: font = cv2.FONT_HERSHEY_SIMPLEX data = output_q.get() rec_points = data['rect_points'] class_names = data['class_names'] class_colors = data['class_colors'] for point, name, color in zip(rec_points, class_names, class_colors): cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), (int(point['xmax'] * args.width), int(point['ymax'] * args.height)), color, 3) cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), (int(point['xmin'] * args.width) + len(name[0]) * 6, int(point['ymin'] * args.height) - 10), color, -1, cv2.LINE_AA) cv2.putText(frame, name[0], (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), font, 0.3, (0, 0, 0), 1) cv2.imshow('Video', frame) fps.update() print('[INFO] elapsed time: {:.2f}'.format(time.time() - t)) if cv2.waitKey(1) & 0xFF == ord('q'): break fps.stop() print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed())) print('[INFO] approx. FPS: {:.2f}'.format(fps.fps())) video_capture.stop() cv2.destroyAllWindows()
# Wire up the frame pipeline: bounded queues feeding a detection worker pool,
# with a threaded webcam reader on the front end.
frame_queue = Queue(maxsize=args.queue_size)
result_queue = Queue(maxsize=args.queue_size)
pool = Pool(args.num_workers, worker, (frame_queue, result_queue))
video_capture = WebcamVideoStream(src=args.video_source,
                                  width=args.width,
                                  height=args.height).start()

fps = FPS().start()
while True:  # fps._numFrames < 120
    # Feed the raw frame in, then show whatever processed frame is ready.
    frame_queue.put(video_capture.read())
    loop_start = time.time()
    cv2.imshow('Video', result_queue.get())
    fps.update()
    print('[INFO] elapsed time: {:.2f}'.format(time.time() - loop_start))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Report totals, then tear everything down.
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
pool.terminate()
video_capture.stop()
cv2.destroyAllWindows()
def main(argv):
    """Run multi-process object detection on a webcam stream.

    Args:
        argv: Command-line argument list (without the program name), or
            None to fall back to sys.argv[1:].
    """
    print("\n---------- Starting object detection ----------\n")

    # Instantiate an ObjectDetector class object.
    # Takes the name of the model graph as an argument.
    ObjectFinder = ObjectDetector('frozen_inference_graph.pb')

    # Initialize a parser object
    parser = argparse.ArgumentParser()
    parser.add_argument('-src', '--source', dest='video_source', type=int,
                        default=0, help='Device index of the camera.')
    parser.add_argument('-wd', '--width', dest='width', type=int,
                        default=1080,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=720,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w', '--num-workers', dest='num_workers',
                        type=int, default=4, help='Number of workers.')
    parser.add_argument('-q-size', '--queue-size', dest='queue_size',
                        type=int, default=25, help='Size of the queue.')
    # BUG FIX: argv was accepted but ignored -- parse_args() always read
    # sys.argv[1:].  parse_args(None) still defaults to sys.argv[1:], so
    # callers passing None (or sys.argv[1:]) see identical behavior.
    args = parser.parse_args(argv)

    # Route multiprocessing worker log records to stderr.
    # NOTE(review): multiprocessing.SUBDEBUG is a Python 2-era constant;
    # confirm it exists on the targeted Python version.
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, ObjectFinder.worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # ------------------------------Control Loop ------------------------------
    fps = FPS().start()  # fps._numFrames < 120
    frame_number = 0
    while True:
        frame_number += 1
        # Frame is a numpy nd array
        frame = video_capture.read()
        input_q.put(frame)
        t = time.time()
        # Workers hand back RGB; convert to BGR for cv2.imshow.
        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()
        print(
            "[INFO] elapsed time: {0:.3f}\nFrame number: {1}-------------------------------"
            .format((time.time() - t), frame_number))
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
def main(args):
    """Sets up object detection according to the provided args."""
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)

    # Spawn the process that runs detection/tracking and draws the
    # annotated frames onto output_q.
    draw_proc = Process(
        target=draw_worker,
        args=(input_q, output_q, args.detect_workers, args.track_gpu_id,
              args.rows, args.cols, args.detect_rate))
    draw_proc.start()

    # Pick the frame source: HLS stream, local file, or webcam (default).
    if args.stream:
        print('Reading from hls stream.')
        video_capture = HLSVideoStream(src=args.stream).start()
    elif args.video_path:
        print('Reading from local video.')
        video_capture = LocalVideoStream(src=args.video_path,
                                         width=args.width,
                                         height=args.height).start()
    else:
        print('Reading from webcam.')
        video_capture = LocalVideoStream(src=args.video_source,
                                         width=args.width,
                                         height=args.height).start()

    # Optionally record the annotated output as MJPEG.
    video_out = None
    if args.video_out_fname is not None:
        mjpg = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        video_out = cv2.VideoWriter(
            args.video_out_fname, mjpg, OUTPUT_FRAME_RATE,
            (video_capture.WIDTH, video_capture.HEIGHT))

    fps = FPS().start()
    while True:  # fps._numFrames < 120
        try:
            frame = video_capture.read()
            input_q.put(frame)
            tick = time.time()
            # Workers return RGB; convert back to BGR for display/writing.
            annotated = cv2.cvtColor(output_q.get()[0], cv2.COLOR_RGB2BGR)
            if args.show_frame:
                cv2.imshow('Video', annotated)
            if video_out is not None:
                video_out.write(annotated)
            fps.update()
            print('[INFO] elapsed time: {:.2f}'.format(time.time() - tick))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        except (KeyboardInterrupt, SystemExit):
            if video_out is not None:
                video_out.release()
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    if video_out is not None:
        video_out.release()
    draw_proc.join()
    video_capture.stop()
    cv2.destroyAllWindows()
def main():
    """Play a local video through detection worker threads with live overlays.

    Keyboard controls: q quits, w slows playback, s speeds it up,
    r restores the initial speed.
    """
    INITIAL_SPEED = 20          # initial per-frame waitKey delay (ms)
    speed = INITIAL_SPEED       # video speed control
    thread_num = 1              # number of worker threads
    get_frame_num = 1           # count of frames already displayed
    video_path = "video/2.mp4"  # path of the video to run detection on

    # One bounded input queue (capacity 400) and one unbounded output
    # queue per worker thread.
    input_q = [Queue(400),
               # Queue(400),
               ]
    output_q = [Queue(),
                # Queue(),
                ]
    for i in range(thread_num):
        t = Thread(target=thread_worker, args=(input_q[i], output_q[i]))
        # Daemon thread: the process does not wait for it on exit.
        t.daemon = True
        t.start()

    # Open the video and record its dimensions for the overlay positions.
    video_capture = cv2.VideoCapture(video_path)
    global width, height
    width, height = int(video_capture.get(3)), int(video_capture.get(4))
    print('video width-height:', width, '-', height)

    fps = FPS().start()  # start the FPS timer
    while True:
        ret, frame = video_capture.read()  # read one video frame
        if not ret:  # end of video
            break
        fps.update()  # one more frame read
        # if not input_q.full():
        # Round-robin: pick the input queue this frame should go to.
        in_q_index = fps.getNumFrames() % thread_num
        input_q[in_q_index].put(frame)
        frame_start_time = time.time()  # start time for this frame
        # Which output queue the next displayed frame should come from.
        out_q_index = get_frame_num % thread_num
        if not output_q[out_q_index].empty():
            get_frame_num += 1  # one more frame displayed
            # Convert the processed frame back to BGR before display.
            od_frame = cv2.cvtColor(output_q[out_q_index].get(),
                                    cv2.COLOR_RGB2BGR)
            ch = cv2.waitKey(speed)  # poll the keyboard
            if ch & 0xFF == ord('q'):      # q: quit
                break
            elif ch & 0xFF == ord('w'):    # w: slow down
                speed += 10
            elif ch & 0xFF == ord('s'):    # s: speed up
                # BUG FIX: clamp at 1 ms -- waitKey(0) blocks forever and
                # negative delays are invalid.
                speed = max(1, speed - 10)
            elif ch & 0xFF == ord('r'):    # r: restore initial speed
                # BUG FIX: previously reset to 50 even though the initial
                # speed is 20, contradicting the key's documented intent.
                speed = INITIAL_SPEED
            # Overlay the current speed in the top-left corner.
            cv2.putText(od_frame, 'SPEED:' + str(speed),
                        (20, int(height/20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            # Overlay frame count, running time and average frame rate.
            fps.stop()
            cv2.putText(od_frame, 'FRAME:{:}'.format(fps._numFrames),
                        (20, int(height*2/20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
            cv2.putText(od_frame, 'TIME:{:.3f}'.format(fps.elapsed()),
                        (20, int(height*3/20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
            cv2.putText(od_frame, 'AVE_FPS: {:.3f}'.format(fps.fps()),
                        (20, int(height*4/20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.imshow('Video', od_frame)
            # Print the time spent handling the current frame.
            print('[INFO] elapsed time: {:.5f}'.format(
                time.time() - frame_start_time))

    fps.stop()
    # Print the total elapsed time.
    print('[INFO] elapsed time (total): {:.4f}'.format(fps.elapsed()))
    # Print the average frame rate.
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    cv2.destroyAllWindows()
def web():
    """Run webcam frames through a pool of detection workers and display them."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-src', '--source', dest='video_source', type=int,
                        default=0, help='Device index of the camera.')
    parser.add_argument('-wd', '--width', dest='width', type=int,
                        default=480,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=360,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w', '--num-workers', dest='num_workers',
                        type=int, default=2, help='Number of workers.')
    parser.add_argument('-q-size', '--queue-size', dest='queue_size',
                        type=int, default=5, help='Size of the queue.')
    args = parser.parse_args()

    # Send multiprocessing log records to stderr.
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    frames_in = Queue(maxsize=args.queue_size)
    frames_out = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (frames_in, frames_out))
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    fps = FPS().start()
    while True:  # fps._numFrames < 120
        frames_in.put(video_capture.read())
        loop_start = time.time()
        # Workers hand back RGB; convert to BGR for cv2.imshow.
        bgr_frame = cv2.cvtColor(frames_out.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', bgr_frame)
        fps.update()
        print('[INFO] elapsed time: {:.2f}'.format(time.time() - loop_start))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()