Example #1
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        output_q.put(detect_objects(frame_rgb, sess, detection_graph))
    fps.stop()
    sess.close()
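
A minimal usage sketch for a worker like this, assuming the same helpers the examples on this page rely on (WebcamVideoStream, FPS, detect_objects) and the multiprocessing Queue/Pool pattern shown in the later examples; the source index, frame size and queue sizes below are illustrative, not taken from any particular repo:

from multiprocessing import Queue, Pool

import cv2

input_q = Queue(maxsize=5)
output_q = Queue(maxsize=5)
pool = Pool(2, worker, (input_q, output_q))  # two processes, each running worker(input_q, output_q)

video_capture = WebcamVideoStream(src=0, width=480, height=360).start()
while True:
    input_q.put(video_capture.read())                         # raw frame in
    frame = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)   # processed frame out
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

pool.terminate()
video_capture.stop()
cv2.destroyAllWindows()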
Example #2
    def worker(input_q, output_q):
        # Load a (frozen) Tensorflow model into memory.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

            sess = tf.Session(graph=detection_graph)

        fps = FPS().start()
        while True:
            fps.update()
            frame = input_q.get()
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            output_q.put(detect_objects(frame_rgb, sess, detection_graph))

        fps.stop()
        sess.close()



    # def publish_detected_object(label):
    #         context = zmq.Context()
    #         socket = context.socket(zmq.PUB)
    #         addr = '127.0.0.1'  # remote ip or localhost
    #         port = "5556"  # same as in the pupil remote gui
    #         socket.bind("tcp://{}:{}".format(addr, port))
    #         time.sleep(1)
    #         while label is not None:
    #             topic = 'detected_object'
    #             #print ('%s %s' % (topic, label))
    #             try:
    #                 socket.send_string('%s %s' % (topic, label))
    #             except TypeError:
    #                 socket.send('%s %s' % (topic, label))
    #             break
Example #3
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()

    i = 0

    while True:
        fps.update()
        frame, original_frame = input_q.get()

        # print("WORKER SHAPE" + str(original_frame.shape))

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if i % ANALYZE_EVERY_N_FRAMES == 0:
            data = detect_objects(frame_rgb[int(frame_rgb.shape[0] * (1 - PIECE_TO_ANALYZE)):], sess, detection_graph)
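            # Only the bottom PIECE_TO_ANALYZE fraction of the frame was passed to the
            # detector, so its normalized ymin/ymax refer to that crop. Map them back to
            # full-frame coordinates: y_full = (1 - PIECE_TO_ANALYZE) + y_crop * PIECE_TO_ANALYZE.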
            for point in data['rect_points']:
                point['ymin'] = (1-PIECE_TO_ANALYZE) + point['ymin'] * (PIECE_TO_ANALYZE)
                point['ymax'] = (1-PIECE_TO_ANALYZE) + point['ymax'] * (PIECE_TO_ANALYZE)
            i = 0
        # result = detect_objects(frame_rgb, sess, detection_graph)
        result = data
        result['frame'] = frame
        result['original_frame'] = original_frame
        output_q.put(result)
        i += 1

    fps.stop()
    sess.close()
Example #4
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()

    # logger = multiprocessing.log_to_stderr()
    # logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
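    # Pool(processes, initializer, initargs): worker never returns, so each pool
    # process keeps pulling frames from input_q instead of waiting for tasks.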
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        print('4', sys.path)
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #5
def inference(sess, img_np):
    fps = FPS().start()
    fps.update()
    output = detect_objects(img_np, sess)
    display_PIL(output)
    fps.stop()
Example #6
    def run_demo(self, args, mirror=False):

        # Define the codec and create VideoWriter object
        height = args.demo_size
        width = int(4.0 / 3 * args.demo_size)

        swidth = int(width / 4)
        sheight = int(height / 4)
        if args.record:
            fourcc = cv2.VideoWriter_fourcc('F', 'M', 'P', '4')
            out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (2 * width, height))

        # print(args.video_source)
        if args.video_source.isdigit():
            args.video_source = int(args.video_source)

        cam = cv2.VideoCapture(args.video_source)
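        # Property ids 3 and 4 are cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT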
        cam.set(3, width)
        cam.set(4, height)
        key = 0
        idx = 0
        #
        CWD_PATH = os.getcwd()
        resume = os.path.join(CWD_PATH, 'cp_deeplabv3+_20181009-14.45.37_epoch50_lr0.01_50-Finished.pth.tar')
        m_lane = LaneSeg(model='resnet50', param=resume)

        # --- prepare input image
        # from scipy import misc
        font = cv2.FONT_HERSHEY_SIMPLEX
        fps = FPS().start()
        stopflag = False
        count = 0
        while not stopflag:

            # read frame
            idx += 1
            ret_val, img = cam.read()
            if mirror:
                img = cv2.flip(img, 1)

            if img is None:
                break

            cimg = img.copy()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # y_pred = m_lane.inference(np.array(img))

            h, w = 480, 640
            # -- binary label
            # binary = y_pred.astype(np.uint8)
            # imageio.imwrite('binary_{}.png'.format(filename), binary)
            # -- colored image
            result_rgb = np.zeros((h, w, 3), dtype=np.uint8)

            step1 = np.zeros((h * max_width), dtype=np.uint8)

            # filename = os.path.join(os.getcwd(), 's.png')
            #
            # step1 = misc.imread(filename)  # input, np array

            self.lock.acquire()
            self.data.add(b"hello!")
            self.lock.release()
            # print('thread ',self.id,' product once, add ',count)
            if IS_DEBUG:
                stopflag = True
                imageio.imwrite('s.png', result_rgb)

            fps.update()

            text2 = '[INFO] approx. FPS: {:.2f}'.format(fps.fps())  # fps.fps()

            # text
            showimg = np.concatenate((result_rgb, cimg), axis=1)
            # putText arguments: image, text, origin, font,
            cv2.putText(showimg, text2, (0, 20), font,
                        # scale, color, thickness
                        0.9, (255, 255, 255), 3)
            cv2.imshow('Demo', showimg)

            key = cv2.waitKey(1)
            if args.record:
                out.write(showimg)
            if key == 27:  # Esc: quit
                stopflag = True

            # # print(key)
            # space to pause and resume
            if key == 32:
                while True:
                    key = cv2.waitKey(1)
                    if key == 32:
                        break

                    if key == 27:
                        stopflag = True
                        break

                    if key == 115:  # 's': save the current input and output frames
                        id = datetime.datetime.now()
                        sourceImgName = "{}_input.png".format(id)
                        resultImgName = "{}_output.png".format(id)
                        imageio.imwrite(sourceImgName, img)
                        imageio.imwrite(resultImgName, result_rgb)

        cam.release()
        if args.record:
            out.release()
        cv2.destroyAllWindows()
Example #7
    def videoLoop(self):

        # This try/except statement is a pretty ugly hack to get around
        # a RunTime error that Tkinter throws due to threading
        fTime = False
        try:
            # keep looping over frames until we are instructed to stop
            input_q = Queue(
                5)  # fps is better if queue is higher but then more lags
            output_q = Queue()
            for i in range(1):
                t = Thread(target=worker, args=(input_q, output_q))
                t.daemon = True
                t.start()
            fps = FPS().start()
            while not self.stopEvent.is_set():

                # grab the frame from the video stream and resize it to
                # have a maximum width of 1280 pixels
                self.frame = self.vs.read()
                self.frame = imutils.resize(self.frame, height=720)

                input_q.put(self.frame)

                t = time.time()

                font = cv2.FONT_HERSHEY_SIMPLEX
                data = output_q.get()
                rec_points = data['rect_points']
                class_names = data['class_names']
                class_colors = data['class_colors']
                for point, name, color in zip(rec_points, class_names,
                                              class_colors):
                    cv2.rectangle(
                        self.frame,
                        (int(point['xmin'] * 1280), int(point['ymin'] * 720)),
                        (int(point['xmax'] * 1280), int(point['ymax'] * 720)),
                        color, 3)
                    cv2.rectangle(
                        self.frame, (int(point['xmin'] * args.width),
                                     int(point['ymin'] * 720)),
                        (int(point['xmin'] * 1280) + len(name[0]) * 6,
                         int(point['ymin'] * 720) - 10), color, -1,
                        cv2.LINE_AA)
                    cv2.putText(
                        self.frame, name[0],
                        (int(point['xmin'] * 1280), int(point['ymin'] * 720)),
                        font, 0.3, (0, 0, 0), 1)

                # OpenCV represents images in BGR order; however PIL
                # represents images in RGB order, so we need to swap
                # the channels, then convert to PIL and ImageTk format
                image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(image)
                image = ImageTk.PhotoImage(image)

                # if the panel is not None, we need to initialize it
                if self.panel is None:

                    self.video_frame = tki.Frame(relief="raised",
                                                 borderwidth=5)
                    self.live_streem = tki.Label(self.video_frame,
                                                 text="Display")
                    self.live_streem.pack()

                    self.panel = tki.Label(self.video_frame, image=image)
                    self.panel.image = image
                    self.panel.pack(padx=10, pady=10)
                    self.video_frame.pack(side="top")

                # otherwise, simply update the panel

                else:
                    self.panel.configure(image=image)
                    self.panel.image = image
                if not fTime:

                    mainframe = tki.Frame(self.root,
                                          relief="raised",
                                          borderwidth=5)

                    # Add a grid
                    select_cam_frame = tki.Frame(mainframe,
                                                 relief="raised",
                                                 borderwidth=5)
                    tki.Label(select_cam_frame,
                              text="Select an IP camera").pack()

                    # Create a Tkinter variable
                    tkvar_camera = tki.StringVar()

                    # Dictionary with options
                    #camera_choices = {'Camera 1 in 701','Camera 2 in 701','Camera 1 in 702','Camera 1 in 207'}
                    camera_choices = {
                        'Camera 1 in 701', 'Camera 2 in 701',
                        'Camera 1 in 702', 'Web Camera', 'Web Camera 2'
                    }
                    tkvar_camera.set(
                        'Camera 1 in 701')  # set the default option

                    tki.OptionMenu(select_cam_frame, tkvar_camera,
                                   *camera_choices).pack()

                    # on change dropdown value
                    def change_dropdown_camera(*args):
                        self._camera = tkvar_camera.get()
                        print(tkvar_camera.get())

                    # link function to change dropdown
                    tkvar_camera.trace('w', change_dropdown_camera)
                    select_cam_frame.pack(side="left")

                    select_model_frame = tki.Frame(mainframe,
                                                   relief="raised",
                                                   borderwidth=5)
                    tki.Label(select_model_frame,
                              text="Select an object detection model").pack()

                    # Create a Tkinter variable
                    tkvar_model = tki.StringVar()

                    # Dictionary with options
                    model_choices = {
                        'SSD Mobilenet V1', 'YOLO', 'Faster R-CNN',
                        'Mask R-CNN', 'ResNet'
                    }
                    tkvar_model.set(
                        'SSD Mobilenet V1')  # set the default option

                    tki.OptionMenu(select_model_frame, tkvar_model,
                                   *model_choices).pack()

                    # on change dropdown value
                    def change_dropdown_model(*args):
                        self._model = tkvar_model.get()
                        print(tkvar_model.get())

                    # link function to change dropdown
                    tkvar_model.trace('w', change_dropdown_model)

                    def run_button_pressed():
                        threading.Event().set()  # creates and sets a fresh Event; self.stopEvent is untouched

                        self.vs.stop()

                        if self._camera == 'Camera 1 in 701':
                            # Camera 1 in 701
                            self.vs = VideoStream(
                                'rtsp://*****:*****@192.168.111.112:554/doc/page/main.asp'
                            ).start()
                        elif self._camera == 'Camera 2 in 701':
                            # Camera 2 in 701
                            self.vs = VideoStream(
                                'rtsp://*****:*****@192.168.111.113:554/doc/page/main.asp'
                            ).start()
                        elif self._camera == 'Camera 1 in 702':
                            # Camera 1 in 702
                            self.vs = VideoStream(
                                'rtsp://*****:*****@192.168.111.129:554/doc/page/main.asp'
                            ).start()
                        elif self._camera == 'Camera 1 in 207':
                            # Camera 1 in 207
                            self.vs = VideoStream(
                                'rtsp://*****:*****@192.168.0.53:554/doc/page/main.asp'
                            ).start()
                        elif self._camera == 'Web Camera':
                            # Web Camera
                            self.vs = VideoStream(0).start()
                        elif self._camera == 'Web Camera 2':
                            # Web Camera 2
                            self.vs = VideoStream(1).start()
                        self.thread = threading.Thread(target=self.videoLoop)
                        print('Run button pressed!')

                    select_model_frame.pack(side="left")

                    # Run Button
                    run = tki.Button(mainframe,
                                     text='Run',
                                     fg="blue",
                                     font=("Courier", 30),
                                     command=run_button_pressed)
                    run.pack(side="left")

                    # Quit Button
                    quitbutton = tki.Button(mainframe,
                                            text='Exit',
                                            fg="red",
                                            font=("Courier", 30),
                                            command=self.onClose)
                    quitbutton.pack(side="left")
                    mainframe.pack()

                    fTime = True

            self.vs.stop()
            cv2.destroyAllWindows()
            fps.stop()
        except RuntimeError as e:
            print("[INFO] caught a RuntimeError")
Example #8
def worker2(mid_q, output_q):
    fps = FPS().start()
    ser = serial.Serial('/dev/tty.HC-05-DevB')  # note: choose the correct serial port
    # Single-character commands sent over the HC-05 Bluetooth serial link
    go = "1\n".encode('UTF-8')
    turn_right = "2\n".encode('UTF-8')
    stop = "3\n".encode('UTF-8')
    turn_left = "4\n".encode('UTF-8')
    back = "5\n".encode('UTF-8')
    hand_put = "6\n".encode('UTF-8')
    hand_catch = "7\n".encode('UTF-8')
    mid_loss = 0
    pri_loss = 0
    t_print = 0
    while True:
        fps.update()
        lb = output_q.get()
        hsv = cv2.cvtColor(lb, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, green_min, green_max)
        res = cv2.bitwise_and(lb, lb, mask=mask)
        kernel = np.ones((5, 5), np.uint8)
        kernel2 = np.ones((3, 3), np.uint8)
        dst = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
        res = cv2.erode(dst, kernel2, iterations=3)
        edges = cv2.Canny(res, 0, 30, 3)
        edges = np.array(edges)
        im = Image.fromarray(edges)
        width, height = im.size
        row_num = np.zeros(width)
        #line_num = np.zeros(height)
        #line_index=np.zeros(4)
        row_index = np.zeros(4)
        row_c = 0
        #line_c=0
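        # Build a per-column histogram of Canny edge pixels: row_num[j] counts how many
        # edge pixels fall in image column j (the commented-out line_num would do the
        # same per row).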
        for i in range(0, height):
            for j in range(0, width):
                if edges[i, j] > 100:
                    #line_num[i]=line_num[i]+1
                    row_num[j] = row_num[j] + 1

        for i in range(0, 4):

            ac = row_num.argmax(axis=0)
            if row_num[ac] > 30:
                row_index[i] = ac
                row_num[ac] = 0
                row_c = row_c + 1


#           ab=line_num.argmax(axis=0)
#
#            if line_num[ab]>30:
#                line_index[i]=ab
#                line_num[ab]=0
#                line_c=line_c+1
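        # row_index holds up to 4 of the strongest edge columns; mean(row_index) * 4 / row_c
        # equals the average of the detected columns, i.e. the estimated line centre.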
        if row_c > 0:
            row_mid = int(np.mean(row_index) * 4 / row_c)
        else:
            row_mid = np.mean(row_index)

        row_mid_de = int(width / 2)
        pri_loss = mid_loss
        mid_loss = row_mid - row_mid_de  # > 0: the line is right of centre; < 0: left of centre
        if time.time() - t_print > 1.8:
            t_print = time.time()
            if mid_loss > 30:
                ser.write(turn_right)
                print("R")
            elif mid_loss < (-30):
                ser.write(turn_left)
                print("L")
            else:  # within ±30 px of centre: drive straight
                ser.write(go)
                print("centered")

    #time.sleep(0.3)

    # Unreachable in practice: the loop above only exits when the process is killed
    fps.stop()
Example #9
def main(argv):

    print("\n---------- Starting object detection ----------\n")

    # Instantiate an ObjectDetector class object
    # Takes the name of the model graph as an argument
    ObjectFinder = ObjectDetector('frozen_inference_graph.pb')

    # Initialize a parser object
    parser = argparse.ArgumentParser()
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        type=int,
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=1080,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=720,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=4,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=25,
                        help='Size of the queue.')
    args = parser.parse_args()

    # Initialize a logger object
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, ObjectFinder.worker, (input_q, output_q))
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # ------------------------------Control Loop ------------------------------
    fps = FPS().start()
    # fps._numFrames < 120
    frame_number = 0
    while True:
        frame_number += 1
        # Frame is a numpy nd array
        frame = video_capture.read()
        input_q.put(frame)
        t = time.time()
        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()
        print(
            "[INFO] elapsed time: {0:.3f}\nFrame number: {1}-------------------------------"
            .format((time.time() - t), frame_number))
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break
    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
Example #10
def main(args):
    """Sets up object detection according to the provided args."""

    # If no number of workers is specified, use all available GPUs
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    draw_proc = Process(target=draw_worker,
                        args=(
                            input_q,
                            output_q,
                            args.detect_workers,
                            args.track_gpu_id,
                            args.rows,
                            args.cols,
                            args.detect_rate,
                        ))
    draw_proc.start()

    if args.stream:
        print('Reading from hls stream.')
        video_capture = HLSVideoStream(src=args.stream).start()
    elif args.video_path:
        print('Reading from local video.')
        video_capture = LocalVideoStream(src=args.video_path,
                                         width=args.width,
                                         height=args.height).start()
    else:
        print('Reading from webcam.')
        video_capture = LocalVideoStream(src=args.video_source,
                                         width=args.width,
                                         height=args.height).start()

    video_out = None
    if args.video_out_fname is not None:
        video_out = cv2.VideoWriter(
            args.video_out_fname, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
            OUTPUT_FRAME_RATE, (video_capture.WIDTH, video_capture.HEIGHT))

    fps = FPS().start()
    while True:  # fps._numFrames < 120
        try:
            frame = video_capture.read()
            input_q.put(frame)
            start_time = time.time()

            output_rgb = cv2.cvtColor(output_q.get()[0], cv2.COLOR_RGB2BGR)
            if args.show_frame:
                cv2.imshow('Video', output_rgb)
            if video_out is not None:
                video_out.write(output_rgb)
            fps.update()

            print('[INFO] elapsed time: {:.2f}'.format(time.time() -
                                                       start_time))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        except (KeyboardInterrupt, SystemExit):
            if video_out is not None:
                video_out.release()
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    if video_out is not None:
        video_out.release()
    draw_proc.join()
    video_capture.stop()
    cv2.destroyAllWindows()
Example #11
def main():
	speed = 20  # video playback speed control (the cv2.waitKey delay)
	thread_num = 1  # number of worker threads
	get_frame_num = 1  # number of frames displayed so far
	video_path = "video/2.mp4"  # path of the video to run detection on
	input_q = [Queue(400),  # list of input queues, each holding up to 400 frames
				# Queue(400),
				# Queue(400),
				]
	output_q = [Queue(),  # list of output queues, unbounded
				# Queue(),
				# Queue(),
				]
	for i in range(thread_num):  # one worker per thread
		t = Thread(target=thread_worker, args=(input_q[i], output_q[i]))
		t.daemon = True  # daemon thread: the process does not wait for it on exit
		t.start()

	# Start reading the video
	video_capture = cv2.VideoCapture(video_path)  # open the video
	global width, height  # get the video dimensions via OpenCV
	width, height = int(video_capture.get(3)), int(video_capture.get(4))
	print('video width-height:', width, '-', height)
	fps = FPS().start()  # start the FPS timer
	while True:
		ret, frame = video_capture.read()  # read one video frame
		if not ret:  # exit once the whole video has been read
			break
		fps.update()  # increment the counter for every frame read
		# if not input_q.full():
		in_q_index = fps.getNumFrames() % thread_num  # which input queue this frame goes to
		input_q[in_q_index].put(frame)  # push the frame onto that input queue

		frame_start_time = time.time()  # record when processing of this frame started
		out_q_index = get_frame_num % thread_num  # which output queue to read the next frame from
		if not output_q[out_q_index].empty():
			get_frame_num += 1  # one more frame displayed
			# Convert the frame from the output queue back to BGR, then display it
			od_frame = cv2.cvtColor(output_q[out_q_index].get(), cv2.COLOR_RGB2BGR)
			ch = cv2.waitKey(speed)  # poll the keyboard
			if ch & 0xFF == ord('q'):  # q: quit
				break
			elif ch & 0xFF == ord('w'):  # w: slow down
				speed += 10
			elif ch & 0xFF == ord('s'):  # s: speed up
				speed -= 10
			elif ch & 0xFF == ord('r'):  # r: restore the default speed
				speed = 50
			# Draw the playback speed in the top-left corner
			cv2.putText(od_frame, 'SPEED:' + str(speed), (20, int(height/20)), cv2.FONT_HERSHEY_SIMPLEX,
						0.7, (0, 255, 0), 2)
			# Draw the current frame count, elapsed time and average FPS below it
			fps.stop()
			cv2.putText(od_frame, 'FRAME:{:}'.format(fps._numFrames), (20, int(height*2/20)), cv2.FONT_HERSHEY_SIMPLEX,
						0.8, (0, 255, 0), 2)
			cv2.putText(od_frame, 'TIME:{:.3f}'.format(fps.elapsed()), (20, int(height*3/20)), cv2.FONT_HERSHEY_SIMPLEX,
						0.8, (0, 255, 0), 2)
			cv2.putText(od_frame, 'AVE_FPS: {:.3f}'.format(fps.fps()), (20, int(height*4/20)), cv2.FONT_HERSHEY_SIMPLEX,
						0.7, (0, 0, 255), 2)
			cv2.imshow('Video', od_frame)
		# Print how long this frame took to process
		print('[INFO] elapsed time: {:.5f}'.format(time.time() - frame_start_time))

	fps.stop()
	# Print the total elapsed time
	print('[INFO] elapsed time (total): {:.4f}'.format(fps.elapsed()))
	# Print the average frame rate
	print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

	cv2.destroyAllWindows()
Example #12
def web():
    run = True
    parser = argparse.ArgumentParser()
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        type=int,
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=480,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=360,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=2,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while run is True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()