def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        output_q.put(detect_objects(frame, sess, detection_graph))

    # Never reached: the pool terminates worker processes from outside the loop.
    fps.stop()
    sess.close()
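None of these snippets include detect_objects() itself. A minimal sketch of what it typically looks like for a TF1 frozen detection graph, shaped to return the dict the drawing loop below expects ('rect_points', 'class_names', 'class_colors'); the tensor names are the standard TensorFlow Object Detection API exports, while the 0.5 threshold and the single color are illustrative assumptions:

import numpy as np

def detect_objects(image_np, sess, detection_graph):
    # The model expects a batch of images: [1, height, width, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)

    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num = detection_graph.get_tensor_by_name('num_detections:0')

    (boxes, scores, classes, num) = sess.run(
        [boxes, scores, classes, num],
        feed_dict={image_tensor: image_np_expanded})

    rect_points, class_names, class_colors = [], [], []
    for box, score, cls in zip(np.squeeze(boxes), np.squeeze(scores),
                               np.squeeze(classes)):
        if score < 0.5:  # assumed confidence threshold
            continue
        ymin, xmin, ymax, xmax = box  # normalized coordinates
        rect_points.append(dict(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax))
        class_names.append(['class {}: {:.0f}%'.format(int(cls), 100 * score)])
        class_colors.append((0, 255, 0))  # placeholder; real code maps class to color
    return dict(rect_points=rect_points, class_names=class_names,
                class_colors=class_colors)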
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        if output_q.empty():
            pass  # fill up queue
        else:
            font = cv2.FONT_HERSHEY_SIMPLEX
            data = output_q.get()
            rec_points = data['rect_points']
            class_names = data['class_names']
            class_colors = data['class_colors']
            for point, name, color in zip(rec_points, class_names, class_colors):
                cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)),
                              (int(point['xmax'] * args.width), int(point['ymax'] * args.height)), color, 3)
                cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)),
                              (int(point['xmin'] * args.width) + len(name[0]) * 6,
                               int(point['ymin'] * args.height) - 10), color, -1, cv2.LINE_AA)
                cv2.putText(frame, name[0], (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), font,
                            0.3, (0, 0, 0), 1)
            cv2.imshow('Video', frame)

        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    video_capture.stop()
    cv2.destroyAllWindows()
    if args.stream:
        print('Reading from hls stream.')
        video_capture = HLSVideoStream(src=args.stream).start()
    else:
        print('Reading from webcam.')
        video_capture = WebcamVideoStream(src=args.video_source,
                                          width=args.width,
                                          height=args.height).start()

    fps = FPS().start()

    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
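Every example leans on an FPS helper in the style of imutils.video.FPS. A minimal equivalent, in case you want the snippets self-contained (getNumFrames() is an extra accessor one of the later examples expects):

from datetime import datetime

class FPS:
    def __init__(self):
        self._start = None   # start timestamp
        self._end = None     # stop timestamp
        self._numFrames = 0  # frames counted between start() and stop()

    def start(self):
        self._start = datetime.now()
        return self

    def stop(self):
        self._end = datetime.now()

    def update(self):
        self._numFrames += 1

    def getNumFrames(self):
        return self._numFrames

    def elapsed(self):
        return (self._end - self._start).total_seconds()

    def fps(self):
        return self._numFrames / self.elapsed()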
class Realtime:
    """
    Read and apply object detection to input video stream
    """
    def __init__(self, args):
        self.display = args["display"] == 1
        self.queue_input = None
        self.queue_output = None
        self.pool = None
        self.vs = None
        self.fps = None

        self.start_queue(args["logger_debug"], args["queue_size"],
                         args["num_workers"])
        self.start_stream(args["input_device"])

    def start_queue(self, debugger, size, workers):
        """
        Starts processing queue.
        """

        if debugger:
            logger = multiprocessing.log_to_stderr()
            logger.setLevel(multiprocessing.SUBDEBUG)

        self.queue_input = Queue(maxsize=size)
        self.queue_output = Queue(maxsize=size)
        self.pool = Pool(workers, worker,
                         (self.queue_input, self.queue_output))

    def start_stream(self, device):
        """
        Create a threaded video stream and start the FPS counter.
        """

        self.vs = WebcamVideoStream(src=device).start()
        self.fps = FPS().start()

    def start(self):
        """
        Start processing video feed.
        """

        if self.display:
            print()
            print(
                "====================================================================="
            )
            print(
                "Starting video acquisition. Press 'q' (on the video windows) to stop."
            )
            print(
                "====================================================================="
            )
            print()

        # Start reading and treating the video stream
        running = True
        while running:
            running = self.capture()

        self.destroy()

    def capture(self):
        """
        Capture and process video frame.
        """

        if cv2.waitKey(1) & 0xFF == ord('q'):
            return False

        # Capture frame-by-frame
        ret, frame = self.vs.read()

        # No new frame, try again
        if not ret:
            return True

        # Place frame in queue
        self.queue_input.put(frame)

        # Display the resulting frame
        if self.display:
            cv2.imshow(
                'frame',
                cv2.cvtColor(self.queue_output.get(), cv2.COLOR_RGB2BGR))
            self.fps.update()

        return True

    def destroy(self):
        """
        Stop threads and hide OpenCV frame.
        """

        # When everything done, release the capture
        self.fps.stop()
        self.pool.terminate()
        self.vs.stop()

        cv2.destroyAllWindows()
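A hypothetical way to wire up the Realtime class above; the dict keys mirror exactly what __init__ reads, and the values are illustrative defaults:

if __name__ == '__main__':
    rt = Realtime({
        "display": 1,       # 1: show the annotated stream in a window
        "logger_debug": 0,  # 1: enable multiprocessing SUBDEBUG logging
        "queue_size": 5,
        "num_workers": 2,
        "input_device": 0,  # webcam index
    })
    rt.start()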
def inference(sess, img_np):
    fps = FPS().start()
    fps.update()
    output = detect_objects(img_np, sess)
    display_PIL(output)
    fps.stop()
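display_PIL() is another helper not shown here; judging by the name it renders a numpy image through PIL. A minimal stand-in under that assumption:

from PIL import Image

def display_PIL(img_np):
    # Show a numpy RGB image using PIL's default viewer.
    Image.fromarray(img_np).show()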
Example no. 6
def main(argv):

    print("\n---------- Starting object detection ----------\n")

    # Instantiate an ObjectDetector class object
    # Takes the name of the model graph as an argument
    ObjectFinder = ObjectDetector('frozen_inference_graph.pb')

    # Initialize a parser object
    parser = argparse.ArgumentParser()
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        type=int,
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=1080,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=720,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=4,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=25,
                        help='Size of the queue.')
    args = parser.parse_args()

    # Initialize a logger object
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, ObjectFinder.worker, (input_q, output_q))
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # ------------------------------Control Loop ------------------------------
    fps = FPS().start()
    frame_number = 0
    while True:  # fps._numFrames < 120
        frame_number += 1
        # Frame is a numpy nd array
        frame = video_capture.read()
        input_q.put(frame)
        t = time.time()
        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()
        print(
            "[INFO] elapsed time: {0:.3f}\nFrame number: {1}-------------------------------"
            .format((time.time() - t), frame_number))
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break
    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
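ObjectDetector is defined elsewhere; from its use above it wraps the frozen graph and exposes a worker(input_q, output_q) method for the pool. A skeleton consistent with that interface, reusing the same loop shape as worker() at the top of this page:

class ObjectDetector:
    """Hypothetical skeleton matching how main() uses the class."""
    def __init__(self, graph_fname):
        self.graph_fname = graph_fname  # path to frozen_inference_graph.pb

    def worker(self, input_q, output_q):
        # Load the graph and session once per worker process.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.graph_fname, 'rb') as fid:
                od_graph_def.ParseFromString(fid.read())
                tf.import_graph_def(od_graph_def, name='')
            sess = tf.Session(graph=detection_graph)
        # Consume frames and produce detections until terminated.
        while True:
            frame = input_q.get()
            output_q.put(detect_objects(frame, sess, detection_graph))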
def main(args):
    """Sets up object detection according to the provided args."""

    # Set up the frame queues shared with the drawing/detection process
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    draw_proc = Process(target=draw_worker,
                        args=(
                            input_q,
                            output_q,
                            args.detect_workers,
                            args.track_gpu_id,
                            args.rows,
                            args.cols,
                            args.detect_rate,
                        ))
    draw_proc.start()

    if args.stream:
        print('Reading from hls stream.')
        video_capture = HLSVideoStream(src=args.stream).start()
    elif args.video_path:
        print('Reading from local video.')
        video_capture = LocalVideoStream(src=args.video_path,
                                         width=args.width,
                                         height=args.height).start()
    else:
        print('Reading from webcam.')
        video_capture = LocalVideoStream(src=args.video_source,
                                         width=args.width,
                                         height=args.height).start()

    video_out = None
    if args.video_out_fname is not None:
        video_out = cv2.VideoWriter(
            args.video_out_fname, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
            OUTPUT_FRAME_RATE, (video_capture.WIDTH, video_capture.HEIGHT))

    fps = FPS().start()
    while True:  # fps._numFrames < 120
        try:
            frame = video_capture.read()
            input_q.put(frame)
            start_time = time.time()

            output_rgb = cv2.cvtColor(output_q.get()[0], cv2.COLOR_RGB2BGR)
            if args.show_frame:
                cv2.imshow('Video', output_rgb)
            if video_out is not None:
                video_out.write(output_rgb)
            fps.update()

            print('[INFO] elapsed time: {:.2f}'.format(time.time() -
                                                       start_time))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        except (KeyboardInterrupt, SystemExit):
            if video_out is not None:
                video_out.release()
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    if video_out is not None:
        video_out.release()
    draw_proc.join()
    video_capture.stop()
    cv2.destroyAllWindows()
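draw_worker() is defined elsewhere in this project. From the call sites above, its contract is: consume raw frames from input_q and put tuples on output_q whose first element is the annotated RGB frame. A pass-through stub with the same contract (detection and tracking omitted) would look like:

def draw_worker(input_q, output_q, detect_workers, track_gpu_id,
                rows, cols, detect_rate):
    while True:
        frame = input_q.get()
        # Real code would run detection every detect_rate frames and track
        # in between; this stub just forwards the frame unchanged.
        output_q.put((frame,))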
def main():
	speed = 20  # playback speed control: waitKey delay in ms
	thread_num = 1  # number of worker threads
	get_frame_num = 1  # number of frames displayed so far
	video_path = "video/2.mp4"  # path of the video to run detection on
	input_q = [Queue(400),  # list of input queues, capacity 400 each
				# Queue(400),
				# Queue(400),
				]
	output_q = [Queue(),  # list of output queues, unbounded
				# Queue(),
				# Queue(),
				]
	for i in range(thread_num):  # one worker thread per queue pair
		t = Thread(target=thread_worker, args=(input_q[i], output_q[i]))
		t.daemon = True  # daemon thread: the process will not wait for it on exit
		t.start()

	# Start reading the video
	video_capture = cv2.VideoCapture(video_path)  # open the video file
	global width, height  # expose the frame size read via OpenCV
	width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
	height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
	print('video width-height:', width, '-', height)
	fps = FPS().start()  # start the FPS timer
	while True:
		ret, frame = video_capture.read()  # read one video frame
		if not ret:  # end of video: stop
			break
		fps.update()  # count every frame read
		# if not input_q.full():
		in_q_index = fps.getNumFrames() % thread_num  # which input queue gets this frame
		input_q[in_q_index].put(frame)  # enqueue the frame

		frame_start_time = time.time()  # start time for processing this frame
		out_q_index = get_frame_num % thread_num  # which output queue to display from next
		if not output_q[out_q_index].empty():
			get_frame_num += 1  # one more frame displayed
			# Convert the processed frame back to BGR before displaying it
			od_frame = cv2.cvtColor(output_q[out_q_index].get(), cv2.COLOR_RGB2BGR)
			ch = cv2.waitKey(speed)  # poll the keyboard
			if ch & 0xFF == ord('q'):  # 'q': quit
				break
			elif ch & 0xFF == ord('w'):  # 'w': slow down
				speed += 10
			elif ch & 0xFF == ord('s'):  # 's': speed up (keep the waitKey delay positive)
				speed = max(10, speed - 10)
			elif ch & 0xFF == ord('r'):  # 'r': restore the initial speed
				speed = 20
			# Draw the current speed in the top-left corner
			cv2.putText(od_frame, 'SPEED:' + str(speed), (20, int(height / 20)), cv2.FONT_HERSHEY_SIMPLEX,
						0.7, (0, 255, 0), 2)
			# Annotate frame count, elapsed time and average FPS below it
			fps.stop()
			cv2.putText(od_frame, 'FRAME:{:}'.format(fps.getNumFrames()), (20, int(height * 2 / 20)), cv2.FONT_HERSHEY_SIMPLEX,
						0.8, (0, 255, 0), 2)
			cv2.putText(od_frame, 'TIME:{:.3f}'.format(fps.elapsed()), (20, int(height * 3 / 20)), cv2.FONT_HERSHEY_SIMPLEX,
						0.8, (0, 255, 0), 2)
			cv2.putText(od_frame, 'AVE_FPS: {:.3f}'.format(fps.fps()), (20, int(height * 4 / 20)), cv2.FONT_HERSHEY_SIMPLEX,
						0.7, (0, 0, 255), 2)
			cv2.imshow('Video', od_frame)
		# Log how long this frame took end to end
		print('[INFO] elapsed time: {:.5f}'.format(time.time() - frame_start_time))

	fps.stop()
	# Total elapsed time
	print('[INFO] elapsed time (total): {:.4f}'.format(fps.elapsed()))
	# Average frame rate
	print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

	cv2.destroyAllWindows()
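thread_worker() is not shown either. From the loop above it must take frames off its input queue and put processed RGB frames on its output queue. A runnable stand-in that skips detection and only converts BGR to RGB (so the RGB2BGR conversion on display still round-trips):

def thread_worker(input_q, output_q):
    while True:
        frame = input_q.get()
        # Detection omitted: forward the frame, converted to the RGB
        # layout the display loop expects.
        output_q.put(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))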
Example no. 9
def web():
    run = True
    parser = argparse.ArgumentParser()
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        type=int,
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=480,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=360,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=2,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while run:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
def worker2(mid_q, output_q):
    fps = FPS().start()
    ser = serial.Serial('/dev/tty.HC-05-DevB')  # pick the right serial port for the HC-05 module
    go = "1\n"
    turn_right = "2\n"
    stop = "3\n"
    turn_left = "4\n"
    back = "5\n"
    hand_put = "6\n"
    hand_catch = "7\n"
    go = go.encode('UTF-8')
    turn_right = turn_right.encode('UTF-8')
    stop = stop.encode('UTF-8')
    turn_left = turn_left.encode('UTF-8')
    back = back.encode('UTF-8')
    hand_put = hand_put.encode('UTF-8')
    hand_catch = hand_catch.encode('UTF-8')
    mid_loss = 0
    pri_loss = 0
    t_print = 0
    while True:
        fps.update()
        lb = output_q.get()
        hsv = cv2.cvtColor(lb, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, green_min, green_max)
        res = cv2.bitwise_and(lb, lb, mask=mask)
        kernel = np.ones((5, 5), np.uint8)
        kernel2 = np.ones((3, 3), np.uint8)
        dst = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
        res = cv2.erode(dst, kernel2, iterations=3)
        edges = cv2.Canny(res, 0, 30, 3)
        edges = np.array(edges)
        im = Image.fromarray(edges)
        width, height = im.size
        row_num = np.zeros(width)
        #line_num = np.zeros(height)
        #line_index=np.zeros(4)
        row_index = np.zeros(4)
        row_c = 0
        #line_c=0
        # Count edge pixels per column (edge histogram along x)
        for i in range(0, height):
            for j in range(0, width):
                if edges[i, j] > 100:
                    #line_num[i]=line_num[i]+1
                    row_num[j] = row_num[j] + 1

        # Keep up to the four strongest columns as candidate line positions
        for i in range(0, 4):
            ac = row_num.argmax(axis=0)
            if row_num[ac] > 30:
                row_index[i] = ac
                row_num[ac] = 0
                row_c = row_c + 1


#           ab=line_num.argmax(axis=0)
#
#            if line_num[ab]>30:
#                line_index[i]=ab
#                line_num[ab]=0
#                line_c=line_c+1
        if row_c > 0:
            row_mid = int(np.mean(row_index) * 4 / row_c)
        else:
            row_mid = np.mean(row_index)

        row_mid_de = int(width / 2)
        pri_loss = mid_loss
        mid_loss = row_mid - row_mid_de  # > 0: line is right of center; < 0: left of center
        if time.time() - t_print > 1.8:
            t_print = time.time()
            if mid_loss > 30:
                ser.write(turn_right)
                print("R")
            elif mid_loss < (-30):
                ser.write(turn_left)
                print("L")
            else:  # -30 <= mid_loss <= 30: close enough to center, go straight
                ser.write(go)
                print("centered")

    #time.sleep(0.3)

    # Never reached: the loop above never breaks.
    fps.stop()
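worker2() reads the module-level HSV bounds green_min / green_max, which are defined elsewhere. A plausible definition for tracking a green line, assuming numpy as np as the snippet already does (the exact values depend on camera and lighting):

green_min = np.array([35, 100, 100], dtype=np.uint8)  # lower HSV bound for green
green_max = np.array([85, 255, 255], dtype=np.uint8)  # upper HSV bound for green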