Code example #1
def worker(input_q, output_q):
    """
    Loads a frozen Tensorflow model in memory.
    :param input_q:
    :param output_q:
    :return:
    """
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(FROZEN_MODEL_PATH, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

        fps = FPS().start()
        while True:
            fps.update()
            frame = input_q.get()
            output_q.put(detect_objects(frame, sess, detection_graph))

        fps.stop()
        sess.close()
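
The worker above delegates the actual inference to detect_objects(), which is not shown in this snippet. The sketch below is a minimal reconstruction, assuming the frozen graph follows the TensorFlow Object Detection API convention of exposing the tensors 'image_tensor:0', 'detection_boxes:0', 'detection_scores:0', 'detection_classes:0' and 'num_detections:0'; the original project presumably also draws the detections onto the frame before returning it.

import numpy as np

def detect_objects(frame, sess, detection_graph):
    # Fetch the standard Object Detection API tensors by name (assumption).
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # The graph expects a batch of images, so add a leading batch dimension.
    image_expanded = np.expand_dims(frame, axis=0)
    (boxes, scores, classes, num_detections) = sess.run(
        [boxes, scores, classes, num_detections],
        feed_dict={image_tensor: image_expanded})

    # Placeholder post-processing: return the frame unchanged. The real helper
    # would draw the boxes whose score exceeds some threshold onto the frame.
    return frame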
Code example #2
def worker(input_q, output_q):
    # Pull frames from input_q, run cam_loop() on each one and push the
    # result to output_q.
    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        output_q.put(cam_loop(frame))

    fps.stop()
Code example #3
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        output_q.put(detect_objects(frame, sess, detection_graph))

    fps.stop()
    sess.close()
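
All of the examples lean on two helper classes, FPS and WebcamVideoStream. Below is a minimal usage sketch, assuming they come from the imutils.video package; the width/height keyword arguments used in the examples that follow suggest those projects actually use a customized variant of the stock imutils class.

from imutils.video import FPS, WebcamVideoStream

# Threaded frame grabber: read() always returns the most recent frame.
stream = WebcamVideoStream(src=0).start()
fps = FPS().start()

for _ in range(100):
    frame = stream.read()
    fps.update()

fps.stop()
stream.stop()
print('approx. FPS: {:.2f}'.format(fps.fps()))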
Code example #4
def main():
    """
    Main program.
    :return: None.
    """
    # Create a pool of worker processes connected by the input/output queues.
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    # Load camera configuration.
    width, height = INPUT_RESOLUTION[args.quality]["width"], INPUT_RESOLUTION[args.quality]["height"]

    # Grab video input.
    video_capture = WebcamVideoStream(src=args.video_source, width=width, height=height).start()
    fps = FPS().start()

    # Read video input.
    while True:
        # Update framerate.
        fps.update()

        # Grab frame.
        frame = video_capture.read()

        # Send frame to AI.
        input_q.put(frame)

        # Show processed frame.
        cv.imshow("Webcam videostream ({width} x {height})".format(width=width, height=height), output_q.get())

        # Exit the program when the 'q' key is pressed.
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()

    # End program properly.
    pool.terminate()
    video_capture.stop()
    cv.destroyAllWindows()
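
Example #4 reads its configuration from an args namespace and an INPUT_RESOLUTION table defined elsewhere in the project. The sketch below is a plausible reconstruction based only on the attributes the code accesses (args.queue_size, args.num_workers, args.video_source, args.quality); the flag names, defaults and resolution presets are assumptions, not the original project's values.

import argparse

# Hypothetical resolution presets keyed by the --quality flag.
INPUT_RESOLUTION = {
    "sd": {"width": 640, "height": 480},
    "hd": {"width": 1280, "height": 720},
}

parser = argparse.ArgumentParser(description="Multiprocess object detection on a webcam stream.")
parser.add_argument("--video-source", dest="video_source", type=int, default=0,
                    help="index of the camera device")
parser.add_argument("--quality", dest="quality", choices=sorted(INPUT_RESOLUTION), default="sd",
                    help="input resolution preset")
parser.add_argument("--num-workers", dest="num_workers", type=int, default=2,
                    help="number of detection worker processes")
parser.add_argument("--queue-size", dest="queue_size", type=int, default=5,
                    help="size of the input and output frame queues")
args = parser.parse_args()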
Code example #5
def worker(input_q, output_q):
    # Same worker as in code example #2: pull frames from input_q, run
    # cam_loop() on each one and push the result to output_q.
    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        output_q.put(cam_loop(frame))

    fps.stop()


if __name__ == '__main__':
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=10)
    output_q = Queue(maxsize=10)
    pool = Pool(5, worker, (input_q, output_q))

    video_capture = WebcamVideoStream('crashvideo5.mp4', 800, 600).start()

    fps = FPS().start()

    while True:
        frame = video_capture.read()
        input_q.put(frame)
        cv2.imshow("videos", output_q.get())
        fps.update()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break


    fps.stop()
    pool.terminate()
    # input_q.join()
    # output_q.join()
    video_capture.stop()
Code example #6
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    print(args.video_source)
    print(args.width)
    print(args.height)

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        cv2.imshow('Video', output_q.get())
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break