    parser.add_argument('-src', '--source', dest='video_source', type=int,
                        default=0, help='Device index of the camera.')
    parser.add_argument('-wd', '--width', dest='width', type=int,
                        default=480, help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=360, help='Height of the frames in the video stream.')
    args = parser.parse_args()

    input_q = Queue(5)  # a larger queue improves fps but adds lag
    output_q = Queue()
    for i in range(1):  # spawn a single worker thread
        t = Thread(target=worker, args=(input_q, output_q))
        t.daemon = True
        t.start()

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        if output_q.empty():
            pass  # no result yet; keep filling the queue
        else:
            font = cv2.FONT_HERSHEY_SIMPLEX
            data = output_q.get()
            rec_points = data['rect_points']
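
# The loop above, like most fragments on this page, assumes a
# worker(input_q, output_q) callable defined elsewhere. A minimal sketch of
# that contract, with the model call stubbed out (the dict keys are
# hypothetical, chosen to match the data read above):
def worker(input_q, output_q):
    while True:
        frame = input_q.get()  # blocks until a frame arrives
        # a real worker would run the detection model on `frame` here
        output_q.put({'rect_points': [], 'class_names': [], 'class_colors': []})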
Example #2
    sess.close()

if __name__ == '__main__':

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=5)
    output_q = Queue(maxsize=5)
    mid_q = Queue(maxsize=5)
    # two-stage pipeline: worker fills mid_q, worker2 drains it into output_q
    pool = Pool(3, worker, (input_q, mid_q))
    pool2 = Pool(3, worker2, (mid_q, output_q))
    # webcamera

    cam_url = 'http://192.168.8.1/html/index?video.html'
    video_capture = WebcamVideoStream(src=cam_url, width=640,
                                      height=480).start()
    #video_capture=cv2.VideoCapture(0)

    fps = FPS().start()
    t_start = time.time()
    out = None
    while True:  # fps._numFrames < 120
        if time.time() - t_start > 1.2:  # delay between output frames
            t_start = time.time()
            frame = video_capture.read()
            frame = cv2.resize(frame, (640, 480))
            input_q.put(frame)

            t = time.time()
            out = mid_q.get()
            output_q.put(out)
    """
    parser.add_argument('-wd', '--width', dest='width', type=int,
                        default=480, help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=360, help='Height of the frames in the video stream.')
    """

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
        category_index=category_index,
        min_score_thresh=.5)

    return image_np, rect_points, class_names, class_colors


def close():
    sess.close()


camera_width = 480
camera_height = 360
camera_source = 0

cameraObj = WebcamVideoStream(src=camera_source,
                              width=camera_width,
                              height=camera_height)
video_capture = cameraObj.start()

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

    sess = tf.Session(graph=detection_graph)

while True:
    frame = video_capture.read()
Example #5
            # if out is not None:
            #     cv2.imshow("Video", out)
            # else:
            #     cv2.imshow("Video", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if args.stream:
        print('Reading from hls stream.')
        video_capture = HLSVideoStream(src=args.stream).start()
    else:
        print('Reading from webcam.')
        video_capture = WebcamVideoStream(src=args.video_source,
                                          width=args.width,
                                          height=args.height).start()

    # fps = FPS().start()
    #
    # while True:  # fps._numFrames < 120
    #     frame = video_capture.read()
    #     frame=cv2.flip(frame,1)
    #     input_q.put(frame)
    #
    #     t = time.time()
    #
    #     output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
    #     cv2.imshow('Video', output_rgb)
    #     fps.update()
    #
Example #6
        # Only measure the time taken by YOLO and API Call overhead

        # dark_frame = Image(frame)
        # results = net.detect(dark_frame)
        # del dark_frame
        #
        # end_time = time.time()
        # print("Elapsed Time  :",end_time-start_time)

        # for cat, score, bounds in results:
        #     x, y, w, h = bounds
        #     cv2.rectangle(frame, (int(x-w/2),int(y-h/2)),(int(x+w/2),int(y+h/2)),(255,0,0))
        #     cv2.putText(frame, str(cat.decode("utf-8")), (int(x), int(y)), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0))
        ret, jpeg = cv2.imencode('.jpeg', frame)
        jpeg = jpeg.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg + b'\r\n\r\n')


@app.route('/video_feed')
def video_feed():
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
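

# A minimal way to view the stream above in a browser is a page that embeds
# the multipart endpoint in an <img> tag. This route is a sketch, not part
# of the original example.
@app.route('/')
def index():
    return '<img src="/video_feed">'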


if __name__ == '__main__':
    video_capture = WebcamVideoStream(src=args.video, width=600,
                                      height=600).start()
    app.run(host='0.0.0.0', port=8000, debug=False)
Example #7
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()
    i = 0
    prev_cent = 300
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src='lyrics.mp4',
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    # video writer (note: the DIVX fourcc pairs more reliably with .avi containers)
    out = cv2.VideoWriter('outpy.mp4',
                          cv2.VideoWriter_fourcc('D', 'I', 'V', 'X'), 23,
                          (480, 240))

    #sqls = "CREATE TABLE IF NOT EXISTS VEHICLE_LOG(Camera_id VARCHAR(32),\
    #Entry_Count INT, Exit_Count INT,\
    #Project_id VARCHAR(32), Object_id VARCHAR(32),\
    #Remark VARCHAR(32), timestmp VARCHAR(45))"
    #cursor.execute(sqls)
    #db.commit()
    while True:  # fps._numFrames < 120
class Realtime:
    """
    Read and apply object detection to input video stream
    """
    def __init__(self, args):
        self.display = args["display"] == 1
        self.queue_input = None
        self.queue_output = None
        self.pool = None
        self.vs = None
        self.fps = None

        self.start_queue(args["logger_debug"], args["queue_size"],
                         args["num_workers"])
        self.start_stream(args["input_device"])

    def start_queue(self, debugger, size, workers):
        """
        Starts processing queue.
        """

        if debugger:
            logger = multiprocessing.log_to_stderr()
            logger.setLevel(multiprocessing.SUBDEBUG)

        self.queue_input = Queue(maxsize=size)
        self.queue_output = Queue(maxsize=size)
        self.pool = Pool(workers, worker,
                         (self.queue_input, self.queue_output))

    def start_stream(self, device):
        """
        Create a threaded video stream and start the FPS counter.
        """

        self.vs = WebcamVideoStream(src=device).start()
        self.fps = FPS().start()

    def start(self):
        """
        Start processing video feed.
        """

        if self.display:
            print()
            print(
                "====================================================================="
            )
            print(
                "Starting video acquisition. Press 'q' (on the video windows) to stop."
            )
            print(
                "====================================================================="
            )
            print()

        # Start reading and treating the video stream
        running = True
        while running:
            running = self.capture()

        self.destroy()

    def capture(self):
        """
        Capture and process video frame.
        """

        if cv2.waitKey(1) & 0xFF == ord('q'):
            return False

        # Capture frame-by-frame
        ret, frame = self.vs.read()

        # No new frame, try again
        if not ret:
            return True

        # Place frame in queue
        self.queue_input.put(frame)

        # Display the resulting frame
        if self.display:
            cv2.imshow(
                'frame',
                cv2.cvtColor(self.queue_output.get(), cv2.COLOR_RGB2BGR))
            self.fps.update()

        return True

    def destroy(self):
        """
        Stop threads and hide OpenCV frame.
        """

        # When everything done, release the capture
        self.fps.stop()
        self.pool.terminate()
        self.vs.stop()

        cv2.destroyAllWindows()
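
# A minimal sketch of driving this class, assuming the same args keys the
# constructor reads (values here are hypothetical):
if __name__ == '__main__':
    Realtime({"display": 1, "logger_debug": False, "queue_size": 5,
              "num_workers": 2, "input_device": 0}).start()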
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=HEIGHT,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-a-path',
                        '--animation-path',
                        dest='animation_path',
                        type=str,
                        default="animation_full",
                        help='Path of folder containing animation frames.')

    args = parser.parse_args()

    ar = AR()

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=WIDTH,
                                      height=HEIGHT).start()

    fps = FPS().start()

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    while True:
Example #10
    FRGraph = FaceRecGraph()
    aligner = AlignCustom()
    extract_feature = FaceFeature(FRGraph)
    face_detect = MTCNNDetect(FRGraph, scale_factor=2)

    input_q = Queue(5)  # a larger queue improves fps but adds lag
    output_q = Queue()
    for i in range(1):  # spawn a single worker thread
        t = Thread(target=worker, args=(input_q, output_q))
        t.daemon = True
        t.start()

    video_capture = WebcamVideoStream(
        src=
        'rtsp://*****:*****@192.168.0.25:554/PSIA/streaming/channels/801',
        width=args.width,
        height=args.height).start()
    # video_capture = WebcamVideoStream(src='rtsp://*****:*****@192.168.0.25:554/PSIA/streaming/channels/301',
    #                                   width=args.width,
    #                                   height=args.height).start()
    fps = FPS().start()

    while True:
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        if output_q.empty():
            pass  # no result yet; keep filling the queue
Example #11
def main(argv):

    print("\n---------- Starting object detection ----------\n")

    # Instantiate an ObjectDetector class object
    # Takes the name of the model graph as an argument
    ObjectFinder = ObjectDetector('frozen_inference_graph.pb')

    # Initialize a parser object
    parser = argparse.ArgumentParser()
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        type=int,
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=1080,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=720,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=4,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=25,
                        help='Size of the queue.')
    args = parser.parse_args()

    # Initialize a logger object
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, ObjectFinder.worker, (input_q, output_q))
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # ------------------------------Control Loop ------------------------------
    fps = FPS().start()
    # fps._numFrames < 120
    frame_number = 0
    while True:
        frame_number += 1
        # Frame is a numpy nd array
        frame = video_capture.read()
        input_q.put(frame)
        t = time.time()
        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()
        print(
            "[INFO] elapsed time: {0:.3f}\nFrame number: {1}-------------------------------"
            .format((time.time() - t), frame_number))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=360, help='Height of the frames in the video stream.')
    parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
                        default=2, help='Number of workers.')
    parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
                        default=5, help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=0,
                                      width=WIDTH,
                                      height=HEIGHT).start()
    fps = FPS().start()
    
    
    while True:  
        frame = video_capture.read()
        input_q.put(frame)
        
        t = time.time()        
        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()
        
        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
ap.add_argument("-d", "--detection-release", type=int, default=5,
                help="Detection release (s)")
ap.add_argument("-c", "--output-codec", type=str, default="mkv",
                help="Ouput video codec")
ap.add_argument("-l", "--display-level", type=int, default=0,
                help="Display level (0: no display)")
ap.add_argument("--record-camera", "--record-camera", type=int,
                default=0, help="Record device (720p)")
ap.add_argument("--detector-camera", "--detector-camera", type=int,
                default=0, help="detector device (480p)")
args = vars(ap.parse_args())

# Get input video stream(s)
print("Start video stream(s)")
detector_vs = None
record_vs = WebcamVideoStream(src=args['record_camera'], HD=True).start()
print("record device: FPS {} / Width {} / Height {}".format(
        record_vs.getFPS(), record_vs.getWidth(), record_vs.getHeight()))

if args['detector_camera'] != args['record_camera']:
        detector_vs = WebcamVideoStream(src=args['detector_camera']).start()
        print("detector device: FPS {} / Width {} / Height {}".format(
                detector_vs.getFPS(), detector_vs.getWidth(),
                detector_vs.getHeight()))
time.sleep(2.0)

# Define the output codec and create VideoWriter object
if args['output_codec'] == "avi":
        fourcc = cv2.VideoWriter_fourcc(*'XVID')  # avi
elif args['output_codec'] == "mkv":
        fourcc = cv2.VideoWriter_fourcc(*'X264')  # mkv
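
# The fragment stops before the writer is created; a minimal continuation,
# assuming a hypothetical output file name and reusing the recorder's
# reported frame rate and size:
out = cv2.VideoWriter('output.' + args['output_codec'], fourcc,
                      record_vs.getFPS(),
                      (record_vs.getWidth(), record_vs.getHeight()))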
Example #14
def web():
    run = True
    parser = argparse.ArgumentParser()
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        type=int,
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=480,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=360,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=2,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while run:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
Example #15
    with tf.Session(graph=detection_graph) as sess:
        # Define input and output tensors for detection_graph
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        # Each score represents the level of confidence for each object.
        # The score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name(
            'detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name(
            'detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        # src=0 --> /dev/video0; change src according to your video device node
        video_capture = WebcamVideoStream(src=0, width=720, height=480).start()
        fps = FPS().start()
        while True:
            frame = video_capture.read()
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_np_expanded = np.expand_dims(frame_rgb, axis=0)
            # Actual detection.
            (boxes, scores, classes,
             num) = sess.run([
                 detection_boxes, detection_scores, detection_classes,
                 num_detections
             ],
                             feed_dict={image_tensor: image_np_expanded})

            print('***************** Visualization ************')
            # Visualization of the results of a detection.
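            # The example truncates here; the usual continuation with the TF
            # Object Detection API is its stock helper (assuming vis_util and
            # category_index are set up as in the API tutorials):
            vis_util.visualize_boxes_and_labels_on_image_array(
                frame_rgb,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=4)
            cv2.imshow('Video', cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2BGR))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break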
Example #16
    #menu_right_upper.grid(column=0,row=0)
    #menu_right_lower.grid(column=0,row=1)

    cam_output = tk.Label(root)

    cam_output.grid(column=0,row=0)
    menu_right.grid(row=0, column=1,sticky="nsew")

    for i in range(1):  # spawn a single worker thread
        t = Thread(target=worker, args=(input_q, output_q))
        t.daemon = True
        t.start()

    video_capture = WebcamVideoStream(src=video_source,
                                      width=width,
                                      height=height).start()

    fps = FPS().start()
    t = time.time()
    alert_img = cv2.imread('alert.jpg', cv2.IMREAD_COLOR)
    display()
    root.mainloop()
    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    video_capture.stop()
    exit()
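
# The example calls display() without defining it here; a minimal sketch,
# assuming the Tk widgets and queues above (PIL's Image/ImageTk are extra,
# hypothetical dependencies):
def display():
    if not output_q.empty():
        rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_BGR2RGB)
        img = ImageTk.PhotoImage(Image.fromarray(rgb))
        cam_output.configure(image=img)
        cam_output.image = img  # keep a reference so Tk doesn't GC it
    root.after(10, display)  # poll again in 10 ms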
# activate tensorflow
# cd C:\Project\object_detector_app
# python object_detection_multithreading.py