def start_stream(self, device):
        """
        Create a threaded video stream and start the FPS counter.
        """

        self.vs = WebcamVideoStream(src=device).start()
        self.fps = FPS().start()
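Every example on this page relies on the FPS counter (and, in most cases, the threaded WebcamVideoStream grabber) from imutils.video. A minimal, self-contained usage sketch, assuming imutils and OpenCV are installed and that camera index 0 exists, looks like this:

import cv2
from imutils.video import FPS, WebcamVideoStream

# Threaded frame grabber and frame-rate counter (imutils.video helpers).
vs = WebcamVideoStream(src=0).start()
fps = FPS().start()

for _ in range(100):
    frame = vs.read()
    if frame is None:
        break
    fps.update()  # count one processed frame

fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

vs.stop()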
Example #2
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-src', '--source', dest='video_source', type=int,
                        default=0, help='Device index of the camera.')
    parser.add_argument('-wd', '--width', dest='width', type=int,
                        default=480, help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=360, help='Height of the frames in the video stream.')
    parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
                        default=2, help='Number of workers.')
    parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
                        default=5, help='Size of the queue.')
    args = parser.parse_args()

    #logger = multiprocessing.log_to_stderr()
    #logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        #frame = video_capture.read()
        time.sleep(0.1)
        frame = cv2.imread("/Net/openpose/pred.jpg")
        if frame is None:
            print("ERROR: could not read frame")
            continue
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        # print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
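Most of the workers in this listing call a detect_objects helper that is never shown here (its signature even varies between examples). The sketch below is only a guess at its shape, using the standard TF1 Object Detection API tensor names; the real helper presumably also draws the detections on the frame before returning it, since the callers imshow its return value.

import numpy as np
import tensorflow as tf

def detect_objects(image_np, sess, detection_graph):
    # Hypothetical reconstruction -- the actual helper is not part of this listing.
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    boxes_t = detection_graph.get_tensor_by_name('detection_boxes:0')
    scores_t = detection_graph.get_tensor_by_name('detection_scores:0')
    classes_t = detection_graph.get_tensor_by_name('detection_classes:0')
    num_t = detection_graph.get_tensor_by_name('num_detections:0')

    # The model expects a batch dimension: (1, H, W, 3).
    image_expanded = np.expand_dims(image_np, axis=0)
    boxes, scores, classes, num = sess.run(
        [boxes_t, scores_t, classes_t, num_t],
        feed_dict={image_tensor: image_expanded})

    # A real implementation would draw the boxes on the frame here
    # and return the annotated image.
    return image_np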
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        output_q.put(detect_objects(frame, sess, detection_graph))

    fps.stop()
    sess.close()
def worker(graph, input_q, output_q):
    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        graph.LoadTensor(
            resize(frame / 255.0, dim, 1)[:, :, (2, 1, 0)].astype(np.float16),
            'user object')
        out, userobj = graph.GetResult()
        results = interpret_output(out.astype(np.float32), frame.shape[1],
                                   frame.shape[0])
        #print(results)

        try:
            if results[0][0] == "car":
                print("Object detected!")
                cv2.imwrite('frame.png', frame)
                print("Image captured!\n")
                print("Pushing image to server...\n")
                url = 'http://192.168.0.163:8080/ay'
                with open('frame.png', 'rb') as f:
                    requests.post(url, files={'file': f})
        except Exception:
            print("NO RESULTS\n")

        output_q.put((frame, results, frame.shape[1], frame.shape[0]))
        #output_q.put((frame, [], frame.shape[1], frame.shape[0]))
        #output_q.put(frame)
    #
    fps.stop()
Example #6
def worker(input_q, output_q, logQueue):
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(graph=detection_graph, config=config)

    fps = FPS().start()
    while True:
        try:
            fps.update()
            frame = input_q.get()
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # convert the frame to RGB
            output_q.put(
                detect_objects(frame_rgb, sess, detection_graph,
                               logQueue))  # push the detection result to the output queue
            pic = cv2.resize(output_q.get(), (1920, 1080),
                             interpolation=cv2.INTER_CUBIC)  # scale to the display size
            cv2.imshow('frame', pic)  # show the detection output
            if cv2.waitKey(1) == ord('q'):
                break
            while output_q.qsize() > 200:
                output_q.get()
        except Exception:
            # print("Exception in the object detection code!")
            continue

    fps.stop()
    sess.close()
Example #7
def worker(graph, input_q, output_q):
    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        graph.LoadTensor(
            resize(frame / 255.0, dim, 1)[:, :, (2, 1, 0)].astype(np.float16),
            'user object')
        out, userobj = graph.GetResult()
        results = interpret_output(out.astype(np.float32), frame.shape[1],
                                   frame.shape[0])
        #print(results)
        try:
            talk = list(results)
            cmd = (talk[0][0])
            cmd_beg = 'espeak '
            cmd_end = ' 2>/dev/null'  # To dump the std errors to /dev/null
            call([cmd_beg + cmd + cmd_end], shell=True)
        except:
            pass
        output_q.put((frame, results, frame.shape[1], frame.shape[0]))
        #output_q.put((frame, [], frame.shape[1], frame.shape[0]))
        #output_q.put(frame)
    #
    fps.stop()
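The espeak invocation above builds a shell command by string concatenation; an equivalent that avoids the shell (a sketch, assuming espeak is on the PATH) would be:

import subprocess

def speak(label):
    # Announce a detected label with espeak, discarding its stderr.
    subprocess.call(['espeak', label], stderr=subprocess.DEVNULL)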
Example #8
def worker(input_q, output_q):

    fps = FPS().start()
    image_tf = tf.placeholder(tf.float32, shape=(1, 240, 320, 3))
    hand_side_tf = tf.constant([[1.0,
                                 1.0]])  # Both left and right hands included
    evaluation = tf.placeholder_with_default(True, shape=())

    # build network
    net = ColorHandPose3DNetwork()
    hand_scoremap_tf, image_crop_tf, scale_tf, center_tf,\
        keypoints_scoremap_tf, keypoint_coord3d_tf = net.inference(image_tf, hand_side_tf, evaluation)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    net.init(sess)
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image_raw = scipy.misc.imresize(frame, (240, 320))
        image_v = np.expand_dims((image_raw.astype('float') / 255.0) - 0.5, 0)
        keypoint_coord3d_v = sess.run(keypoint_coord3d_tf,
                                      feed_dict={image_tf: image_v})
        output_q.put(
            predict_by_geometry(keypoint_coord3d_v, known_finger_poses, 0.45))

    fps.stop()
    sess.close()
Example #9
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    global count
    global ard
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))
        # NOTE: classes and score are not defined in this worker; they are
        # presumably exposed (e.g. as globals) by detect_objects.
        if ((classes[0] == 17 and score[0] >= 0.80)
                or (classes[0] == 88 and score[0] >= 0.80)):
            # cat is label id 17, person is label id 1
            cv2.imwrite("frame%d.jpg" % count, frame)
            ard.write([0])  # send 0 to the Arduino
            obj = ard.readline()  # read the Arduino's reply
            kg = obj.decode().strip('\r\n')
            print(kg)  # weight
            count += 1
    fps.stop()
    sess.close()
Example #10
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)
    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, x = frame_rgb.shape
        persons = detect_objects(frame_rgb, sess, detection_graph, h, w)
        if isinstance(persons, list):
            for p in persons:
                output_q.put(p)
        else:
            output_q.put(persons)
        # output_q.put(detect_objects(frame_rgb, sess, detection_graph, h, w))
        # output_q.put(frame_rgb)
    fps.stop()
    sess.close()
Example #11
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        session_conf = tf.ConfigProto(intra_op_parallelism_threads=2,
                                      inter_op_parallelism_threads=2)
        #config = tf.ConfigProto(device_count={'CPU': 1})
        sess = tf.Session(graph=detection_graph, config=session_conf)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        cnt = cv2.imencode('.png', frame)[1]
        b64 = base64.b64encode(cnt)
        b64 = b64.decode('utf-8')

        #text_file = open("/home/vcardoso/Output.txt", "w")
        #text_file.write("%s" % b64)
        #text_file.close()

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph, b64))

    fps.stop()
    sess.close()
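This worker PNG-encodes each frame and wraps it in base64 before handing it to detect_objects; whatever consumes that string can recover the image with the reverse steps (a sketch of the decode path, not code from this listing):

import base64

import cv2
import numpy as np

def decode_b64_frame(b64_string):
    # base64 string -> PNG bytes -> BGR image (inverse of the encoding above).
    png_bytes = base64.b64decode(b64_string)
    buf = np.frombuffer(png_bytes, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)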
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)
        mtcnn = detect_and_align.create_mtcnn(sess, None)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)        
        face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame_rgb, mtcnn)
        output = dict(face_boxes=padded_bounding_boxes)
        output_q.put(output)

    fps.stop()
    sess.close()
    cuda.select_device(0)
    cuda.close()
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    accumulator = np.zeros(157,)
    while True:
        fps.update()
        frame = input_q.get()
        try:
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        except:
            continue
        output_q.put(recognize_activity(frame_rgb, sess, detection_graph, accumulator))

    fps.stop()
    sess.close()
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    ### Add by me
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.33
    ### End add by me
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph,config=config)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))

    fps.stop()
    sess.close()
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        config = tf.ConfigProto(intra_op_parallelism_threads=1,
                                inter_op_parallelism_threads=1,
                                allow_soft_placement=True,
                                device_count={'CPU': 1})
        #sess = tf.Session(graph=detection_graph)
        sess = tf.Session(graph=detection_graph, config=config)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Call detection
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))
        # Doesn't call detection
        #output_q.put(frame_rgb)

    fps.stop()
    sess.close()
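The workers in this listing differ mainly in how they configure the TensorFlow session: thread counts, GPU memory growth, a hard memory fraction, or hiding the GPU entirely. The sketch below simply collects the TF1 settings seen above in one tf.ConfigProto for reference; pick whichever the deployment needs.

import tensorflow as tf

detection_graph = tf.Graph()  # stand-in; load the frozen model as in the workers above

# TF1 session options used across these examples, combined for reference.
config = tf.ConfigProto(
    intra_op_parallelism_threads=1,    # threads inside a single op
    inter_op_parallelism_threads=1,    # threads across independent ops
    allow_soft_placement=True,         # fall back to CPU when an op has no GPU kernel
    device_count={'CPU': 1})           # device_count={'GPU': 0} would hide the GPU
config.gpu_options.allow_growth = True                     # grow GPU memory on demand
config.gpu_options.per_process_gpu_memory_fraction = 0.33  # or cap it to a fraction

sess = tf.Session(graph=detection_graph, config=config)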
def worker(graph, input_q, output_q):
    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        graph.LoadTensor(
            resize(frame / 255.0, dim, 1)[:, :, (2, 1, 0)].astype(np.float16),
            'user object')
        out, userobj = graph.GetResult()
        results = interpret_output(out.astype(np.float32), frame.shape[1],
                                   frame.shape[0])
        #print(results)

        try:
            if results[0][0] == "car" or results[0][0] == "person" or results[
                    0][0] == "aeroplane" or results[0][0] == "bottle":
                print("Object detected! Attempting to capture image...")

                try:
                    #Capture a frame and save it as png file
                    cv2.imwrite(os.path.join(path, 'frame.png'), frame)
                    print("Image captured!\n")
                except:
                    print("Image capture FAILED!\n")
        except:
            print("NO RESULTS\n")

        output_q.put((frame, results, frame.shape[1], frame.shape[0]))
        #output_q.put((frame, [], frame.shape[1], frame.shape[0]))
        #output_q.put(frame)
    #
    fps.stop()
Example #17
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_tmp = detect_objects(frame_rgb, sess, detection_graph)
        alert_array = detect_objects_test(frame_rgb, sess, detection_graph)

        alert = False

        for q in alert_array:
            #print (q)
            if 'cell phone' in q:
                if q['cell phone'] > 70:  #manual rule example
                    alert = True
                    break
            else:
                alert = False

        if alert:
            #text_size, text_baseline = cv2.getTextSize('CELLPHONE USER DETECTED!', cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)
            #pt1 = ((320 - text_size[0]) / 2, (240 + text_size[1]) / 2)
            cv2.putText(output_tmp, 'ALERT: CELLPHONE DETECTED!', (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2,
                        cv2.LINE_AA)
            output_q.put(
                cv2.copyMakeBorder(output_tmp,
                                   5,
                                   5,
                                   5,
                                   5,
                                   cv2.BORDER_CONSTANT,
                                   value=RED))
        else:
            output_q.put(
                cv2.copyMakeBorder(output_tmp,
                                   5,
                                   5,
                                   5,
                                   5,
                                   cv2.BORDER_CONSTANT,
                                   value=GREEN))

    fps.stop()
    sess.close()
def showcamera(video_capture):
    fps = FPS().start()
    while True:
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        if output_q.empty():
            pass  # fill up queue
        else:
            font = cv2.FONT_HERSHEY_SIMPLEX
            data = output_q.get()
            rec_points = data['rect_points']
            class_names = data['class_names']
            class_colors = data['class_colors']
            for point, name, color in zip(rec_points, class_names,
                                          class_colors):
                cv2.rectangle(frame, (int(point['xmin'] * args.width),
                                      int(point['ymin'] * args.height)),
                              (int(point['xmax'] * args.width),
                               int(point['ymax'] * args.height)), color, 3)
                cv2.rectangle(
                    frame, (int(point['xmin'] * args.width),
                            int(point['ymin'] * args.height)),
                    (int(point['xmin'] * args.width) + len(name[0]) * 6,
                     int(point['ymin'] * args.height) - 10), color, -1,
                    cv2.LINE_AA)
                cv2.putText(frame, name[0], (int(point['xmin'] * args.width),
                                             int(point['ymin'] * args.height)),
                            font, 0.3, (0, 0, 0), 1)
            cv2.imshow('Video', frame)

        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()

    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))

    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
def worker(graph, input_q, output_q):
    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        graph.LoadTensor(resize(frame/255.0,dim,1)[:,:,(2,1,0)].astype(np.float16), 'user object')
        out, userobj = graph.GetResult()
        results = interpret_output(out.astype(np.float32), frame.shape[1], frame.shape[0])
        #print(results)
        output_q.put((frame, results, frame.shape[1], frame.shape[0]))
        #output_q.put((frame, [], frame.shape[1], frame.shape[0]))
        #output_q.put(frame)
    #
    fps.stop()
Example #20
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        #frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #output_q.put(face_recog(frame_rgb, sess, detection_graph))
        output_q.put(alpr(frame, sess, detection_graph))
    fps.stop()
    sess.close()
def main(args):

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)

    #profile.run('pool = Pool(args.num_workers, worker, (input_q, output_q))')
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    # Inside this function there is a thread providing frames
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    #PATH_TO_FILE = os.path.join(CWD_PATH, 'rtsp://192.168.0.109:554/user=admin&password=admin&channel=1&stream=0.sdp?')
    #video_capture = WebcamVideoStream(src=PATH_TO_FILE,
    #                                  width=args.width,
    #                                  height=args.height).start()

    fps = FPS().start()

    while True:  #fps._numFrames < 120:

        # Here the frames are read and placed into a Queue, which feeds a Pool
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        #print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    #print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    #print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
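Note that Pool(args.num_workers, worker, (input_q, output_q)) in these main functions leans on the pool's initializer hook: each pool process runs worker as its initializer and never returns from it. An equivalent setup with explicit daemon processes (a sketch, not code from this listing) makes that intent clearer:

from multiprocessing import Process, Queue

# worker is any of the worker(input_q, output_q) functions defined above.
input_q = Queue(maxsize=5)
output_q = Queue(maxsize=5)

# One long-running worker process per "pool slot".
workers = [Process(target=worker, args=(input_q, output_q), daemon=True)
           for _ in range(2)]
for w in workers:
    w.start()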
def draw_worker(input_q, output_q, num_detect_workers, track_gpu_id, rows,
                cols, detect_rate):
    """Detect and track buses from input and save image annotated with bounding boxes to output."""

    # Start detection processes.
    # A list comprehension is needed here: multiplying a one-element list
    # would hand every worker a reference to the same Queue.
    detect_worker_input_queues = [Queue(maxsize=MAX_QUEUE_SIZE)
                                  for _ in range(num_detect_workers)]
    detect_worker_output_queues = [Queue(maxsize=MAX_QUEUE_SIZE)
                                   for _ in range(num_detect_workers)]
    for worker_id in range(num_detect_workers):
        # TODO: Consider adding support for the case where worker_id != GPU_ID.
        Process(target=detect_worker,
                args=(detect_worker_input_queues[worker_id],
                      detect_worker_output_queues[worker_id],
                      worker_id)).start()

    # Start tracking process.
    track_input_queue = Queue(maxsize=MAX_QUEUE_SIZE)
    track_output_queue = Queue(maxsize=MAX_QUEUE_SIZE)
    track = Process(target=track_worker,
                    args=(track_input_queue, track_output_queue, track_gpu_id))
    track.start()

    # Annotate all new frames.
    fps = FPS().start()
    frame_num = -1
    tracked_box_ids = []
    undetected_counts = []
    box_colors = []
    next_box_id = 0
    while True:
        fps.update()
        frame = input_q.get()
        frame_num += 1
        if np.shape(frame) == ():
            continue
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        tracker_only = (frame_num % detect_rate) != 0
        img, boxes, next_box_id = find_objects(
            frame_rgb, detect_worker_input_queues, detect_worker_output_queues,
            track_input_queue, track_output_queue, rows, cols, tracked_box_ids,
            undetected_counts, box_colors, next_box_id, tracker_only)
        output_q.put((img, boxes))
    fps.stop()
    track.join()
Example #23
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        config = tf.ConfigProto(device_count={'GPU':0})
        sess = tf.Session(graph=detection_graph, config=config)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        output_q.put(detect_objects(frame, sess, detection_graph))

    fps.stop()
    sess.close()
Example #24
    def worker(input_q, output_q):
        # Load a (frozen) Tensorflow model into memory.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

            sess = tf.Session(graph=detection_graph)

        fps = FPS().start()
        while True:
            fps.update()
            frame = input_q.get()
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            output_q.put(detect_objects(frame_rgb, sess, detection_graph))

        fps.stop()
        sess.close()



    # def publish_detected_object(label):
    #         context = zmq.Context()
    #         socket = context.socket(zmq.PUB)
    #         addr = '127.0.0.1'  # remote ip or localhost
    #         port = "5556"  # same as in the pupil remote gui
    #         socket.bind("tcp://{}:{}".format(addr, port))
    #         time.sleep(1)
    #         while label is not None:
    #             topic = 'detected_object'
    #             #print ('%s %s' % (topic, label))
    #             try:
    #                 socket.send_string('%s %s' % (topic, label))
    #             except TypeError:
    #                 socket.send('%s %s' % (topic, label))
    #             break
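    The commented-out publish_detected_object above sketches a ZeroMQ PUB socket. An uncommented minimal version, keeping the address, port and topic from the comment (Python 3, so send_string is enough), might look like:

import time
import zmq

def publish_detected_object(label):
    # Minimal PUB socket based on the commented-out code above.
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind("tcp://127.0.0.1:5556")
    time.sleep(1)  # give subscribers a moment to connect
    if label is not None:
        socket.send_string('detected_object {}'.format(label))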
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()

    i = 0

    while True:
        fps.update()
        frame, original_frame = input_q.get()

        # print("WORKER SHAPE" + str(original_frame.shape))

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if i % ANALYZE_EVERY_N_FRAMES == 0:
            data = detect_objects(frame_rgb[int(frame_rgb.shape[0] * (1 - PIECE_TO_ANALYZE)):], sess, detection_graph)
            for point in data['rect_points']:
                point['ymin'] = (1-PIECE_TO_ANALYZE) + point['ymin'] * (PIECE_TO_ANALYZE)
                point['ymax'] = (1-PIECE_TO_ANALYZE) + point['ymax'] * (PIECE_TO_ANALYZE)
            i = 0
        # result = detect_objects(frame_rgb, sess, detection_graph)
        result = data
        result['frame'] = frame
        result['original_frame'] = original_frame
        output_q.put(result)
        i += 1

    fps.stop()
    sess.close()
    def videoLoop(self):

        # This try/except block is an ugly workaround for a RuntimeError
        # that Tkinter raises because of the threading here
        fTime = False
        try:
            # keep looping over frames until we are instructed to stop
            input_q = Queue(
                5)  # a larger queue gives higher FPS but adds latency
            output_q = Queue()
            for i in range(1):
                t = Thread(target=worker, args=(input_q, output_q))
                t.daemon = True
                t.start()
            fps = FPS().start()
            while not self.stopEvent.is_set():

                # grab the frame from the video stream and resize it
                # to a height of 720 pixels
                self.frame = self.vs.read()
                self.frame = imutils.resize(self.frame, height=720)

                input_q.put(self.frame)

                t = time.time()

                font = cv2.FONT_HERSHEY_SIMPLEX
                data = output_q.get()
                rec_points = data['rect_points']
                class_names = data['class_names']
                class_colors = data['class_colors']
                for point, name, color in zip(rec_points, class_names,
                                              class_colors):
                    cv2.rectangle(
                        self.frame,
                        (int(point['xmin'] * 1280), int(point['ymin'] * 720)),
                        (int(point['xmax'] * 1280), int(point['ymax'] * 720)),
                        color, 3)
                    cv2.rectangle(
                        self.frame, (int(point['xmin'] * args.width),
                                     int(point['ymin'] * 720)),
                        (int(point['xmin'] * 1280) + len(name[0]) * 6,
                         int(point['ymin'] * 720) - 10), color, -1,
                        cv2.LINE_AA)
                    cv2.putText(
                        self.frame, name[0],
                        (int(point['xmin'] * 1280), int(point['ymin'] * 720)),
                        font, 0.3, (0, 0, 0), 1)

                # OpenCV represents images in BGR order; however PIL
                # represents images in RGB order, so we need to swap
                # the channels, then convert to PIL and ImageTk format
                image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(image)
                image = ImageTk.PhotoImage(image)

                # if the panel is not None, we need to initialize it
                if self.panel is None:

                    self.video_frame = tki.Frame(relief="raised",
                                                 borderwidth=5)
                    self.live_streem = tki.Label(self.video_frame,
                                                 text="Display")
                    self.live_streem.pack()

                    self.panel = tki.Label(self.video_frame, image=image)
                    self.panel.image = image
                    self.panel.pack(padx=10, pady=10)
                    self.video_frame.pack(side="top")

                # otherwise, simply update the panel

                else:
                    self.panel.configure(image=image)
                    self.panel.image = image
                if fTime == False:

                    mainframe = tki.Frame(self.root,
                                          relief="raised",
                                          borderwidth=5)

                    # Add a grid
                    select_cam_frame = tki.Frame(mainframe,
                                                 relief="raised",
                                                 borderwidth=5)
                    tki.Label(select_cam_frame,
                              text="Select an IP camera").pack()

                    # Create a Tkinter variable
                    tkvar_camera = tki.StringVar()

                    # Set of camera options
                    #camera_choices = {'Camera 1 in 701','Camera 2 in 701','Camera 1 in 702','Camera 1 in 207'}
                    camera_choices = {
                        'Camera 1 in 701', 'Camera 2 in 701',
                        'Camera 1 in 702', 'Web Camera', 'Web Camera 2'
                    }
                    tkvar_camera.set(
                        'Camera 1 in 701')  # set the default option

                    tki.OptionMenu(select_cam_frame, tkvar_camera,
                                   *camera_choices).pack()

                    # on change dropdown value
                    def change_dropdown_camera(*args):
                        self._camera = tkvar_camera.get()
                        print(tkvar_camera.get())

                    # link function to change dropdown
                    tkvar_camera.trace('w', change_dropdown_camera)
                    select_cam_frame.pack(side="left")

                    select_model_frame = tki.Frame(mainframe,
                                                   relief="raised",
                                                   borderwidth=5)
                    tki.Label(select_model_frame,
                              text="Select an object detection model").pack()

                    # Create a Tkinter variable
                    tkvar_model = tki.StringVar()

                    # Set of model options
                    model_choices = {
                        'SSD Mobilenet V1', 'YOLO', 'Faster R-CNN',
                        'Mask R-CNN', 'ResNet'
                    }
                    tkvar_model.set(
                        'SSD Mobilenet V1')  # set the default option

                    tki.OptionMenu(select_model_frame, tkvar_model,
                                   *model_choices).pack()

                    # on change dropdown value
                    def change_dropdown_model(*args):
                        self._model = tkvar_model.get()
                        print(tkvar_model.get())

                    # link function to change dropdown
                    tkvar_model.trace('w', change_dropdown_model)

                    def run_button_pressed():
                        threading.Event().set()

                        self.vs.stop()

                        if self._camera == 'Camera 1 in 701':
                            # Camera 1 in 701
                            self.vs = VideoStream(
                                'rtsp://*****:*****@192.168.111.112:554/doc/page/main.asp'
                            ).start()
                        elif self._camera == 'Camera 2 in 701':
                            # Camera 2 in 701
                            self.vs = VideoStream(
                                'rtsp://*****:*****@192.168.111.113:554/doc/page/main.asp'
                            ).start()
                        elif self._camera == 'Camera 1 in 702':
                            # Camera 1 in 702
                            self.vs = VideoStream(
                                'rtsp://*****:*****@192.168.111.129:554/doc/page/main.asp'
                            ).start()
                        elif self._camera == 'Camera 1 in 207':
                            # Camera 1 in 207
                            self.vs = VideoStream(
                                'rtsp://*****:*****@192.168.0.53:554/doc/page/main.asp'
                            ).start()
                        elif self._camera == 'Web Camera':
                            # Web Camera
                            self.vs = VideoStream(0).start()
                        elif self._camera == 'Web Camera 2':
                            # Web Camera 2
                            self.vs = VideoStream(1).start()
                        self.thread = threading.Thread(target=self.videoLoop)
                        print('Run button pressed!')

                    select_model_frame.pack(side="left")

                    # Run Button
                    run = tki.Button(mainframe,
                                     text='Run',
                                     fg="blue",
                                     font=("Courier", 30),
                                     command=run_button_pressed)
                    run.pack(side="left")

                    # Quit Button
                    quitbutton = tki.Button(mainframe,
                                            text='Exit',
                                            fg="red",
                                            font=("Courier", 30),
                                            command=self.onClose)
                    quitbutton.pack(side="left")
                    mainframe.pack()

                    fTime = True

            video_capture.stop()
            cv2.destroyAllWindows()
            fps.stop()
        except RuntimeError as e:
            print("[INFO] caught a RuntimeError")
Example #27
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()

    # logger = multiprocessing.log_to_stderr()
    # logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        print('4', sys.path)
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    cam_url = 'http://*****:*****@192.168.0.134:8080/'
    video_capture = cv2.VideoCapture(cam_url)

    fps = FPS().start()
    t_start = time.time()

    # if (args.stream):
    #     print('Reading from hls stream.')
    #     video_capture = HLSVideoStream(src=args.stream).start()
    # else:
    #     print('Reading from webcam.')
    #     video_capture = WebcamVideoStream(src=args.video_source,
    #                                   width=args.width,
    #                                   height=args.height).start()

    #
    # fps = FPS().start()

    while True:  # fps._numFrames < 120
def inference(sess, img_np):
    fps = FPS().start()
    fps.update()
    output = detect_objects(img_np, sess)
    display_PIL(output)
    fps.stop()
        #Set Region of Interest (ROI)
        ROI_line_x = int(round(frame_width * 0.80, 0))
        ROI_line_y = int(round(frame_height * 0.45, 0))

        ROI_line_x2 = int(round(frame_width * 0.95, 0))
        ROI_line_y2 = int(round(frame_height * 0.8, 0))

        box1_x = int(round(frame_width * 0.6, 0))
        box1_y = int(round(frame_height * 0.1, 0))
        box2_x = int(round(frame_width * 0.6, 0))
        box2_y = int(round(frame_height * 0.15, 0))

        old_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #video_capture = WebcamVideoStream('Original.mp4', width=args.width, height=args.height).start()
        fps = FPS().start()
        currentFrame = 0

        persondot = []
        licensedot = []
        threshold = 0.13
        last_frame_capture = 0
        last_frame_capture_license = 0

        people_notwearing = 0
        people_wearinghelmet = 0

        while True:

            ret, frame = cap.read()
            if ret:
                        default=480, help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=360, help='Height of the frames in the video stream.')
    args = parser.parse_args()

    input_q = Queue(5)  # a larger queue gives higher FPS but adds latency
    output_q = Queue()
    for i in range(1):
        t = Thread(target=worker, args=(input_q, output_q))
        t.daemon = True
        t.start()

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        if output_q.empty():
            pass  # fill up queue
        else:
            font = cv2.FONT_HERSHEY_SIMPLEX
            data = output_q.get()
            rec_points = data['rect_points']
            class_names = data['class_names']
            class_colors = data['class_colors']
Example #32
    def run_demo(self, args, mirror=False):

        # Define the codec and create VideoWriter object
        height = args.demo_size
        width = int(4.0 / 3 * args.demo_size)

        swidth = int(width / 4)
        sheight = int(height / 4)
        if args.record:
            fourcc = cv2.VideoWriter_fourcc('F', 'M', 'P', '4')
            out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (2 * width, height))

        # print(args.video_source)
        if args.video_source.isdigit():
            args.video_source = int(args.video_source)

        cam = cv2.VideoCapture(args.video_source)
        cam.set(3, width)
        cam.set(4, height)
        key = 0
        idx = 0
        #
        CWD_PATH = os.getcwd()
        resume = os.path.join(CWD_PATH, 'cp_deeplabv3+_20181009-14.45.37_epoch50_lr0.01_50-Finished.pth.tar')
        m_lane = LaneSeg(model='resnet50', param=resume)

        # --- prepare input image
        # from scipy import misc
        font = cv2.FONT_HERSHEY_SIMPLEX
        fps = FPS().start()
        stopflag = False
        count = 0
        while not stopflag:

            # read frame
            idx += 1
            ret_val, img = cam.read()
            if mirror:
                img = cv2.flip(img, 1)

            if img is None:
                break

            cimg = img.copy()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # y_pred = m_lane.inference(np.array(img))

            h, w = 480, 640
            # -- binary label
            # binary = y_pred.astype(np.uint8)
            # imageio.imwrite('binary_{}.png'.format(filename), binary)
            # -- colored image
            result_rgb = np.zeros((h, w, 3), dtype=np.uint8)

            step1 = np.zeros((h * max_width), dtype=np.uint8)

            # filename = os.path.join(os.getcwd(), 's.png')
            #
            # step1 = misc.imread(filename)  # input, np array

            self.lock.acquire()
            self.data.add(b"hello!")
            self.lock.release()
            # print('thread ',self.id,' product once, add ',count)
            if IS_DEBUG:
                stopflag = True
                imageio.imwrite('s.png', result_rgb)

            fps.update()

            text2 = '[INFO] approx. FPS: {:.2f}'.format(fps.fps())  # fps.fps()

            # text
            showimg = np.concatenate((result_rgb, cimg), axis=1)
            # putText(image, text, origin, font, scale, color, thickness)
            cv2.putText(showimg, text2, (0, 20), font, 0.9, (255, 255, 255), 3)
            cv2.imshow('Demo', showimg)

            key = cv2.waitKey(1)
            if args.record:
                out.write(showimg)
            if key == 27:
                stopflag = True

            # # print(key)
            # space to pause and resume
            if key == 32:
                while True:
                    key = cv2.waitKey(1)
                    if key == 32:
                        break

                    if key == 27:
                        stopflag = True
                        break

                    if key == 115:
                        id = datetime.datetime.now()
                        sourceImgName = "{}_input.png".format(id)
                        resultImgName = "{}_output.png".format(id)
                        imageio.imwrite(sourceImgName, img)
                        imageio.imwrite(resultImgName, result_rgb)

        cam.release()
        if args.record:
            out.release()
        cv2.destroyAllWindows()