Example 1
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    label_cf = {}
    while True:

        img = cam.read()
        if img is not None:
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
            img = vis.draw_bboxes(img, box, conf, cls)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)

            # look up each detection's label and record its rounded score
            for num, cf in zip(cls, conf):
                label = label_map['{}'.format(num)]
                label_cf[label] = round(float(cf), 2)
            print(label_cf, curr_fps)

            fps = curr_fps if fps == 0.0 else (fps * 0.9 + curr_fps * 0.1)
            tic = toc
            label_cf = {}
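Several of these examples smooth the instantaneous frame rate with the same exponentially decaying average, fps = fps*0.9 + curr_fps*0.1. A minimal, self-contained sketch of that idea (the 0.9/0.1 weights come from the examples above; the FpsSmoother name is only for illustration):

import time

class FpsSmoother:
    """Exponentially decaying average of instantaneous FPS."""
    def __init__(self, decay=0.9):
        self.decay = decay
        self.fps = 0.0
        self.tic = time.time()

    def update(self):
        toc = time.time()
        curr_fps = 1.0 / (toc - self.tic)
        self.tic = toc
        # seed with the first measurement, then blend new readings in
        self.fps = curr_fps if self.fps == 0.0 else (
            self.fps * self.decay + curr_fps * (1.0 - self.decay))
        return self.fps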
Example 2
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type, writer):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
      od_type: type of object detector ('ssd' or 'faster_rcnn').
      writer: cv2.VideoWriter for saving the annotated output (writes are
        commented out below).
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    ret, img = cam.read()
    #fps_list=[]
    while ret:
        box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
        cls -= 1  # shift 1-based class IDs to 0-based label-map indices
        img = vis.draw_bboxes(img, box, conf, cls)
        if show_fps:
            img = draw_help_and_fps(img, fps)
        #cv2.imshow(WINDOW_NAME, img)
        #writer.write(img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
        tic = toc
        #fps_list.append(fps)
        ret, img = cam.read()
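The cls -= 1 above shifts the detector's 1-based class IDs down to the 0-based indices of the label map. A small hedged sketch of the lookup this enables (the dict layout is an assumption based on how cls_dict is built and used in the surrounding main() functions):

import numpy as np

# hypothetical label map: 0-based index -> class name
cls_dict = {0: 'person', 1: 'bicycle', 2: 'car'}

cls = np.array([1, 3])   # detector output with 1-based class IDs (assumed)
cls -= 1                 # shift to 0-based label-map indices
labels = [cls_dict.get(int(c), 'CLS{}'.format(int(c))) for c in cls]
print(labels)            # ['person', 'car']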
def main():
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # Ask tensorflow logger not to propagate logs to parent (which causes
    # duplicated logging)
    logging.getLogger('tensorflow').propagate = False
    global args

    args = parse_args()
    logger.info('called with args: %s' % args)

    # build the class (index/name) dictionary from labelmap file
    logger.info('reading label map')
    cls_dict = read_label_map(args.labelmap_file)

    pb_path = './data/{}_trt.pb'.format(args.model)
    log_path = './logs/{}_trt'.format(args.model)
    if args.do_build:
        logger.info('building TRT graph and saving to pb: %s' % pb_path)
        build_trt_pb(args.model, pb_path)

    logger.info('opening camera device/file')
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    #tf_sess = tf.Session(config=tf_config, graph=trt_graph) -- Vincent
    #Fix for: "unable to satisfy explicit device /dev/CPU:0 -- Vincent
    tf_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                               log_device_placement=True),
                         graph=trt_graph)
    if args.do_tensorboard:
        logger.info('writing graph summary to TensorBoard')
        write_graph_tensorboard(tf_sess, log_path)

    logger.info('warming up the TRT graph with a dummy image')
    od_type = 'faster_rcnn' if 'faster_rcnn' in args.model else 'ssd'
    dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
    _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)

    cam.start()  # ask the camera to start grabbing images
    # grab image and do object detection (until stopped by user)
    logger.info('starting to loop and detect')
    vis = BBoxVisualization(cls_dict)
    open_display_window(cam.img_width, cam.img_height)
    loop_and_detect(cam, tf_sess, args.conf_th, vis, od_type=od_type)
    logger.info('cleaning up')
    cam.stop()  # terminate the sub-thread in camera
    tf_sess.close()
    cam.release()
    cv2.destroyAllWindows()
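This main() swaps the plain session config for one with allow_soft_placement=True to work around the "unable to satisfy explicit device" error noted in the comment. A minimal sketch of the two TF 1.x config knobs these examples rely on:

import tensorflow as tf  # TF 1.x API, as used throughout these examples

tf_config = tf.ConfigProto(
    allow_soft_placement=True,   # fall back to an available device instead
                                 # of failing on an unsatisfiable explicit pin
    log_device_placement=False)  # set True to debug where ops are placed
tf_config.gpu_options.allow_growth = True  # allocate GPU memory on demand

#tf_sess = tf.Session(config=tf_config, graph=trt_graph)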
Example 4
def loop_and_detect(cam, tf_sess, conf_th, vis, model_name, filename,
                    save_file, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
      model_name: key into MODELS, used to locate the output directory.
      filename: name of the input video file (used to name the saved output).
      save_file: whether to save annotated frames to an output video.
      od_type: type of object detector ('ssd' or 'faster_rcnn').
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()

    _timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    _filename = os.path.splitext(os.path.basename(filename))[0]
    _video_path = './data/{}/{}'.format(MODELS[model_name].extract_dir,
                                        _filename + '_' + _timestamp + '.mp4')
    _fourcc = cv2.VideoWriter_fourcc(*'MP4V')

    img = cam.read()
    h, w, _ = img.shape
    if save_file:
        out = cv2.VideoWriter(_video_path, _fourcc, 2.0, (w, h))

    while cam.thread_running:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            break

        img = cam.read()
        if img is not None:
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
            img = vis.draw_bboxes(img, box, conf, cls)
            if show_fps:
                img = draw_help_and_fps(img, fps)
            # Write the frame into the file '_video_name'
            if save_file:
                out.write(img)
            cv2.imshow(WINDOW_NAME, img)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)
            # calculate an exponentially decaying average of fps number
            fps = curr_fps if fps == 0.0 else (fps * 0.9 + curr_fps * 0.1)
            tic = toc

        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help/fps
            show_fps = not show_fps
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_full_screen(full_scrn)

    if save_file:
        out.release()  # finalize the output video container
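A cv2.VideoWriter must be released so the MP4 container is finalized, or trailing frames can be lost (hence the release added above). The pattern in isolation, with placeholder file name, FPS, and frame size:

import cv2
import numpy as np

fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('annotated.mp4', fourcc, 2.0, (1280, 720))
try:
    for _ in range(10):
        frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # placeholder frame
        out.write(frame)      # frame size must match the (1280, 720) above
finally:
    out.release()             # finalize the MP4 container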
Example 5
def main():
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # Ask tensorflow logger not to propagate logs to parent (which causes
    # duplicated logging)
    logging.getLogger('tensorflow').propagate = False

    args = parse_args()
    logger.info('called with args: %s' % args)

    # build the class (index/name) dictionary from labelmap file
    logger.info('reading label map')
    cls_dict = read_label_map(args.labelmap_file)

    pb_path = './data/{}_trt.pb'.format(args.model)
    log_path = './logs/{}_trt'.format(args.model)
    if args.do_build:
        logger.info('building TRT graph and saving to pb: %s' % pb_path)
        build_trt_pb(args.model, pb_path)

    logger.info('opening camera device/file')
    infile = args.filename
    cam = cv2.VideoCapture(infile)
    size = (int(cam.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    fps = cam.get(cv2.CAP_PROP_FPS)
    # write the result next to the input as '<name>_res.<ext>';
    # os.path.splitext only splits on the final extension
    root, ext = os.path.splitext(infile)
    writer = cv2.VideoWriter(root + '_res' + ext,
                             cv2.VideoWriter_fourcc(*'mp4v'), fps, size)

    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=tf_config, graph=trt_graph)

    if args.do_tensorboard:
        logger.info('writing graph summary to TensorBoard')
        write_graph_tensorboard(tf_sess, log_path)

    logger.info('warming up the TRT graph with a dummy image')
    od_type = 'faster_rcnn' if 'faster_rcnn' in args.model else 'ssd'
    dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
    _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)

    # grab image and do object detection (until stopped by user)
    logger.info('starting to loop and detect')
    vis = BBoxVisualization(cls_dict)
    #open_display_window(cam.img_width, cam.img_height)
    loop_and_detect(cam, tf_sess, 0.2, vis, od_type=od_type, writer=writer)

    logger.info('cleaning up')
    tf_sess.close()
    cam.release()
    writer.release()
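The output path above is derived with os.path.splitext so only the final extension is replaced; the original '_res.'.join(infile.split('.')) would insert '_res' at every dot in the path. A quick illustration (result_path is a hypothetical helper):

import os

def result_path(infile, suffix='_res'):
    root, ext = os.path.splitext(infile)
    return root + suffix + ext

print(result_path('clip.mp4'))         # clip_res.mp4
print(result_path('2020.01.run.mp4'))  # 2020.01.run_res.mp4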
def main():
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # Ask tensorflow logger not to propagate logs to parent (which causes
    # duplicated logging)
    logging.getLogger('tensorflow').propagate = False

    # build the class (index/name) dictionary from labelmap file
    logger.info('reading label map')
    cls_dict = read_label_map(DEFAULT_LABELMAP)

    pb_path = './data/{}_trt.pb'.format(DEFAULT_MODEL)
    log_path = './logs/{}_trt'.format(DEFAULT_MODEL)

    logger.info('opening camera device/file')

    url1 = 'http://pi1.local:8000/stream.mjpg'
    url2 = 'http://pi2.local:8000/stream.mjpg'  #'http://raspi3bp.local:4000/stream.mjpg'
    url3 = 'http://pi3.local:8000/stream.mjpg'  #'http://picamblack.local:5000/stream.mjpg'
    url4 = 'http://barcodepi.local:8000/stream.mjpg'  #'http://picam201902.local:3000/stream.mjpg'
    threaded = True
    stream_handler = VideoStreamHandler([url1, url2, url3, url4],
                                        threaded=threaded,
                                        resolution=(360, 640))
    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=tf_config, graph=trt_graph)

    logger.info('warming up the TRT graph with a dummy image')
    od_type = 'ssd'
    dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
    _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)

    # grab image and do object detection (until stopped by user)
    logger.info('starting to loop and detect')
    vis = BBoxVisualization(cls_dict)
    open_display_window(1280, 720)
    #if threaded:
    #stream_handler.start()
    loop_and_detect(stream_handler, tf_sess, 0.2, vis, od_type=od_type)
    stream_handler.close()
    logger.info('cleaning up')
    tf_sess.close()
    stream_handler.join_streams()
    cv2.destroyAllWindows()
Example 7
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    global one_fix_anchor
    while not one_fix_anchor:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            break

        img = cam.read()
        if img is not None:
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
            img = vis.draw_bboxes(img, box, conf, cls, client)
            if show_fps:
                img = draw_help_and_fps(img, fps)
            cv2.imshow(WINDOW_NAME, img)
            cv2.imwrite('output.jpg', img)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)
            # calculate an exponentially decaying average of fps number
            fps = curr_fps if fps == 0.0 else (fps * 0.9 + curr_fps * 0.1)
            tic = toc

        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help/fps
            show_fps = not show_fps
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_full_screen(full_scrn)
        one_fix_anchor = True
        client.loop()
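Example 7 passes an MQTT client into the visualizer and pumps it once per frame with client.loop(). The example never shows how client is constructed; assuming it is a paho-mqtt client, a minimal sketch might look like this (the broker address is hypothetical):

import paho.mqtt.client as mqtt   # assumed library; not shown in the example

client = mqtt.Client()
client.connect('broker.local', 1883)   # hypothetical broker address

# inside the detection loop, e.g. from vis.draw_bboxes:
# client.publish('detections', payload)
client.loop(timeout=1.0)   # service network traffic once per iteration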
Example 8
def loop_and_detect(stream_handler, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      stream_handler: the stream handler object.
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    show_fps = True
    full_scrn = True
    fps = 0.0
    tic = time.time()
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            break

        img = stream_handler.read_streams()
        box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
        cls -= 1
        img = vis.draw_bboxes(img, box, conf, cls)
        if show_fps:
            img = draw_help_and_fps(img, fps)
        cv2.imshow(WINDOW_NAME, img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps * 0.9 + curr_fps * 0.1)
        tic = toc

        key = cv2.waitKey(1)
        if key == ord('q') or key == ord('Q'):  # q key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help/fps
            show_fps = not show_fps
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_full_screen(full_scrn)
def main():
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    logging.getLogger('tensorflow').propagate = False


    logger.info('reading label map')
    cls_dict = read_label_map(DEFAULT_LABELMAP)

    pb_path = './data/{}_trt.pb'.format(DEFAULT_MODEL)
    log_path = './logs/{}_trt'.format(DEFAULT_MODEL)
    
    logger.info('opening ros camera device/file')

    cam = CImageSubscriber("/camera/color/image_raw")

    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=tf_config, graph=trt_graph)

    logger.info('warming up the TRT graph with a dummy image')
    od_type = 'faster_rcnn' if 'faster_rcnn' in DEFAULT_MODEL else 'ssd'
    dummy_img = np.zeros((480, 640, 3), dtype=np.uint8)
    _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)

    logger.info('starting to loop and detect')
    detection_conf_th = 0.3
    vis = BBoxVisualization(cls_dict)
    open_display_window(640, 480)
    loop_and_detect(cam, tf_sess, detection_conf_th, vis, od_type=od_type)

    logger.info('cleaning up')
    tf_sess.close()
    cv2.destroyAllWindows()
def main():

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # Ask tensorflow logger not to propagate logs to parent (which causes
    # duplicated logging)
    logging.getLogger('tensorflow').propagate = False

    args = parse_args()
    logger.info('called with args: %s' % args)

    # build the class (index/name) dictionary from labelmap file
    logger.info('reading label map')
    cls_dict = read_label_map(args.labelmap_file)

    pb_path = './ssd_mobilenet_v1_coco_people/{}_trt.pb'.format(args.model)
    log_path = './logs/{}_trt'.format(args.model)
    print(log_path)
    if args.do_build:
        logger.info('building TRT graph and saving to pb: %s' % pb_path)
        build_trt_pb(args.model, pb_path)

    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=tf_config, graph=trt_graph)

    od_type = 'ssd'

    logger.info('opening cameras')
    cam1 = cv2.VideoCapture(0)
    cam2 = cv2.VideoCapture(1)

    logger.info('starting to loop and detect')
    vis = BBoxVisualization(cls_dict)

    show_fps1 = True
    fps1 = 0.0
    show_fps2 = True
    fps2 = 0.0
    tic1 = time.time()
    tic2 = time.time()

    # grab images and do object detections (until stopped by user)
    while True:
        ret1, frame1 = cam1.read()
        ret2, frame2 = cam2.read()
        if ret1 and ret2:
            box1, conf1, cls1 = detect(frame1,
                                       tf_sess,
                                       args.conf_th,
                                       od_type=od_type)
            box2, conf2, cls2 = detect(frame2,
                                       tf_sess,
                                       args.conf_th,
                                       od_type=od_type)

            frame1 = vis.draw_bboxes(frame1, box1, conf1, cls1)
            frame2 = vis.draw_bboxes(frame2, box2, conf2, cls2)
            if show_fps1:
                frame1 = draw_help_and_fps(frame1, fps1)
            cv2.imshow(WINDOW_NAME1, frame1)
            toc1 = time.time()
            curr_fps1 = 1.0 / (toc1 - tic1)
            # calculate an exponentially decaying average of fps number
            fps1 = curr_fps1 if fps1 == 0.0 else (fps1 * 0.9 + curr_fps1 * 0.1)
            tic1 = toc1

            if show_fps2:
                frame2 = draw_help_and_fps(frame2, fps2)
            cv2.imshow(WINDOW_NAME2, frame2)
            toc2 = time.time()
            curr_fps2 = 1.0 / (toc2 - tic2)
            # calculate an exponentially decaying average of fps number
            fps2 = curr_fps2 if fps2 == 0.0 else (fps2 * 0.9 + curr_fps2 * 0.1)
            tic2 = toc2

        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('F') or key == ord('f'):  # Toggle fps overlays
            show_fps1 = not show_fps1
            show_fps2 = not show_fps2

    logger.info('cleaning up and closing cameras')

    tf_sess.close()

    cam1.release()
    cam2.release()
    cv2.destroyAllWindows()
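This main() duplicates the capture/detect/FPS bookkeeping per camera (fps1/fps2, tic1/tic2, ...). One alternative, sketched below without the detection code, is to stack both frames into a single window so one imshow/waitKey loop serves both sources:

import cv2
import numpy as np

cam1, cam2 = cv2.VideoCapture(0), cv2.VideoCapture(1)
while True:
    ret1, f1 = cam1.read()
    ret2, f2 = cam2.read()
    if not (ret1 and ret2):
        break
    f2 = cv2.resize(f2, (f1.shape[1], f1.shape[0]))  # match frame sizes
    cv2.imshow('both', np.hstack([f1, f2]))          # one window, two feeds
    if cv2.waitKey(1) == 27:                         # ESC quits, as above
        break
cam1.release()
cam2.release()
cv2.destroyAllWindows()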
Example 11
def main():
    
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # Ask tensorflow logger not to propagate logs to parent (which causes
    # duplicated logging)
    logging.getLogger('tensorflow').propagate = False

    args = parse_args()
    logger.info('called with args: %s' % args)

    # build the class (index/name) dictionary from labelmap file
    logger.info('reading label map')
    cls_dict = read_label_map(args.labelmap_file)

    pb_path = './ssd_mobilenet_v1_coco_people/{}_trt.pb'.format(args.model)
    log_path = './logs/{}_trt'.format(args.model)
    print(log_path)
    if args.do_build:
        logger.info('building TRT graph and saving to pb: %s' % pb_path)
        build_trt_pb(args.model, pb_path)

    logger.info('opening camera device/file')
    
    #cam1 = Camera(args)
   
    #cam1.open()
    #if not cam1.is_opened:
    #    sys.exit('Failed to open camera #1!')

    #cam1.start()  # ask the camera to start grabbing images


    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=tf_config, graph=trt_graph)

    od_type = 'ssd'

    cam2 = cv2.VideoCapture(1)
    
    logger.info('starting to loop and detect')
    vis = BBoxVisualization(cls_dict)
    
    show_fps1 = True
    fps1 = 0.0
    tic1 = time.time()
    while True:
        ret2, frame2 = cam2.read()
        if ret2:
            box, conf, cls = detect(frame2, tf_sess, args.conf_th, od_type=od_type)
            frame2 = vis.draw_bboxes(frame2, box, conf, cls)
            if show_fps1:
                frame2 = draw_help_and_fps(frame2, fps1)
            cv2.imshow(WINDOW_NAME2, frame2)
            toc1 = time.time()
            curr_fps1 = 1.0 / (toc1 - tic1)
            # calculate an exponentially decaying average of fps number
            fps1 = curr_fps1 if fps1 == 0.0 else (fps1*0.9 + curr_fps1*0.1)
            tic1 = toc1

        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help/fps
            show_fps1 = not show_fps1



    # grab image and do object detection (until stopped by user)
    #open_display_window(cam1.img_width, cam1.img_height)
    #open_display_window(args.image_width, args.image_height)
    #loop_and_detect(cam1, tf_sess, args.conf_th, vis, od_type=od_type)
    #loop_and_detect(cam2, tf_sess, args.conf_th, vis, od_type=od_type)

    logger.info('cleaning up')
    #cam1.stop()  # terminate the sub-thread in camera

    tf_sess.close()
    
    #cam1.release()
    cam2.release()
    cv2.destroyAllWindows()
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    tracks = []
    global rects, ct, temp, args
    frame_buff = 0
    none_buff = 0
    restart_flag = False
    backup_label = None
    while True:
        #if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
        # Check to see if the user has closed the display window.
        # If yes, terminate the while loop.
        #    break
        if restart_flag:
            cam = Camera(args)
            cam.open()
            cam.start()
            #pb_path = './data/{}_trt.pb'.format(args.model)
            #log_path = './logs/{}_trt'.format(args.model)
            #trt_graph = load_trt_pb(pb_path)
            #tf_config = tf.ConfigProto()
            #tf_config.gpu_options.allow_growth = True
            #tf_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True),graph=trt_graph)
            #od_type = 'faster_rcnn' if 'faster_rcnn' in args.model else 'ssd'
            dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
            _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)
            restart_flag = False

        rects = []
        img = cam.read()
        optical_flow_image = img
        if img is not None:
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
            img = vis.draw_bboxes(img, box, conf, cls)
            objects = ct.update(rects)
            cv2.rectangle(img, (0, 980), (1920, 1075), (0, 0, 0), -1)

            for (objectID, centroid) in objects.items():
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
                cv2.circle(img, (centroid[0], centroid[1]), 4, (255, 0, 255),
                           -1)
                backup_label = str(objectID)

            cv2.putText(img, backup_label, (330, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255), 2,
                        cv2.LINE_AA)
            sys_clock = str(
                datetime.datetime.now()) + " Frame_buff=" + str(frame_buff)
            print(sys_clock)
            cv2.putText(img, sys_clock, (20, 950), cv2.FONT_HERSHEY_TRIPLEX, 1,
                        (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, "Traffic Counter: ", (20, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2,
                        cv2.LINE_AA)
            cv2.putText(img, "Detector Type: Human", (400, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2,
                        cv2.LINE_AA)
            cv2.putText(img, "Real Time Optical Trace :", (900, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2,
                        cv2.LINE_AA)
            cv2.putText(img, "OFF", (1380, 1035), cv2.FONT_HERSHEY_TRIPLEX, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)
            """   
            if(frame_buff == 2000):
                print("[SYSTEM] VSTARCAMERA Restart")
                cam.stop()  # terminate the sub-thread in camera
                #tf_sess.close()
                #tf.reset_default_graph()
                #tf.contrib.keras.backend.clear_session()
                cam.release() 
                restart_flag = True
                frame_buff = 0
                img = None
                cv2.destroyAllWindows()
            frame_buff += 1 
            """
            if not restart_flag:
                if show_fps:
                    img = draw_help_and_fps(img, fps)
                #set_full_screen(full_scrn)
                cv2.moveWindow(WINDOW_NAME, 0, 0)
                cv2.imshow(WINDOW_NAME, img)
                toc = time.time()
                curr_fps = 1.0 / (toc - tic)
                # calculate an exponentially decaying average of fps number
                fps = curr_fps if fps == 0.0 else (fps * 0.9 + curr_fps * 0.1)
                tic = toc
        else:
            print("None Image  --> None Buff = {}".format(none_buff))
            none_buff += 1
            if (none_buff == 500):
                print("[SYSTEM] VSTARCAMERA Restart")
                cam.stop()  # terminate the sub-thread in camera
                #tf_sess.close()
                #tf.reset_default_graph()
                #tf.contrib.keras.backend.clear_session()
                cam.release()
                restart_flag = True
                none_buff = 0
                img = None
                cv2.destroyAllWindows()

        if not restart_flag:
            key = cv2.waitKey(1)
            if key == 27:  # ESC key: quit program
                break
            elif key == ord('H') or key == ord('h'):  # Toggle help/fps
                show_fps = not show_fps
            elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
                full_scrn = not full_scrn
                set_full_screen(full_scrn)
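This variant rebuilds the Camera after a run of empty reads (none_buff reaching 500) instead of exiting. The watchdog pattern in isolation, with a hypothetical FlakySource standing in for Camera(args):

class FlakySource:                  # hypothetical stand-in for Camera(args)
    def __init__(self, dead_reads=3):
        self.dead = dead_reads
    def read(self):
        if self.dead > 0:
            self.dead -= 1
            return None             # simulate a stalled stream
        return 'frame'

def read_with_watchdog(src, make_source, max_none=2):
    """Return the next frame, rebuilding the source after max_none misses."""
    none_buff = 0
    while True:
        img = src.read()
        if img is not None:
            return img, src
        none_buff += 1
        if none_buff >= max_none:   # too many empty reads: rebuild the camera
            none_buff = 0
            src = make_source()

img, src = read_with_watchdog(FlakySource(), lambda: FlakySource(dead_reads=0))
print(img)                          # 'frame', obtained after one rebuild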
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    feature_params = dict(maxCorners=1000, qualityLevel=0.1, minDistance=4, blockSize=7)
    lk_params = dict(winSize=(15, 15), maxLevel=3, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.02))
    tracks = []
    track_len = 8
    frame_idx = 0
    detect_interval = 10

    global rects, ct, temp, args, optical_enable  # restart issue
    frame_buff = 0
    none_buff = 0
    restart_flag = False #restart issue
    backup_label = None  #restart issue
    while True:
        #if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
        # Check to see if the user has closed the display window.
        # If yes, terminate the while loop.
        #    break
        if restart_flag:
            cam = Camera(args)
            cam.open()
            cam.start()
            print("Camera is opened!")
            #pb_path = './data/{}_trt.pb'.format(args.model)
            #log_path = './logs/{}_trt'.format(args.model)
            #trt_graph = load_trt_pb(pb_path)
            #tf_config = tf.ConfigProto()
            #tf_config.gpu_options.allow_growth = True
            #tf_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True),graph=trt_graph)
            #od_type = 'faster_rcnn' if 'faster_rcnn' in args.model else 'ssd'
            dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
            _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)
            print("Loading dummy image!")
            restart_flag = False

        rects = []
        img = cam.read()
        if img is not None:
            optical_flow_image = img
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)    
            img = vis.draw_bboxes(img, box, conf, cls)
            #Optical Flow
            if optical_enable:
                frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                if len(tracks) > 0:
                    img0, img1 = prev_gray, frame_gray
                    p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1,1,2)
                    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                    p0r, _, _ = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                    d = abs(p0-p0r).reshape(-1,2).max(-1)
                    good = d < 1
                    new_tracks = []
                    for i, (tr, (x, y), flag) in enumerate(zip(tracks, p1.reshape(-1, 2), good)):
                        if not flag:
                            continue
                        tr.append((x, y))
                        if len(tr) > track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        # OpenCV drawing needs integer pixel coordinates
                        cv2.circle(img, (int(x), int(y)), 2, (0, 255, 0), -1)

                    tracks = new_tracks
                    cv2.polylines(img, [np.int32(tr) for tr in tracks], False, (0, 255, 0), 1)
                    for box_coord in box:
                        y_min, x_min, y_max, x_max = box_coord
                        for tr in tracks:
                            start_point, end_point = tr[0], tr[-1]
                            # classify a track only when both of its endpoints
                            # fall inside this detection box
                            if (x_min <= start_point[0] <= x_max) and \
                               (x_min <= end_point[0] <= x_max) and \
                               (y_min <= start_point[1] <= y_max) and \
                               (y_min <= end_point[1] <= y_max):
                                if (end_point[0] - start_point[0]) > 40:
                                    cv2.putText(img, "ENTER", (x_min + 10, y_min + 50),
                                                cv2.FONT_HERSHEY_TRIPLEX, 1,
                                                (255, 255, 153), 2, cv2.LINE_AA)
                                elif (start_point[0] - end_point[0]) > 40:
                                    cv2.putText(img, "LEAVE", (x_min + 10, y_min + 50),
                                                cv2.FONT_HERSHEY_TRIPLEX, 1,
                                                (255, 255, 153), 2, cv2.LINE_AA)
                                #elif(abs(start_point[0]-end_point[0])<10)and(abs(start_point[1]-end_point[1])<10):
                                #    cv2.putText(img, "IDLE", (x_min+10,y_min-20),cv2.FONT_HERSHEY_TRIPLEX,1,(102,255,178),2,cv2.LINE_AA)

                if frame_idx % detect_interval == 0:
                    mask = np.zeros_like(frame_gray)
                    mask[:] = 255
                    if frame_idx != 0:
                        # mask out current track heads before re-seeding
                        for x, y in [np.int32(tr[-1]) for tr in tracks]:
                            cv2.circle(mask, (x, y), 5, 0, -1)

                        p = cv2.goodFeaturesToTrack(frame_gray, mask=mask,
                                                    **feature_params)
                        if p is not None:
                            for x, y in np.float32(p).reshape(-1, 2):
                                tracks.append([(x, y)])
                frame_idx += 1
                prev_gray = frame_gray
            #Optical Flow done
            cv2.rectangle(img, (0, 980), (1920, 1075), (0, 0, 0), -1)

            objects = ct.update(rects)
            for (objectID, centroid) in objects.items():
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
                cv2.putText(img, str(objectID), (330, 1035),
                            cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255), 2,
                            cv2.LINE_AA)
                cv2.circle(img, (centroid[0], centroid[1]), 4, (255, 0, 255), -1)
                backup_label = str(objectID)
            

            if optical_enable:
                cv2.putText(img, "ON", (1380, 1035), cv2.FONT_HERSHEY_TRIPLEX,
                            1, (255, 255, 255), 2, cv2.LINE_AA)
            else:
                cv2.putText(img, "OFF", (1380, 1035), cv2.FONT_HERSHEY_TRIPLEX,
                            1, (255, 255, 255), 2, cv2.LINE_AA)
            sys_clock = str(datetime.datetime.now())
            cv2.putText(img, backup_label, (330, 1035), cv2.FONT_HERSHEY_TRIPLEX,
                        1, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(img, sys_clock, (20, 950), cv2.FONT_HERSHEY_TRIPLEX,
                        1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, "Traffic Counter: ", (20, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, "Detector Type: Human", (400, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, "Real Time Optical Trace :", (900, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            #print(sys_clock) 
            if not restart_flag:
                if show_fps:
                    img = draw_help_and_fps(img, fps)
                cv2.moveWindow(WINDOW_NAME, 0, 0)  # restart issue
                cv2.imshow(WINDOW_NAME, img)
                toc = time.time()
                curr_fps = 1.0 / (toc - tic)
                # calculate an exponentially decaying average of fps number
                fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
                tic = toc

        else:
            print("None Image  --> None Buff = {}".format(none_buff))
            none_buff += 1
            if none_buff == 1000:
                print("[SYSTEM] VSTARCAMERA Restart")
                cam.stop()  # terminate the sub-thread in camera
                #tf_sess.close()
                #tf.reset_default_graph()
                #tf.contrib.keras.backend.clear_session()
                cam.release() 
                restart_flag = True
                none_buff = 0
                #img = None
                cv2.destroyAllWindows()
 


        if not restart_flag:
            key = cv2.waitKey(1)
            if key == 27:  # ESC key: quit program
                break
            elif key == ord('H') or key == ord('h'):  # Toggle help/fps
                show_fps = not show_fps
            elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
                full_scrn = not full_scrn
                set_full_screen(full_scrn)
            elif key == ord('P') or key == ord('p'):  # Toggle optical trace
                optical_enable = not optical_enable
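The optical-flow variant keeps a Lucas-Kanade track only if it passes a forward-backward check: points are tracked img0 -> img1 and back again, and survive only when the round trip lands within one pixel (d < 1). The check on two synthetic frames, using the same lk_params as above:

import cv2
import numpy as np

lk_params = dict(winSize=(15, 15), maxLevel=3,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.02))

# two synthetic frames: a bright square shifted 3 px to the right
img0 = np.zeros((100, 100), np.uint8)
img0[40:60, 40:60] = 255
img1 = np.zeros_like(img0)
img1[40:60, 43:63] = 255

p0 = cv2.goodFeaturesToTrack(img0, maxCorners=10, qualityLevel=0.1,
                             minDistance=4, blockSize=7)
p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _, _ = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)

d = abs(p0 - p0r).reshape(-1, 2).max(-1)   # round-trip error per point
good = d < 1                               # keep consistent tracks only
print(p1.reshape(-1, 2)[good])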
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    #tic = time.time()
    tic = 0
    toc = 0
    global rects, ct, temp
    global person_x, person_y
    global leave_zone
    global leave_zone_2
    global leave_zone_counter
    global enter_zone
    zone_x_bed = 0
    zone_y_bed = 0

    zone_x_clean = 0
    zone_y_clean = 0

    #Boundary boxes for RTSP (low resolution)
    zone_x_min_bed, zone_y_min_bed, zone_x_max_bed, zone_y_max_bed = 366, 369, 521, 667
    zone_x_min_clean, zone_y_min_clean, zone_x_max_clean, zone_y_max_clean = 194, 300, 330, 420
    zone_x_min_door, zone_y_min_door, zone_x_max_door, zone_y_max_door = 620, 151, 674, 414
    zone_x_min_alchol, zone_y_min_alchol, zone_x_max_alchol, zone_y_max_alchol = 430, 329, 470, 356

    distance_thres_bed = 165
    distance_thres_clean = 105
    distance_thres_alchol = 100

    counter_msg = 0
    fail_msg = 0
    pass_msg = 0
    global hand_wash_status, args, client
    hd = 0
    wash_delay = 0
    invalid_id = [999]
    enter, leave = False, False
    restart_flag = False  #restart issue
    backup_label = None  #restart issue
    none_buff = 0  #restart issue
    previous_id = 999
    personal_status = [0] * 1000
    #CSV Log File
    if True:  # always-true guard kept from the original scaffolding
        while True:
            #if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            #    break
            if restart_flag:
                cam = Camera(args)
                cam.open()
                cam.start()
                print("Camera is opened!")
                dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
                _, _, _ = detect(dummy_img,
                                 tf_sess,
                                 conf_th=.3,
                                 od_type=od_type)
                print("Loading dummy image!")
                restart_flag = False

            rects = []
            img = cam.read()
            if img is not None:
                img = cv2.flip(img, 0)
                #check mqtt status
                #client.on_message = on_message
                #client.loop_forever()
                box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
                img, hd = vis.draw_bboxes(img, box, conf, cls)
                cv2.rectangle(img, (zone_x_min_bed, zone_y_min_bed),
                              (zone_x_max_bed, zone_y_max_bed),
                              (255, 102, 255), 2)
                zone_x_bed = int((zone_x_min_bed + zone_x_max_bed) / 2.0)
                zone_y_bed = int((zone_y_min_bed + zone_y_max_bed) / 2.0)
                cv2.circle(img, (zone_x_bed, zone_y_bed), 4, (255, 102, 255),
                           -1)
                cv2.putText(img, "Patient", (zone_x_bed - 40, zone_y_bed - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 102, 255), 1)
                cv2.rectangle(img, (zone_x_min_clean, zone_y_min_clean),
                              (zone_x_max_clean, zone_y_max_clean),
                              (255, 255, 51), 2)
                zone_x_clean = int((zone_x_min_clean + zone_x_max_clean) / 2.0)
                zone_y_clean = int((zone_y_min_clean + zone_y_max_clean) / 2.0)
                cv2.circle(img, (zone_x_clean, zone_y_clean), 4,
                           (255, 255, 51), -1)
                cv2.putText(img, "CLEANING ZONE",
                            (zone_x_clean - 110, zone_y_clean - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 51), 1)
                cv2.rectangle(img, (zone_x_min_door, zone_y_min_door),
                              (zone_x_max_door, zone_y_max_door),
                              (127, 0, 255), 2)
                zone_x_door = int((zone_x_min_door + zone_x_max_door) / 2.0)
                zone_y_door = int((zone_y_min_door + zone_y_max_door) / 2.0)
                cv2.circle(img, (zone_x_door, zone_y_door), 4, (127, 0, 255),
                           -1)
                cv2.putText(img, "ENTRANCE",
                            (zone_x_door - 40, zone_y_door - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (127, 0, 255), 1)
                cv2.rectangle(img, (zone_x_min_alchol, zone_y_min_alchol),
                              (zone_x_max_alchol, zone_y_max_alchol),
                              (255, 255, 51), 2)
                zone_x_alchol = int(
                    (zone_x_min_alchol + zone_x_max_alchol) / 2.0)
                zone_y_alchol = int(
                    (zone_y_min_alchol + zone_y_max_alchol) / 2.0)
                cv2.circle(img, (zone_x_alchol, zone_y_alchol), 4,
                           (255, 255, 51), -1)
                cv2.putText(img, "CLEANING ZONE",
                            (zone_x_alchol - 35, zone_y_alchol - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 51), 1)

                #ROI
                cv2.rectangle(img, (160, 130), (697, 721), (0, 255, 255), 2)

                distance_bed = 0
                distance_clean = 0
                #Detection Zone
                objects, valid_checker = ct.update(rects)
                flag = False
                leave_zone_counter = 0
                for ((objectID, centroid),
                     (_, valid)) in zip(objects.items(),
                                        valid_checker.items()):
                    # draw both the ID of the object and the centroid of the
                    # object on the output frame
                    #text_id = "ID {}".format(objectID)
                    text_id = "id = 0"
                    backup_label = str(objectID)
                    text = "staff"
                    cv2.putText(img, text,
                                (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.putText(img, text_id,
                                (centroid[0] - 10, centroid[1] - 50),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0),
                               -1)
                    distance_bed = int(
                        math.sqrt((centroid[0] - zone_x_bed)**2 +
                                  (centroid[1] - zone_y_bed)**2))
                    distance_clean = int(
                        math.sqrt((centroid[0] - zone_x_clean)**2 +
                                  (centroid[1] - zone_y_clean)**2))
                    distance_clean_alchol = int(
                        math.sqrt((centroid[0] - zone_x_alchol)**2 +
                                  (centroid[1] - zone_y_alchol)**2))
                    enter = ct.display_enter_status(objectID)
                    leave = ct.display_leave_status(objectID)
                    flag = ct.display_hygiene(objectID)
                    if (distance_clean_alchol <= distance_thres_alchol):
                        cv2.line(img, (centroid[0], centroid[1]),
                                 (zone_x_alchol, zone_y_alchol), (0, 0, 255),
                                 1)
                    #    if(hand_wash_status == 1):
                    #        personal_status[objectID] = 1
                    #    ct.update_wash(True,objectID)

                    if (distance_bed <= distance_thres_bed):
                        #personal_status[0] = 0
                        hand_wash_status = 0
                        cv2.line(img, (centroid[0], centroid[1]),
                                 (zone_x_bed, zone_y_bed), (0, 255, 0), 1)
                        #Update Hygiene Status as the staff is originally cleaned
                        ct.update_hygiene(False, objectID)
                        """
                        #Never enter
                        if(enter == False):
                            ct.update_enter(True,objectID)
                            #If the staff did not wash hand and go to patient directly
                            hand_wash_flag = ct.display_wash(objectID) 
                            if(hand_wash_flag == False):
                                ct.update_valid(False,objectID)
                                m = 0
                                match = True
                                #Check whether this ID is marked as fail or not (on the screen)
                                while(m<len(invalid_id)):
                                    if(objectID==invalid_id[m]):
                                        #ObjectID is found in the invalid bank
                                        match = True
                                    else:
                                        match = False
                                    m+=1
                                #If it is not in the bank,then mark it and fail counter + 1
                                if(match == False):
                                    fail_msg +=1
                                    invalid_id.append(objectID)                                                            
                        #Enter again with uncleaned => invalid
                        else:
                            if(flag == False):
                                #Re-enter the patient zone
                                if((enter == True)and(leave == True)):
                                    ct.update_valid(False,objectID)
                                    m = 0
                                    match = True
                                    #Check whether this ID is marked as fail or not (on the screen)
                                    while(m<len(invalid_id)):
                                        if(objectID==invalid_id[m]):
                                            #ObjectID is found in the invalid bank
                                            match = True
                                        else:
                                            match = False
                                        m+=1
                                    #If it is not in the bank,then mark it and fail counter + 1
                                    if(match == False):
                                        fail_msg +=1
                                        invalid_id.append(objectID)
                    else:
                        if(enter == True):
                            ct.update_leave(True,objectID)    


                    """
                    if ((distance_clean <= distance_thres_clean) or
                        (distance_clean_alchol <= distance_thres_alchol)):
                        if (distance_clean <= distance_thres_clean):
                            cv2.line(img, (centroid[0], centroid[1]),
                                     (zone_x_clean, zone_y_clean),
                                     (51, 255, 255), 1)
                        #if(hand_wash_status == 1):
                        #    personal_status[0] = 1
                        #hand_wash_status = 1
                        #Update Hygiene Status
                        ct.update_hygiene(True, objectID)
                        #Reset IN/OUT Mechanism
                        ct.update_enter(False, objectID)
                        ct.update_leave(False, objectID)
                        ct.update_wash(True, objectID)
                    #Return hygiene status
                    flag = ct.display_hygiene(objectID)
                    #if(previous_id!=objectID):
                    #hand_wash_status = 0
                    #personal_status = 0
                    previous_id = objectID

                    with open('./path_analyzer/path_log.csv', 'a',
                              newline='') as csv_log_file:
                        log_writer = csv.writer(csv_log_file)

                        log_writer.writerow([
                            objectID, centroid[0], centroid[1],
                            int(distance_bed),
                            int(distance_clean),
                            int(distance_clean_alchol),
                            int(0),
                            int(0)
                        ])


#log_writer.writerow([objectID,centroid[0],centroid[1],int(distance_bed),int(distance_clean),int(distance_clean_alchol),int(hand_wash_status),int(personal_status[objectID])])
                    if(((centroid[0]>=zone_x_min_door)and(centroid[0]<=zone_x_max_door)) and \
                        ((centroid[1]>=zone_y_min_door)and(centroid[1]<=zone_y_max_door))):
                        person_x = centroid[0]
                        person_y = centroid[1]
                        leave_zone_counter += 1

                if leave_zone_counter == 0 and enter_zone and not leave_zone:
                    if not leave_zone_2:
                        print("LEAVE 1!!!\r\n")
                        leave_zone = True
                    else:
                        print("LEAVE 2!!!\r\n")
                        enter_zone = False
                        leave_zone_2 = False

                #print("leave_zone_counter:",leave_zone_counter)
                #print("leave_zone_2:",leave_zone_2)
                #print("enter_zone:",enter_zone)
                #print("leave_zone:",leave_zone)

                #If any id passed through entrance
                if person_x > 0 and person_y > 0 and leave_zone_counter > 0:
                    font = cv2.FONT_HERSHEY_PLAIN
                    line = cv2.LINE_AA
                    if not enter_zone:
                        cv2.putText(img, "START", (40, 100), font, 3.0,
                                    (255, 0, 0), 4, line)
                        print("START\r\n")
                        enter_zone = True
                        start_time = time.time()
                        pp = ' { "sys_status" :"' + str(10) + '"}'
                        client.publish("MDSSCC/STATUS", pp)
                    else:
                        if leave_zone:
                            cv2.putText(img, "COUNT", (40, 100), font, 3.0,
                                        (255, 0, 0), 4, line)
                            print("COUNT\r\n")
                            pp = ' { "sys_status" :"' + str(30) + '"}'
                            client.publish("MDSSCC/STATUS", pp)
                            leave_zone = False
                            leave_zone_2 = True
                            leave_zone_counter = 0

                person_x = 0
                person_y = 0

                print()

                if not restart_flag:
                    if show_fps:
                        img = draw_help_and_fps(img, fps)
                    #img = img[200:721, 160:697]
                    cv2.imshow(WINDOW_NAME, img)
                    cv2.setMouseCallback(
                        WINDOW_NAME,
                        pixel_info)  #Receive mouse click on HSV_Picker
                    #toc = time.time()
                    #curr_fps = 1.0 / (toc - tic)
                    # calculate an exponentially decaying average of fps number
                    #fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
                    #tic = toc

            else:
                print("None Image  --> None Buff = {}".format(none_buff))
                none_buff += 1
                if (none_buff == 1000):
                    print("[SYSTEM] VSTARCAMERA Restart")
                    cam.stop()  # terminate the sub-thread in camera
                    #tf_sess.close()
                    #tf.reset_default_graph()
                    #tf.contrib.keras.backend.clear_session()
                    cam.release()
                    restart_flag = True
                    none_buff = 0
                    #img = None
                    cv2.destroyAllWindows()

            hand_wash_status = 0
            if not restart_flag:
                key = cv2.waitKey(1)
                if key == 27:  # ESC key: quit program
                    break
                elif key == ord('H') or key == ord('h'):  # Toggle help/fps
                    show_fps = not show_fps
                elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
                    full_scrn = not full_scrn
                    set_full_screen(full_scrn)

        #client.loop_start()
        #client.loop_forever()
        client.reconnect()
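The hygiene monitor decides zone membership from the Euclidean distance between a tracked centroid and each zone's center, compared against a per-zone threshold (distance_thres_bed and friends). The check in isolation, reusing the bed-zone numbers from the example:

import math

def in_zone(centroid, zone_box, threshold):
    """True if centroid is within threshold px of the zone box's center."""
    x_min, y_min, x_max, y_max = zone_box
    zx, zy = (x_min + x_max) / 2.0, (y_min + y_max) / 2.0
    return math.hypot(centroid[0] - zx, centroid[1] - zy) <= threshold

# bed zone and threshold taken from the example above
print(in_zone((450, 500), (366, 369, 521, 667), 165))  # True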
Example 15
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    while cam.thread_running:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            break

        img = cam.read()
        if img is not None:
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
            (img, x_min, y_min, x_max, y_max, cf) = vis.draw_bboxes(img, box, conf, cls)

            # Draw the court boundaries
            height, width, channels = img.shape
            COURT_WIDTH = width // 2
            COURT_HEIGHT = height // 2
            (a, b, c, d) = (0, 0, COURT_WIDTH, COURT_HEIGHT)
            (e, f, g, h) = (COURT_WIDTH, 0, 2 * COURT_WIDTH, COURT_HEIGHT)
            black_img = np.copy(img) * 0
            cv2.rectangle(black_img, (a, b), (c, d), (0, 0, 255), -1)
            cv2.rectangle(black_img, (e, f), (g, h), (255, 0, 0), -1)
            cv2.putText(img, 'court 1', (30, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.putText(img, 'court 2', (COURT_WIDTH + 30, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.putText(img, 'out of bounds', (30, COURT_HEIGHT + 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            img = cv2.addWeighted(img, 0.8, black_img, 0.4, 0)
            
            if conf is None:
                continue
            if cf >= 0.6:
                in_left = isIntersect(a, b, c, d, x_min, y_min, x_max, y_max)
                in_right = isIntersect(e, f, g, h, x_min, y_min, x_max, y_max)
                if in_left and in_right:
                    # box spans both courts: decide by its horizontal center
                    if (x_min + x_max) / 2 <= COURT_WIDTH:
                        print("left")
                    else:
                        print("right")
                elif in_left:
                    print("left")
                elif in_right:
                    print("right")
                elif isInbound(a, b, c, d, x_min, y_min, x_max, y_max):
                    print("left")
                elif isInbound(e, f, g, h, x_min, y_min, x_max, y_max):
                    print("right")
                else:
                    print("out of bounds")

            if show_fps:
                img = draw_help_and_fps(img, fps)
            cv2.imshow(WINDOW_NAME, img)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)
            # calculate an exponentially decaying average of fps number
            fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
            tic = toc

        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help/fps
            show_fps = not show_fps
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_full_screen(full_scrn)
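isIntersect and isInbound are called but not defined in this example. A plausible axis-aligned implementation consistent with the call sites (rectangle first, detection box second) might look like this; it is an assumption, not the example's actual code:

def isIntersect(a, b, c, d, x_min, y_min, x_max, y_max):
    """True if box (x_min, y_min, x_max, y_max) overlaps rect (a, b, c, d)."""
    return not (x_max < a or x_min > c or y_max < b or y_min > d)

def isInbound(a, b, c, d, x_min, y_min, x_max, y_max):
    """True if the box lies entirely inside rect (a, b, c, d)."""
    return a <= x_min <= x_max <= c and b <= y_min <= y_max <= d

print(isIntersect(0, 0, 100, 100, 90, 90, 150, 150))  # True: partial overlap
print(isInbound(0, 0, 100, 100, 90, 90, 150, 150))    # False: spills outside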
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    distance = 0
    mid_x, mid_y = 0, 0
    y_thresh = 50
    blob_thresh = 1000


    while True:
        min_x = 500
        min_distance = 100000
        distance = 0

        #load published image
        img = cv2.imread('/home/vincent/vincent_dev/gazebo_ws/src/robot_vision/src/buffer.jpg')
        
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            break

        #img = cam.read()
        if img is not None:
            #make a copy for lane detection
            img_lane = img.copy()
 
            #Lane Detection
            #Vertices of cropped polygon (aim at right lane)
            region_of_interest_vertices = [
                (img_width / 2, img_height),
                (img_width / 2, img_height / 2 + 80),
                (img_width, img_height / 2 + 80),
                (img_width, img_height),
            ]
            lane_img = cv2.cvtColor(img_lane, cv2.COLOR_BGR2GRAY)
            edges = cv2.Canny(lane_img, 50, 200)
            cropped_image = region_of_interest(
                edges, np.array([region_of_interest_vertices], np.int32))
            #Debug ROI
            #dummy = cv2.resize(cropped_image, (640, 420))   
            #cv2.imshow('cropped',dummy)
            lines = cv2.HoughLinesP(cropped_image, 1, np.pi / 180, 100,
                                    minLineLength=10, maxLineGap=250)
            #Lane Detection

            #AI-Core
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
            img, vision_msg = vis.draw_bboxes(img, box, conf, cls)


            #Reference
            cv2.circle(img, (ref_x, ref_y), 4, (0, 255, 0), -1)
            cv2.putText(img, "REFERENCE", (ref_x, ref_y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            if lines is not None:
                for line in lines:
                    x1, y1, x2, y2 = line[0]
                    mid_x = int((x1 + x2) / 2)
                    mid_y = int((y1 + y2) / 2)
                    distance = int(math.sqrt((mid_x - ref_x) ** 2 +
                                             (mid_y - ref_y) ** 2))
                    #ignore horizontal lines and the left lane
                    if abs(y2 - y1) > y_thresh and mid_x >= ref_x:
                        #print("x1={} y1={} x2={} y2={} | Distance={}".format(x1,y1,x2,y2,distance))
                        #Find the shortest distance to the reference point
                        if min_distance > distance:
                            min_x = mid_x
                            min_distance = distance

                        cv2.line(img, (x1, y1), (x2, y2), (255, 255, 0), 3)
                        cv2.circle(img, (mid_x, mid_y), 4, (255, 0, 0), -1)
                #print("Min X={} | Min_distance={}".format(min_x,min_distance))

            if show_fps:
                img = draw_help_and_fps(img, fps)

            #Display overall result             
            img = cv2.resize(img, (640, 420))   
            cv2.imshow(WINDOW_NAME, img)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)
            # calculate an exponentially decaying average of fps number
            fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
            tic = toc

            #publish ros controller message
            message_header = 'R'
            # pad the distance to a fixed three-digit field; keep "999" when
            # no lane was found (min_distance left at its 100000 sentinel)
            if min_distance == 0:
                msg_distance = "160"  # fall back to the measured reference
            elif min_distance < 1000:
                msg_distance = str(min_distance).zfill(3)
            else:
                msg_distance = "999"
            #conclude AI detection
            publish_msg = message_header + msg_distance + vision_msg
            print("Published msg: ",publish_msg)
            """
            R245R_X_7134
            R: Right Lane Header
            245: distance
            R: turn right
            X: no stairs
            7134: ON AREA
            """
            with open('/home/vincent/vincent_dev/gazebo_ws/src/robot_control/src/ros_msg_bridge.txt', 'w') as msg_file:
                msg_file.write(publish_msg)

            publish_msg = None
            msg_distance = None
            vision_msg = None

        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # Toggle help/fps
            show_fps = not show_fps
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_full_screen(full_scrn)
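The controller message packs a fixed-width, zero-padded distance between the lane header and the vision payload (the R245R_X_7134 example in the docstring above). str.zfill expresses the padding in one call; this helper mirrors the corrected logic above:

def format_distance(min_distance, ref_fallback='160', no_lane='999'):
    """Fixed three-digit distance field for the controller message."""
    if min_distance == 0:
        return ref_fallback          # measured reference distance
    if min_distance < 1000:
        return str(min_distance).zfill(3)
    return no_lane                   # no usable lane line found

print('R' + format_distance(45) + 'R_X_7134')      # R045R_X_7134
print('R' + format_distance(100000) + 'R_X_7134')  # R999R_X_7134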