Example #1
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()

    if not cam.is_opened:
        sys.exit('[ERROR] Failed to open camera!')

    cls_dict = get_cls_dict('coco')
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))

    print('[INFO]  Camera: starting')
    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'TensorRT YOLOv3 Detector')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam,
                    args.runtime,
                    trt_yolov3,
                    conf_th=0.3,
                    vis=vis,
                    window_name=WINDOW_NAME)

    print('[INFO]  Program: stopped')
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #2
def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('yolo/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model)

    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        raise SystemExit('ERROR: failed to open camera!')

    cls_dict = get_cls_dict(args.category_num)
    yolo_dim = args.model.split('-')[-1]
    if 'x' in yolo_dim:
        dim_split = yolo_dim.split('x')
        if len(dim_split) != 2:
            raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
        w, h = int(dim_split[0]), int(dim_split[1])
    else:
        h = w = int(yolo_dim)
    if h % 32 != 0 or w % 32 != 0:
        raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)

    trt_yolo = TrtYOLO(args.model, (h, w), args.category_num)

    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT YOLO Demo')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_yolo, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
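
The dimension parsing in Example #2 is the most robust of these examples: it accepts either a square size embedded in the model name ('yolov3-416') or an explicit WxH suffix ('yolov4-608x416'), and rejects anything that is not a multiple of 32 (YOLO's input stride requirement). A standalone sketch of that logic follows; the helper name parse_yolo_dim is ours, not part of the repo:

def parse_yolo_dim(model_name):
    """Return (h, w) parsed from names like 'yolov3-416' or 'yolov4-608x416'."""
    yolo_dim = model_name.split('-')[-1]
    if 'x' in yolo_dim:
        dim_split = yolo_dim.split('x')
        if len(dim_split) != 2:
            raise ValueError('bad yolo_dim (%s)' % yolo_dim)
        w, h = int(dim_split[0]), int(dim_split[1])
    else:
        h = w = int(yolo_dim)
    if h % 32 != 0 or w % 32 != 0:
        raise ValueError('bad yolo_dim (%s)' % yolo_dim)
    return h, w

assert parse_yolo_dim('yolov3-416') == (416, 416)
assert parse_yolo_dim('yolov4-608x416') == (416, 608)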
Example #3
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    cls_dict = get_cls_dict('coco')
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))

    cam.start()
    # open_window(WINDOW_NAME, args.image_width, args.image_height,
    #             'Camera TensorRT YOLOv3 Demo')
    vis = BBoxVisualization(cls_dict)

    # for video
    # loop_and_detect(cam, trt_yolov3, conf_th=0.3, vis=vis)
    
    # for single file
    detect_demo(cam, trt_yolov3, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #4
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    cls_dict = get_cls_dict(args.model)
    trt_ssd = TrtSSD(args.model, INPUT_HW)

    cam.start()
    if args.use_console:
        loop_and_detect_console(cam,
                                trt_ssd,
                                conf_th=0.3,
                                loop=args.loop,
                                cls_dict=cls_dict)
    else:
        open_window(WINDOW_NAME, args.image_width, args.image_height,
                    'Camera TensorRT SSD Demo for Jetson Nano')
        vis = BBoxVisualization(cls_dict)
        loop_and_detect(cam, trt_ssd, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #5
def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('Bad category_num: %d!' % args.category_num)

    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    cls_dict = get_cls_dict(args.category_num)
    yolo_dim = int(args.model.split('-')[-1])
    if yolo_dim not in (288, 416, 608):
        raise SystemExit('Bad yolo_dim: %d!\n'
                         'Please make sure the model file name contains '
                         'the correct dimension...' % yolo_dim)

    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim), args.category_num)

    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT YOLOv3 Demo')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_yolov3, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #6
def main():
    args = parse_args()

    #YOLO INIT
    #cls_dict = get_cls_dict('coco')
    cls_dict = get_cls_dict('deepfamily')
    print("classes count : ", len(cls_dict))
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    print("yolo_dim : ", yolo_dim)
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))

    #CAMERA
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    cam.start()

    #CAM-WINDOW
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'DEEPFAMILY PROJECT - TensorRT YOLOv3')
    vis = BBoxVisualization(cls_dict)

    #DETECT-LOOP
    loop_and_detect(cam, trt_yolov3, conf_th=0.95, vis=vis)
    #loop_and_detect(cam, trt_yolov3, conf_th=0.95)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #7
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    trt_ssd = TrtSSD(args.model)

    cam.start()

    # initialize bot
    logger.info('initialize robot')
    robot = Robot()

    logger.info('starting to loop and detect')
    loop_and_detect(cam=cam,
                    trt_ssd=trt_ssd,
                    conf_th=0.3,
                    robot=robot,
                    model=args.model)

    logger.info('cleaning up')
    robot.stop()
    cam.stop()
    cam.release()
Example #8
def main():
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # Ask tensorflow logger not to propagate logs to parent (which causes
    # duplicated logging)
    logging.getLogger('tensorflow').propagate = False
    global args

    args = parse_args()
    logger.info('called with args: %s' % args)

    # build the class (index/name) dictionary from labelmap file
    logger.info('reading label map')
    cls_dict = read_label_map(args.labelmap_file)

    pb_path = './data/{}_trt.pb'.format(args.model)
    log_path = './logs/{}_trt'.format(args.model)
    if args.do_build:
        logger.info('building TRT graph and saving to pb: %s' % pb_path)
        build_trt_pb(args.model, pb_path)

    logger.info('opening camera device/file')
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    #tf_sess = tf.Session(config=tf_config, graph=trt_graph) -- Vincent
    # Solves "unable to satisfy explicit device /device:CPU:0" -- Vincent
    tf_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                               log_device_placement=True),
                         graph=trt_graph)
    if args.do_tensorboard:
        logger.info('writing graph summary to TensorBoard')
        write_graph_tensorboard(tf_sess, log_path)

    logger.info('warming up the TRT graph with a dummy image')
    od_type = 'faster_rcnn' if 'faster_rcnn' in args.model else 'ssd'
    dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
    _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)

    cam.start()  # ask the camera to start grabbing images
    # grab image and do object detection (until stopped by user)
    logger.info('starting to loop and detect')
    vis = BBoxVisualization(cls_dict)
    open_display_window(cam.img_height, cam.img_width)
    result = loop_and_detect(cam, tf_sess, args.conf_th, vis, od_type=od_type)
    logger.info('cleaning up')
    cam.stop()  # terminate the sub-thread in camera
    tf_sess.close()
    cam.release()
    cv2.destroyAllWindows()
Example #9
def main():
    args = parse_args()
    cam = Camera(args)

    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    mtcnn = TrtMtcnn()
    cam.start()
    open_window(WINDOW_NAME, width=640, height=480, title='MTCNN Window')
    detect_faces(cam, mtcnn)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
    del mtcnn
Example #10
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        raise SystemExit('ERROR: failed to open camera!')

    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)

    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT SSD Demo for Jetson Nano')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_ssd, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #11
def main():
    args = parse_args()
    labels = np.loadtxt('googlenet/synset_words.txt', str, delimiter='\t')
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        raise SystemExit('ERROR: failed to open camera!')

    # initialize the tensorrt googlenet engine
    net = PyTrtGooglenet(DEPLOY_ENGINE, ENGINE_SHAPE0, ENGINE_SHAPE1)

    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT GoogLeNet Demo for Jetson Nano')
    loop_and_classify(cam, net, labels, args.crop_center)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #12
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    mtcnn = TrtMtcnn()

    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT MTCNN Demo for Jetson TX2')
    loop_and_detect(cam, mtcnn, args.minsize)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()

    del mtcnn
Example #13
def main():
    args = parse_args()
    labels = np.loadtxt('googlenet/synset_words.txt', str, delimiter='\t')
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    cam.start()  # let camera start grabbing frames
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT GoogLeNet Demo for Jetson Nano')
    condition = threading.Condition()
    trt_thread = TrtGooglenetThread(condition, cam, labels, args.crop_center)
    trt_thread.start()  # start the child thread
    loop_and_display(condition)
    trt_thread.stop()  # stop the child thread

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #14
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        raise SystemExit('ERROR: failed to open camera!')

    cls_dict = get_cls_dict(args.model.split('_')[-1])

    cuda.init()  # init pycuda driver

    cam.start()  # let camera start grabbing frames
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT SSD Demo for Jetson Nano')
    vis = BBoxVisualization(cls_dict)
    condition = threading.Condition()
    trt_thread = TrtThread(condition, cam, args.model, conf_th=0.3)
    trt_thread.start()  # start the child thread
    loop_and_display(condition, vis)
    trt_thread.stop()  # stop the child thread

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
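
Examples #13 and #14 move inference into a child thread and hand results back to the display loop through a threading.Condition. A minimal, self-contained sketch of that producer/consumer handoff follows; the InferThread class, the run_inference stand-in, and the shared result variable are illustrative only, not the repo's actual TrtThread (which also has to manage its own CUDA context):

import threading
import time

condition = threading.Condition()
result = None  # shared between the inference thread and the display loop


def run_inference():
    # Stand-in for a TensorRT detect() call.
    time.sleep(0.05)
    return time.time()


class InferThread(threading.Thread):
    """Producer: runs inference and publishes each result under the condition."""

    def __init__(self, condition):
        threading.Thread.__init__(self)
        self.condition = condition
        self.running = True

    def run(self):
        global result
        while self.running:
            detections = run_inference()
            with self.condition:
                result = detections
                self.condition.notify()  # wake the display loop

    def stop(self):
        self.running = False
        self.join()


def loop_and_display(condition, n_frames=20):
    """Consumer: waits for a fresh result, then 'draws' it."""
    for _ in range(n_frames):
        with condition:
            if not condition.wait(timeout=1.0):  # False means timed out
                continue
            detections = result
        print('displaying result from', detections)


trt_thread = InferThread(condition)
trt_thread.start()
loop_and_display(condition)
trt_thread.stop()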
Example #15
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
      od_type: type of object detector ('ssd' or 'faster_rcnn').
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    #tic = time.time()
    tic = 0
    toc = 0
    global rects, ct, temp
    global person_x, person_y
    global leave_zone
    global leave_zone_2
    global leave_zone_counter
    global enter_zone
    zone_x_bed = 0
    zone_y_bed = 0

    zone_x_clean = 0
    zone_y_clean = 0

    #Boundary boxes for RTSP (low resolution)
    zone_x_min_bed, zone_y_min_bed, zone_x_max_bed, zone_y_max_bed = 366, 369, 521, 667
    zone_x_min_clean, zone_y_min_clean, zone_x_max_clean, zone_y_max_clean = 194, 300, 330, 420
    zone_x_min_door, zone_y_min_door, zone_x_max_door, zone_y_max_door = 620, 151, 674, 414
    zone_x_min_alchol, zone_y_min_alchol, zone_x_max_alchol, zone_y_max_alchol = 430, 329, 470, 356

    distance_thres_bed = 165
    distance_thres_clean = 105
    distance_thres_alchol = 100

    counter_msg = 0
    fail_msg = 0
    pass_msg = 0
    global hand_wash_status, args, client
    hd = 0
    wash_delay = 0
    invalid_id = [999]
    enter, leave = False, False
    restart_flag = False  #restart issue
    backup_label = None  #restart issue
    none_buff = 0  #restart issue
    previous_id = 999
    personal_status = [0] * 1000
    #CSV Log File
    if True:  # always-true placeholder guard (original: 'if (5 > 2):')
        while True:
            #if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the display window.
            # If yes, terminate the while loop.
            #    break
            if (restart_flag == True):
                cam = Camera(args)
                cam.open()
                cam.start()
                print("Camera is opened!")
                dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
                _, _, _ = detect(dummy_img,
                                 tf_sess,
                                 conf_th=.3,
                                 od_type=od_type)
                print("Loading dummy image!")
                restart_flag = False

            rects = []
            img = cam.read()
            if img is not None:
                img = cv2.flip(img, 0)
                #check mqtt status
                #client.on_message = on_message
                #client.loop_forever()
                box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
                img, hd = vis.draw_bboxes(img, box, conf, cls)
                cv2.rectangle(img, (zone_x_min_bed, zone_y_min_bed),
                              (zone_x_max_bed, zone_y_max_bed),
                              (255, 102, 255), 2)
                zone_x_bed = int((zone_x_min_bed + zone_x_max_bed) / 2.0)
                zone_y_bed = int((zone_y_min_bed + zone_y_max_bed) / 2.0)
                cv2.circle(img, (zone_x_bed, zone_y_bed), 4, (255, 102, 255),
                           -1)
                cv2.putText(img, "Patient", (zone_x_bed - 40, zone_y_bed - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 102, 255), 1)
                cv2.rectangle(img, (zone_x_min_clean, zone_y_min_clean),
                              (zone_x_max_clean, zone_y_max_clean),
                              (255, 255, 51), 2)
                zone_x_clean = int((zone_x_min_clean + zone_x_max_clean) / 2.0)
                zone_y_clean = int((zone_y_min_clean + zone_y_max_clean) / 2.0)
                cv2.circle(img, (zone_x_clean, zone_y_clean), 4,
                           (255, 255, 51), -1)
                cv2.putText(img, "CLEANING ZONE",
                            (zone_x_clean - 110, zone_y_clean - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 51), 1)
                cv2.rectangle(img, (zone_x_min_door, zone_y_min_door),
                              (zone_x_max_door, zone_y_max_door),
                              (127, 0, 255), 2)
                zone_x_door = int((zone_x_min_door + zone_x_max_door) / 2.0)
                zone_y_door = int((zone_y_min_door + zone_y_max_door) / 2.0)
                cv2.circle(img, (zone_x_door, zone_y_door), 4, (127, 0, 255),
                           -1)
                cv2.putText(img, "ENTRANCE",
                            (zone_x_door - 40, zone_y_door - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (127, 0, 255), 1)
                cv2.rectangle(img, (zone_x_min_alchol, zone_y_min_alchol),
                              (zone_x_max_alchol, zone_y_max_alchol),
                              (255, 255, 51), 2)
                zone_x_alchol = int(
                    (zone_x_min_alchol + zone_x_max_alchol) / 2.0)
                zone_y_alchol = int(
                    (zone_y_min_alchol + zone_y_max_alchol) / 2.0)
                cv2.circle(img, (zone_x_alchol, zone_y_alchol), 4,
                           (255, 255, 51), -1)
                cv2.putText(img, "CLEANING ZONE",
                            (zone_x_alchol - 35, zone_y_alchol - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 51), 1)

                #ROI
                cv2.rectangle(img, (160, 130), (697, 721), (0, 255, 255), 2)

                distance_bed = 0
                distance_clean = 0
                #Detection Zone
                objects, valid_checker = ct.update(rects)
                flag = False
                leave_zone_counter = 0
                for ((objectID, centroid),
                     (_, valid)) in zip(objects.items(),
                                        valid_checker.items()):
                    # draw both the ID of the object and the centroid of the
                    # object on the output frame
                    #text_id = "ID {}".format(objectID)
                    text_id = "id = 0"
                    backup_label = str(objectID)
                    text = "staff"
                    cv2.putText(img, text,
                                (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.putText(img, text_id,
                                (centroid[0] - 10, centroid[1] - 50),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0),
                               -1)
                    distance_bed = int(
                        math.sqrt((centroid[0] - zone_x_bed)**2 +
                                  (centroid[1] - zone_y_bed)**2))
                    distance_clean = int(
                        math.sqrt((centroid[0] - zone_x_clean)**2 +
                                  (centroid[1] - zone_y_clean)**2))
                    distance_clean_alchol = int(
                        math.sqrt((centroid[0] - zone_x_alchol)**2 +
                                  (centroid[1] - zone_y_alchol)**2))
                    enter = ct.display_enter_status(objectID)
                    leave = ct.display_leave_status(objectID)
                    flag = ct.display_hygiene(objectID)
                    if (distance_clean_alchol <= distance_thres_alchol):
                        cv2.line(img, (centroid[0], centroid[1]),
                                 (zone_x_alchol, zone_y_alchol), (0, 0, 255),
                                 1)
                    #    if(hand_wash_status == 1):
                    #        personal_status[objectID] = 1
                    #    ct.update_wash(True,objectID)

                    if (distance_bed <= distance_thres_bed):
                        #personal_status[0] = 0
                        hand_wash_status = 0
                        cv2.line(img, (centroid[0], centroid[1]),
                                 (zone_x_bed, zone_y_bed), (0, 255, 0), 1)
                        #Update Hygiene Status as the staff is originally cleaned
                        ct.update_hygiene(False, objectID)
                        """
                        #Never enter
                        if(enter == False):
                            ct.update_enter(True,objectID)
                            #If the staff did not wash hand and go to patient directly
                            hand_wash_flag = ct.display_wash(objectID) 
                            if(hand_wash_flag == False):
                                ct.update_valid(False,objectID)
                                m = 0
                                match = True
                                #Check whether this ID is marked as fail or not (on the screen)
                                while(m<len(invalid_id)):
                                    if(objectID==invalid_id[m]):
                                        #ObjectID is found in the invalid bank
                                        match = True
                                    else:
                                        match = False
                                    m+=1
                                #If it is not in the bank,then mark it and fail counter + 1
                                if(match == False):
                                    fail_msg +=1
                                    invalid_id.append(objectID)                                                            
                        #Enter again with uncleaned => invalid
                        else:
                            if(flag == False):
                                #Re-enter the patient zone
                                if((enter == True)and(leave == True)):
                                    ct.update_valid(False,objectID)
                                    m = 0
                                    match = True
                                    #Check whether this ID is marked as fail or not (on the screen)
                                    while(m<len(invalid_id)):
                                        if(objectID==invalid_id[m]):
                                            #ObjectID is found in the invalid bank
                                            match = True
                                        else:
                                            match = False
                                        m+=1
                                    #If it is not in the bank,then mark it and fail counter + 1
                                    if(match == False):
                                        fail_msg +=1
                                        invalid_id.append(objectID)
                    else:
                        if(enter == True):
                            ct.update_leave(True,objectID)    


                    """
                    if ((distance_clean <= distance_thres_clean) or
                        (distance_clean_alchol <= distance_thres_alchol)):
                        if (distance_clean <= distance_thres_clean):
                            cv2.line(img, (centroid[0], centroid[1]),
                                     (zone_x_clean, zone_y_clean),
                                     (51, 255, 255), 1)
                        #if(hand_wash_status == 1):
                        #    personal_status[0] = 1
                        #hand_wash_status = 1
                        #Update Hygiene Status
                        ct.update_hygiene(True, objectID)
                        #Reset IN/OUT Mechanism
                        ct.update_enter(False, objectID)
                        ct.update_leave(False, objectID)
                        ct.update_wash(True, objectID)
                    #Return hygiene status
                    flag = ct.display_hygiene(objectID)
                    #if(previous_id!=objectID):
                    #hand_wash_status = 0
                    #personal_status = 0
                    previous_id = objectID

                    with open('./path_analyzer/path_log.csv', 'a',
                              newline='') as csv_log_file:
                        log_writer = csv.writer(csv_log_file)

                        log_writer.writerow([
                            objectID, centroid[0], centroid[1],
                            int(distance_bed),
                            int(distance_clean),
                            int(distance_clean_alchol),
                            int(0),
                            int(0)
                        ])


                    # log_writer.writerow([objectID, centroid[0], centroid[1], int(distance_bed), int(distance_clean), int(distance_clean_alchol), int(hand_wash_status), int(personal_status[objectID])])
                    if (((centroid[0] >= zone_x_min_door) and (centroid[0] <= zone_x_max_door)) and
                            ((centroid[1] >= zone_y_min_door) and (centroid[1] <= zone_y_max_door))):
                        person_x = centroid[0]
                        person_y = centroid[1]
                        leave_zone_counter += 1

                if ((leave_zone_counter == 0) and (enter_zone == True)
                        and (leave_zone == False)):
                    if (leave_zone_2 == False):
                        print("LEAVE 1!!!\r\n")
                        leave_zone = True
                    else:
                        print("LAEVE 2!!!\r\n")
                        enter_zone = False
                        leave_zone_2 = False

                #print("leave_zone_counter:",leave_zone_counter)
                #print("leave_zone_2:",leave_zone_2)
                #print("enter_zone:",enter_zone)
                #print("leave_zone:",leave_zone)

                #If any id passed through entrance
                if (((person_x > 0) and (person_y > 0))
                        and (leave_zone_counter > 0)):
                    font = cv2.FONT_HERSHEY_PLAIN
                    line = cv2.LINE_AA
                    if (enter_zone == False):
                        cv2.putText(img, "START", (40, 100), font, 3.0,
                                    (255, 0, 0), 4, line)
                        print("START\r\n")
                        enter_zone = True
                        start_time = time.time()
                        pp = ' { "sys_status" :"' + str(10) + '"}'
                        client.publish("MDSSCC/STATUS", pp)
                    else:
                        if (leave_zone == True):
                            cv2.putText(img, "COUNT", (40, 100), font, 3.0,
                                        (255, 0, 0), 4, line)
                            print("COUNT\r\n")
                            pp = ' { "sys_status" :"' + str(30) + '"}'
                            client.publish("MDSSCC/STATUS", pp)
                            leave_zone = False
                            leave_zone_2 = True
                            leave_zone_counter = 0

                person_x = 0
                person_y = 0

                if (restart_flag == False):
                    if show_fps:
                        img = draw_help_and_fps(img, fps)
                    #img = img[200:721, 160:697]
                    cv2.imshow(WINDOW_NAME, img)
                    cv2.setMouseCallback(
                        WINDOW_NAME,
                        pixel_info)  #Receive mouse click on HSV_Picker
                    #toc = time.time()
                    #curr_fps = 1.0 / (toc - tic)
                    # calculate an exponentially decaying average of fps number
                    #fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
                    #tic = toc

            else:
                print("None Image  --> None Buff = {}".format(none_buff))
                none_buff += 1
                if (none_buff == 1000):
                    print("[SYSTEM] VSTARCAMERA Restart")
                    cam.stop()  # terminate the sub-thread in camera
                    #tf_sess.close()
                    #tf.reset_default_graph()
                    #tf.contrib.keras.backend.clear_session()
                    cam.release()
                    restart_flag = True
                    none_buff = 0
                    #img = None
                    cv2.destroyAllWindows()

            hand_wash_status = 0
            if (restart_flag == False):
                key = cv2.waitKey(1)
                if key == 27:  # ESC key: quit program
                    break
                elif key == ord('H') or key == ord('h'):  # Toggle help/fps
                    show_fps = not show_fps
                elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
                    full_scrn = not full_scrn
                    set_full_screen(full_scrn)

        #client.loop_start()
        #client.loop_forever()
        client.reconnect()
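
One aside on the MQTT status messages in the example above: payloads like ' { "sys_status" :"10"}' are assembled by concatenating quote characters by hand, which is easy to get wrong. Assuming the subscriber only cares about the sys_status field, json.dumps builds an equivalent and always well-formed payload:

import json

pp = json.dumps({'sys_status': str(30)})  # '{"sys_status": "30"}'
# client.publish("MDSSCC/STATUS", pp)     # same publish call as in the example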
Example #16
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
      od_type: type of object detector ('ssd' or 'faster_rcnn').
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    feature_params = dict(maxCorners=1000, qualityLevel=0.1, minDistance=4, blockSize=7)
    lk_params = dict(winSize=(15, 15), maxLevel=3, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.02))
    tracks = []
    track_len = 8
    frame_idx = 0
    detect_interval = 10

    global rects, ct, temp, args, optical_enable  # restart issue
    frame_buff = 0
    none_buff = 0
    restart_flag = False  # restart issue
    backup_label = ''  # restart issue; '' (not None) so cv2.putText below is safe
    while True:
        #if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
        # Check to see if the user has closed the display window.
        # If yes, terminate the while loop.
        #    break
        if(restart_flag == True):
            cam = Camera(args)
            cam.open()
            cam.start()
            print("Camera is opened!")
            #pb_path = './data/{}_trt.pb'.format(args.model)
            #log_path = './logs/{}_trt'.format(args.model)
            #trt_graph = load_trt_pb(pb_path)
            #tf_config = tf.ConfigProto()
            #tf_config.gpu_options.allow_growth = True
            #tf_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True),graph=trt_graph)
            #od_type = 'faster_rcnn' if 'faster_rcnn' in args.model else 'ssd'
            dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
            _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)
            print("Loading dummy image!")
            restart_flag = False

        rects = []
        img = cam.read()
        if img is not None:
            optical_flow_image = img
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)    
            img = vis.draw_bboxes(img, box, conf, cls)
            #Optical Flow
            if (optical_enable==True):
                frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                if len(tracks) > 0:
                    img0, img1 = prev_gray, frame_gray
                    p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1,1,2)
                    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                    p0r, _, _ = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                    d = abs(p0-p0r).reshape(-1,2).max(-1)
                    good = d < 1
                    new_tracks = []
                    for i, (tr, (x, y), flag) in enumerate(zip(tracks, p1.reshape(-1, 2), good)):
                        if not flag:
                            continue
                        tr.append((x,y))
                        if len(tr)> track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        cv2.circle(img, (int(x), int(y)), 2, (0, 255, 0), -1)  # OpenCV needs int coords

                    tracks = new_tracks
                    cv2.polylines(img, [np.int32(tr) for tr in tracks], False, (0, 255, 0), 1)
                    for box_coord in box:
                        y_min, x_min, y_max, x_max = box_coord[0], box_coord[1], box_coord[2], box_coord[3]
                        for tr in tracks:
                            tail = len(tr)
                            start_point = tr[0]
                            end_point = tr[tail-1]
                            if ((start_point[0] <= x_max) and (start_point[0] >= x_min)) and \
                               ((end_point[0] <= x_max) and (end_point[0] >= x_min)):
                                if ((start_point[1] <= y_max) and (start_point[1] >= y_min)) and \
                                   ((end_point[1] <= y_max) and (end_point[1] >= y_min)):
                                    if (end_point[0] - start_point[0]) > 40:
                                        cv2.putText(img, "ENTER", (x_min + 10, y_min + 50),
                                                    cv2.FONT_HERSHEY_TRIPLEX, 1,
                                                    (255, 255, 153), 2, cv2.LINE_AA)
                                    elif (start_point[0] - end_point[0]) > 40:
                                        cv2.putText(img, "LEAVE", (x_min + 10, y_min + 50),
                                                    cv2.FONT_HERSHEY_TRIPLEX, 1,
                                                    (255, 255, 153), 2, cv2.LINE_AA)
                                    #elif (abs(start_point[0] - end_point[0]) < 10) and (abs(start_point[1] - end_point[1]) < 10):
                                    #    cv2.putText(img, "IDLE", (x_min + 10, y_min - 20), cv2.FONT_HERSHEY_TRIPLEX, 1, (102, 255, 178), 2, cv2.LINE_AA)

                if frame_idx % detect_interval==0:
                    mask = np.zeros_like(frame_gray)
                    mask[:] = 255
                    if frame_idx !=0:
                        for x,y in [np.int32(tr[-1]) for tr in tracks]:
                            cv2.circle(mask, (x, y), 5, 0, -1)
                
                        p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
                        if p is not None:
                            for x, y in np.float32(p).reshape(-1,2):
                                tracks.append([(x, y)])
                frame_idx+=1
                prev_gray = frame_gray
            #Optical Flow done
            cv2.rectangle(img, (0, 980), (1920, 1075), (0, 0, 0), -1)

            objects = ct.update(rects)
            for (objectID, centroid) in objects.items():
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
                cv2.putText(img, str(objectID), (330, 1035),
                            cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255), 2,
                            cv2.LINE_AA)
                cv2.circle(img, (centroid[0], centroid[1]), 4, (255, 0, 255), -1)
                backup_label = str(objectID)

            if optical_enable:
                cv2.putText(img, "ON", (1380, 1035), cv2.FONT_HERSHEY_TRIPLEX,
                            1, (255, 255, 255), 2, cv2.LINE_AA)
            else:
                cv2.putText(img, "OFF", (1380, 1035), cv2.FONT_HERSHEY_TRIPLEX,
                            1, (255, 255, 255), 2, cv2.LINE_AA)
            sys_clock = str(datetime.datetime.now())
            cv2.putText(img, backup_label, (330, 1035), cv2.FONT_HERSHEY_TRIPLEX,
                        1, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(img, sys_clock, (20, 950), cv2.FONT_HERSHEY_TRIPLEX, 1,
                        (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, "Traffic Counter: ", (20, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, "Detector Type: Human", (400, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, "Real Time Optical Trace :", (900, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            #print(sys_clock) 
            if(restart_flag == False):      
                if show_fps:
                    img = draw_help_and_fps(img, fps)
                cv2.moveWindow(WINDOW_NAME,0,0)  #restart issue
                cv2.imshow(WINDOW_NAME, img)
                toc = time.time()
                curr_fps = 1.0 / (toc - tic)
                # calculate an exponentially decaying average of fps number
                fps = curr_fps if fps == 0.0 else (fps*0.9 + curr_fps*0.1)
                tic = toc

        else:
            print("None Image  --> None Buff = {}".format(none_buff))
            none_buff+=1
            if(none_buff == 1000):
                print("[SYSTEM] VSTARCAMERA Restart")
                cam.stop()  # terminate the sub-thread in camera
                #tf_sess.close()
                #tf.reset_default_graph()
                #tf.contrib.keras.backend.clear_session()
                cam.release() 
                restart_flag = True
                none_buff = 0
                #img = None
                cv2.destroyAllWindows()

        if (restart_flag == False):
            key = cv2.waitKey(1)
            if key == 27:  # ESC key: quit program
                break
            elif key == ord('H') or key == ord('h'):  # Toggle help/fps
                show_fps = not show_fps
            elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
                full_scrn = not full_scrn
                set_full_screen(full_scrn)
            elif key == ord('P') or key == ord('p'):  # Toggle optical trace
                optical_enable = not optical_enable
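
The optical-flow bookkeeping in the example above uses the standard forward-backward check from OpenCV's lk_track.py sample: run Lucas-Kanade flow forward, run it backward from the predicted points, and keep only points that land within one pixel of where they started. A self-contained sketch on two synthetic frames; the helper name track_with_fb_check is ours:

import cv2
import numpy as np

lk_params = dict(winSize=(15, 15), maxLevel=3,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.02))


def track_with_fb_check(prev_gray, frame_gray, points, max_err=1.0):
    """Keep only LK tracks whose backward projection lands within max_err px."""
    p0 = np.float32(points).reshape(-1, 1, 2)
    p1, _, _ = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None,
                                        **lk_params)
    p0r, _, _ = cv2.calcOpticalFlowPyrLK(frame_gray, prev_gray, p1, None,
                                         **lk_params)
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    good = d < max_err
    return p1.reshape(-1, 2)[good]


# Synthetic demo: a bright square shifted 3 px to the right between frames.
a = np.zeros((64, 64), np.uint8)
a[20:30, 20:30] = 255
b = np.zeros((64, 64), np.uint8)
b[20:30, 23:33] = 255
print(track_with_fb_check(a, b, [(25.0, 25.0)]))  # roughly [[28., 25.]]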
Example #17
def loop_and_detect(cam, tf_sess, conf_th, vis, od_type):
    """Loop, grab images from camera, and do object detection.

    # Arguments
      cam: the camera object (video source).
      tf_sess: TensorFlow/TensorRT session to run SSD object detection.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
      od_type: type of object detector ('ssd' or 'faster_rcnn').
    """
    show_fps = True
    full_scrn = False
    fps = 0.0
    tic = time.time()
    tracks = []
    global rects, ct, temp, args
    frame_buff = 0
    none_buff = 0
    restart_flag = False
    backup_label = ''  # '' (not None) so cv2.putText below is safe
    while True:
        #if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
        # Check to see if the user has closed the display window.
        # If yes, terminate the while loop.
        #    break
        if (restart_flag == True):
            cam = Camera(args)
            cam.open()
            cam.start()
            #pb_path = './data/{}_trt.pb'.format(args.model)
            #log_path = './logs/{}_trt'.format(args.model)
            #trt_graph = load_trt_pb(pb_path)
            #tf_config = tf.ConfigProto()
            #tf_config.gpu_options.allow_growth = True
            #tf_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True),graph=trt_graph)
            #od_type = 'faster_rcnn' if 'faster_rcnn' in args.model else 'ssd'
            dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
            _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)
            restart_flag = False

        rects = []
        img = cam.read()
        optical_flow_image = img
        if img is not None:
            box, conf, cls = detect(img, tf_sess, conf_th, od_type=od_type)
            img = vis.draw_bboxes(img, box, conf, cls)
            objects = ct.update(rects)
            cv2.rectangle(img, (0, 980), (1920, 1075), (0, 0, 0), -1)

            for (objectID, centroid) in objects.items():
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
                cv2.circle(img, (centroid[0], centroid[1]), 4, (255, 0, 255),
                           -1)
                backup_label = str(objectID)

            cv2.putText(img, backup_label, (330, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255), 2,
                        cv2.LINE_AA)
            sys_clock = str(
                datetime.datetime.now()) + " Frame_buff=" + str(frame_buff)
            print(sys_clock)
            cv2.putText(img, sys_clock, (20, 950), cv2.FONT_HERSHEY_TRIPLEX, 1,
                        (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, "Traffic Counter: ", (20, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2,
                        cv2.LINE_AA)
            cv2.putText(img, "Detector Type: Human", (400, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2,
                        cv2.LINE_AA)
            cv2.putText(img, "Real Time Optical Trace :", (900, 1035),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255), 2,
                        cv2.LINE_AA)
            cv2.putText(img, "OFF", (1380, 1035), cv2.FONT_HERSHEY_TRIPLEX, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)
            """   
            if(frame_buff == 2000):
                print("[SYSTEM] VSTARCAMERA Restart")
                cam.stop()  # terminate the sub-thread in camera
                #tf_sess.close()
                #tf.reset_default_graph()
                #tf.contrib.keras.backend.clear_session()
                cam.release() 
                restart_flag = True
                frame_buff = 0
                img = None
                cv2.destroyAllWindows()
            frame_buff += 1 
            """
            if (restart_flag == False):
                if show_fps:
                    img = draw_help_and_fps(img, fps)
                #set_full_screen(full_scrn)
                cv2.moveWindow(WINDOW_NAME, 0, 0)
                cv2.imshow(WINDOW_NAME, img)
                toc = time.time()
                curr_fps = 1.0 / (toc - tic)
                # calculate an exponentially decaying average of fps number
                fps = curr_fps if fps == 0.0 else (fps * 0.9 + curr_fps * 0.1)
                tic = toc
        else:
            print("None Image  --> None Buff = {}".format(none_buff))
            none_buff += 1
            if (none_buff == 500):
                print("[SYSTEM] VSTARCAMERA Restart")
                cam.stop()  # terminate the sub-thread in camera
                #tf_sess.close()
                #tf.reset_default_graph()
                #tf.contrib.keras.backend.clear_session()
                cam.release()
                restart_flag = True
                none_buff = 0
                img = None
                cv2.destroyAllWindows()

        if (restart_flag == False):
            key = cv2.waitKey(1)
            if key == 27:  # ESC key: quit program
                break
            elif key == ord('H') or key == ord('h'):  # Toggle help/fps
                show_fps = not show_fps
            elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
                full_scrn = not full_scrn
                set_full_screen(full_scrn)
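
The loop_and_detect examples all smooth the displayed frame rate with the same exponentially decaying average: each new measurement gets 10% weight and the running value keeps 90%. Isolated from the camera loop, with time.sleep standing in for the per-frame grab/detect/draw work:

import time

fps = 0.0
tic = time.time()
for _ in range(10):
    time.sleep(0.03)  # stand-in for grab + detect + draw
    toc = time.time()
    curr_fps = 1.0 / (toc - tic)
    # seed with the first sample, then decay: 90% old value, 10% new
    fps = curr_fps if fps == 0.0 else fps * 0.9 + curr_fps * 0.1
    tic = toc
print('smoothed fps: %.1f' % fps)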