Example #1
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()

    # import pdb
    # pdb.set_trace()

    if not cam.is_opened:
        sys.exit('[ERROR] Failed to open camera!')

    cls_dict = get_cls_dict('coco')
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))

    print('[INFO]  Camera: starting')
    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'TensorRT YOLOv3 Detector')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam,
                    args.runtime,
                    trt_yolov3,
                    conf_th=0.3,
                    vis=vis,
                    window_name=WINDOW_NAME)

    print('[INFO]  Program: stopped')
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #2
def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('Bad category_num: %d!' % args.category_num)

    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    cls_dict = get_cls_dict(args.category_num)
    yolo_dim = int(args.model.split('-')[-1])
    if yolo_dim not in (288, 416, 608):
        raise SystemExit('Bad yolo_dim: %d!\nPlease make sure the model file '
                         'name contains the correct dimension...' % yolo_dim)

    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim), args.category_num)

    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT YOLOv3 Demo')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_yolov3, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #3
def main():
    args = parse_args()

    #YOLO INIT
    #cls_dict = get_cls_dict('coco')
    cls_dict = get_cls_dict('deepfamily')
    print("classes count : ", len(cls_dict))
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    print("yolo_dim : ", yolo_dim)
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))

    #CAMERA
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    cam.start()

    #CAM-WINDOW
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'DEEPFAMILY PROJECT - TensorRT YOLOv3')
    vis = BBoxVisualization(cls_dict)

    #DETECT-LOOP
    loop_and_detect(cam, trt_yolov3, conf_th=0.95, vis=vis)
    #loop_and_detect(cam, trt_yolov3, conf_th=0.95)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #4
def main():
    global THREAD_RUNNING
    args = parse_args()

    cap = open_cam_rtsp(args.cam_name, args.cam_password, args.cam_ip)
    if not cap.isOpened():
        sys.exit('Failed to open camera!')

    # Child thread for grabbing frames from the camera
    THREAD_RUNNING = True
    th = threading.Thread(target=grab_img, args=(cap, ))
    th.start()

    # Object detection
    cls_dict = get_cls_dict('coco')
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))
    vis = BBoxVisualization(cls_dict)
    # Consider cropping the ROI region and resizing it
    loop_and_detect(trt_yolov3, conf_th=0.3, vis=vis)

    # Stop the frame-grabbing thread
    THREAD_RUNNING = False
    th.join()
    cap.release()
    cv2.destroyAllWindows()
Example #5
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    cls_dict = get_cls_dict('coco')
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))

    cam.start()
    # open_window(WINDOW_NAME, args.image_width, args.image_height,
    #             'Camera TensorRT YOLOv3 Demo')
    vis = BBoxVisualization(cls_dict)

    # for video
    # loop_and_detect(cam, trt_yolov3, conf_th=0.3, vis=vis)
    
    # for single file
    detect_demo(cam, trt_yolov3, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #6
def main():
    args = parse_args()
    check_args(args)

    results_file = 'yolov3_onnx/results_%s.json' % args.model
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))

    cocoGt = COCO(args.annotations)

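    # Run detection only on COCO images that contain the 'stop sign' category.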
    catIds = cocoGt.getCatIds(catNms=['stop sign'])
    imgIds_cat = cocoGt.getImgIds(catIds=catIds)

    # test = cocoGt.loadImgs(433892)
    # print(test)
    # print(imgIds_cat)
    jpgs = [cocoGt.loadImgs(ids)[0]['file_name'] for ids in imgIds_cat]

    # jpgs = [j for j in os.listdir(args.imgs_dir) if j.endswith('.jpg')]
    generate_results(trt_yolov3, args.imgs_dir, jpgs, results_file)

    # Run COCO mAP evaluation
    # Reference: https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb

    cocoDt = cocoGt.loadRes(results_file)
    imgIds = sorted(cocoGt.getImgIds())
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()  # summarize() prints the metrics itself and returns None
Example #7
 def feed_frames(self):
     """
     By Guanghao Zhang.
     Put video frames directly into the push queue, without running pedestrian detection.
     :return:
     """
     global unnormal_send, img_send, dist_time, interval, camera_sta, dist, put_img
     num = 0
     category_num = 80
     model = 'cfg/yolov3-416.trt'
     w, h = 416, 416
     conf_th = 0.7
     cuda.init()
     cuda_ctx = cuda.Device(0).make_context()  # GPU 0
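     # Build the TensorRT engine inside the CUDA context just pushed for this thread.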
     trt_yolo = TrtYOLOv3(model, (h, w), category_num)
     while True:
         start = time.time()
         frame = self.camera_queue.get()
         boxes, confs, clss = trt_yolo.detect(frame, conf_th)
         end_lab = change_lab(frame, boxes, clss)
         end_img, unnormal_num, distence = warnplay(frame,
                                                    np.array(end_lab),
                                                    interval)
         end = time.time()
         print(end - start)
         put_img = end_img
         # self.frame_queue.put(end_img)
         if unnormal_num > unnormal_send:
             unnormal_send = unnormal_num
             dist = distence
             img_send = end_img
         num += 1
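         # Roughly every (dist_time * 10) frames, save a snapshot and post an alarm
         # if an abnormal distance was observed while the camera is active.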
         if (num % int(dist_time * 1000 / 100) == 0
                 and unnormal_send != 0 and camera_sta == True):
             if not os.path.exists(
                     '/DATA/pedestrian_dist/images/static/' +
                     datetime.datetime.now().strftime("%Y-%m-%d")):
                 os.mkdir('/DATA/pedestrian_dist/images/static/' +
                          datetime.datetime.now().strftime("%Y-%m-%d"))
             save_path = ('/DATA/pedestrian_dist/images/static/' +
                          datetime.datetime.now().strftime("%Y-%m-%d") + '/' +
                          datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') +
                          '_' + str(self.rount) + '.jpg')
             # print(save_path)
             cv2.imwrite(save_path, img_send)
             Post(
                 self.alarmpost,
                 RobotLogin(
                     self.rount,
                     str(datetime.datetime.now().strftime(
                         '%Y-%m-%d %H:%M:%S')), str(round(dist, 1)),
                     str(unnormal_send), str(os.path.basename(save_path)),
                     str(camera_sta)))
             num = 0
             unnormal_send = 0
     del trt_yolo
     cuda_ctx.pop()
     del cuda_ctx
Example #8
def main():
    args = parse_args()

    cls_dict = get_cls_dict('coco')
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))
    vis = BBoxVisualization(cls_dict)

    check(trt_yolov3, conf_th=0.3, vis=vis)
Example #9
 def __init__(self, model, size, vis, conf_th, cat_num):
     self.model = model
     self.size = size
     self.vis = vis
     self.th = conf_th
     self.num = cat_num
     self.bridge = CvBridge()
     self.yoloRT = TrtYOLOv3(model, (size, size), cat_num)
     self.pub = rospy.Publisher("people_detection", Image)
Example #10
def detect_and_alarm():
    WINDOW_NAME = "Robot_YOLOv3_Detector_with_TensorRT"
    MODEL = "yolov3-tiny-416"
    IMAGE_WIDTH = 640
    IMAGE_HEIGHT = 480
    RUN_TIME = False
    global DETECT_TIME

    cam = Camera_for_Robot(video_dev=DETECT_ID,
                           image_width=IMAGE_WIDTH,
                           image_height=IMAGE_HEIGHT)
    #cam = Camera_for_Robot(video_dev='./test.mp4', image_width=IMAGE_WIDTH, image_height=IMAGE_HEIGHT)
    cam.open()

    if not cam.is_opened:
        print("Capture road opens failure")
        #return
    cam.start()

    open_window(WINDOW_NAME, IMAGE_WIDTH, IMAGE_HEIGHT,
                'TensorRT YOLOv3 Detector')
    cls_dict = get_cls_dict('coco')
    yolo_dim = int(MODEL.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(model=MODEL, input_shape=(yolo_dim, yolo_dim))
    vis = BBoxVisualization(cls_dict)

    loop_and_detect(cam,
                    RUN_TIME,
                    trt_yolov3,
                    conf_th=0.3,
                    vis=vis,
                    window_name=WINDOW_NAME,
                    total_time=DETECT_TIME)
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
    print("Detection finishes successfully")
Example #11
def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('yolov3_onnx/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolov3_onnx/%s.trt) not found!' %
                         args.model)

    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        raise SystemExit('ERROR: failed to open camera!')

    cls_dict = get_cls_dict(args.category_num)
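    # The model name is expected to end with either 'WIDTHxHEIGHT' or a single
    # square dimension, and both sides must be multiples of 32.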
    yolo_dim = args.model.split('-')[-1]
    if 'x' in yolo_dim:
        dim_split = yolo_dim.split('x')
        if len(dim_split) != 2:
            raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
        w, h = int(dim_split[0]), int(dim_split[1])
    else:
        h = w = int(yolo_dim)
    if h % 32 != 0 or w % 32 != 0:
        raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)

    trt_yolov3 = TrtYOLOv3(args.model, (h, w), args.category_num)

    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT YOLOv3 Demo')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_yolov3, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #12
def main():
    model = "yolov3-tiny-covid"
    category_num = 5
    conf_th = 0.3
    input_size = 608
    cls_dict = get_cls_dict(category_num)
    vis = BBoxVisualization(cls_dict)
    #args = parse_args()
    detector = yoloRT(model, input_size, vis, conf_th, category_num)
    pub = rospy.Publisher('people_detection', Bool, queue_size=5)
    rospy.init_node('people_detector', anonymous=True)
    sub = rospy.Subscriber('front_camera/color/image_raw', Image,
                           detector.detect)
    #pipeline = camera_config(sys.argv)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cls_dict = get_cls_dict(category_num)
    yolo_dim = model.split('-')[-1]
    if 'x' in yolo_dim:
        dim_split = yolo_dim.split('x')
        if len(dim_split) != 2:
            raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
        w, h = int(dim_split[0]), int(dim_split[1])
    else:
        h = w = 608
    if h % 32 != 0 or w % 32 != 0:
        raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)

    trt_yolov3 = TrtYOLOv3(model, (h, w), category_num)

    vis = BBoxVisualization(cls_dict)
    loop_and_detect(pub, pipeline, trt_yolov3, conf_th=0.3, vis=vis)

    pipeline.stop()
Example #13
    loop_and_detect(cam, trt_yolov3, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()


# if __name__ == '__main__':
#     main()
args = parse_args()

cap = cv2.VideoCapture(0)  #("rtsp://*****:*****@192.168.10.67:554/1/1")

cls_dict = get_cls_dict('coco')
yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))
vis = BBoxVisualization(cls_dict)

full_scrn = False
fps = 0.0
tic = time.time()
conf_th = 0.3

while True:
    _, frame = cap.read()

    time_start = time.time()
    boxes, confs, clss = trt_yolov3.detect(frame, conf_th)
    print("--------------tf1----------\n",
          time.time() - time_start, "\n-------------------------\n")
    img = vis.draw_bboxes(frame, boxes, confs, clss)