Example #1
def detect(q1, q2):
    model_config = parser.parse_args()  # 'parser' is defined at module level in the original script
    print("Loading detector")
    model = "ssd_mobilenet_v2_coco"
    conf_th = 0.3
    INPUT_HW = (300, 300)
    cls_dict = get_cls_dict("coco")
    vis = BBoxVisualization(cls_dict)
    trt_ssd = TrtSSD(model, INPUT_HW)
    print("Loading detector complete")
    if model_config.ui == 1:
        cv2.startWindowThread()
        cv2.namedWindow("window")

    while True:
        try:
            frame, frame_time = q1.get()
            delay = time.time() - frame_time
            if delay > 0.4:
                print("Skipping frame")
                continue
            boxes, confs, clss = trt_ssd.detect(frame, conf_th)
            print([cls_dict[c] for c in clss])
            if model_config.ui == 1:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                img = vis.draw_bboxes(frame, boxes, confs, clss)
                cv2.imshow('window', img[..., ::-1])
        except Exception as e:
            traceback.print_exc()
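
detect() above drains (frame, capture_time) tuples from q1 and skips frames older than 0.4 s. A minimal sketch of a producer that could feed such a queue is given below; the multiprocessing setup, the camera index, and the capture() name are assumptions for illustration, not part of the original example.

import time
import cv2
from multiprocessing import Process, Queue

def capture(q1):
    """Hypothetical producer: push frames together with their capture timestamps."""
    cap = cv2.VideoCapture(0)  # camera index assumed
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        q1.put((frame, time.time()))  # detect() measures staleness from this timestamp

# q1, q2 = Queue(), Queue()
# Process(target=capture, args=(q1,)).start()
# Process(target=detect, args=(q1, q2)).start()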
Example #2
    def run(self):
        """Run until 'running' flag is set to False by main thread.

        NOTE: CUDA context is created here, i.e. inside the thread
        which calls CUDA kernels.  In other words, creating CUDA
        context in __init__() doesn't work.
        """
        global s_img, s_boxes, s_confs, s_clss

        print('TrtThread: loading the TRT SSD engine...')
        self.cuda_ctx = cuda.Device(0).make_context()  # GPU 0
        self.trt_ssd = TrtSSD(self.model, INPUT_HW)
        print('TrtThread: start running...')
        self.running = True
        while self.running:
            img = self.cam.read()
            if img is None:
                break
            boxes, confs, clss = self.trt_ssd.detect(img, self.conf_th)
            with self.condition:
                s_img, s_boxes, s_confs, s_clss = img, boxes, confs, clss
                self.condition.notify()
        del self.trt_ssd
        self.cuda_ctx.pop()
        del self.cuda_ctx
        print('TrtThread: stopped...')
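
Because run() creates the CUDA context inside the worker thread, the host script still has to initialize the driver once before starting it. A minimal sketch of that launch sequence, assuming pycuda.driver is imported as cuda, a Camera object named cam as in the other examples, and the TrtThread class shown in Example #4 below:

import threading
import pycuda.driver as cuda

cuda.init()  # initialize the driver once; the context itself is made inside run()
condition = threading.Condition()
trt_thread = TrtThread(condition, cam, 'ssd_mobilenet_v2_coco', conf_th=0.3)
trt_thread.start()  # run() builds the context and engine in the worker thread
# ... consume results on the main thread ...
trt_thread.stop()   # clears 'running'; run() pops the context before the thread exits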
Example #3
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    cls_dict = get_cls_dict(args.model)
    trt_ssd = TrtSSD(args.model, INPUT_HW)

    cam.start()
    if args.use_console:
        loop_and_detect_console(cam,
                                trt_ssd,
                                conf_th=0.3,
                                loop=args.loop,
                                cls_dict=cls_dict)
    else:
        open_window(WINDOW_NAME, args.image_width, args.image_height,
                    'Camera TensorRT SSD Demo for Jetson Nano')
        vis = BBoxVisualization(cls_dict)
        loop_and_detect(cam, trt_ssd, conf_th=0.3, vis=vis)

    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
Example #4
class TrtThread(threading.Thread):
    """TrtThread

    This implements the child thread which continues to read images
    from cam (input) and to do TRT engine inferencing.  The child
    thread stores the input image and detection results into global
    variables and uses a condition variable to inform the main thread.
    In other words, the TrtThread acts as the producer while the
    main thread is the consumer.
    """
    def __init__(self, condition, cam, model, conf_th):
        """__init__

        # Arguments
            condition: the condition variable used to notify main
                       thread about new frame and detection result
            cam: the camera object for reading input image frames
            model: a string, specifying the TRT SSD model
            conf_th: confidence threshold for detection
        """
        threading.Thread.__init__(self)
        self.condition = condition
        self.cam = cam
        self.model = model
        self.conf_th = conf_th
        self.cuda_ctx = None  # to be created when run
        self.trt_ssd = None  # to be created when run
        self.running = False

    def run(self):
        """Run until 'running' flag is set to False by main thread.

        NOTE: CUDA context is created here, i.e. inside the thread
        which calls CUDA kernels.  In other words, creating CUDA
        context in __init__() doesn't work.
        """
        global s_img, s_boxes, s_confs, s_clss

        print('TrtThread: loading the TRT SSD engine...')
        self.cuda_ctx = cuda.Device(0).make_context()  # GPU 0
        self.trt_ssd = TrtSSD(self.model, INPUT_HW)
        print('TrtThread: start running...')
        self.running = True
        while self.running:
            img = self.cam.read()
            if img is None:
                break
            boxes, confs, clss = self.trt_ssd.detect(img, self.conf_th)
            with self.condition:
                s_img, s_boxes, s_confs, s_clss = img, boxes, confs, clss
                self.condition.notify()
        del self.trt_ssd
        self.cuda_ctx.pop()
        del self.cuda_ctx
        print('TrtThread: stopped...')

    def stop(self):
        self.running = False
        self.join()
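
The docstring above calls the main thread the consumer of the globals written by run(). Below is a minimal consumer-side sketch under that reading; the loop name, ESC handling, and display helpers are illustrative, with s_img, s_boxes, s_confs, s_clss being the module-level globals the thread publishes.

def loop_and_display(condition, vis):
    """Hypothetical consumer: wait for a notification, then draw the result."""
    while True:
        with condition:
            condition.wait()  # lock is released here while run() publishes the next result
            img, boxes, confs, clss = s_img, s_boxes, s_confs, s_clss
        img = vis.draw_bboxes(img, boxes, confs, clss)
        cv2.imshow(WINDOW_NAME, img)
        if cv2.waitKey(1) == 27:  # ESC quits
            break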
Example #5
def main(config):
    args = parse_args()
    data_sys = Data_sys()
    pub = Publish(host=config['Mqtt_pub'])

    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)

    img_list = []

    # video
    cap = cv2.VideoCapture('xiasha.avi')
    i = 0
    while cap.isOpened():
        ret, img = cap.read()
        if img is not None:

            img_list.append([img])
            result = trt_ssd.detect(img_list[0], conf_th=0.3)
            # print(result)
            data_sys.dataSynchronization(result, img_list, args.model,
                                         ['boundary_intrude', None],
                                         config['Zone'], config['Channel'][0],
                                         config['device_id'], pub,
                                         config['Polygon'])
            img_list = []
            i = i + 1
            print(i)
        else:
            msg = json.dumps({
                "nvr_id": config['Zone'],
                "device_id": config['device_id'],
                "channel_id": config['Channel'][0]
            })
            pub.send_msg(topic="zs/ai_spxwfx/rtsp/" + config['Zone'] + "/" +
                         config['Channel'][0],
                         msg=msg,
                         Zone=config['Zone'],
                         device_id=config['device_id'])
Example #6
def main():
    # Parse arguments and get input
    args = parse_args()
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')

    # Create NN1 and NN2 models and load into memory
    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)
    mtcnn = TrtMtcnn()

    # Create Preview Window
    open_window(WINDOW_NAME, 'Camera Preview', cam.img_width, cam.img_height)
    vis = BBoxVisualization(cls_dict)

    # Enter Detection Mode
    while True:
        # Get Image
        img = cam.read()
        out.write(img)  # 'out' and 'outNN' are cv2.VideoWriter objects created at module level in the original script
        nn1_results = []
        # Run Neural Networks
        img, nn1_results, nn2_results, nn3_results = loop_and_detect(
            img, mtcnn, args.minsize, trt_ssd, conf_th=0.3, vis=vis)

        # Communicate to Arduino
        if nn1_results:
            img = robot_drive(img, nn1_results)
        else:
            serial_port.write("N".encode())
            print("N")

        # Display and save output
        cv2.imshow(WINDOW_NAME, img)
        outNN.write(img)

        # User/Keyboard Input
        key = cv2.waitKey(1)
        if key == ord('q'):
            out.release()
            outNN.release()
            break

    # Clean up and exit
    cam.release()
    cv2.destroyAllWindows()
    serial_port.close()
Example #7
def main():
    args = parse_args()
    cam = Camera(args)
    if not cam.get_is_opened():
        raise SystemExit('ERROR: failed to open camera!')

    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)

    open_window(WINDOW_NAME, 'Camera TensorRT SSD Demo', cam.img_width,
                cam.img_height)
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_ssd, conf_th=0.3, vis=vis)

    cam.release()
    cv2.destroyAllWindows()
Example #8
def main():
    # Parse arguments and get input
    args = parse_args()

    # Create NN1 and NN2 models
    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)
    mtcnn = TrtMtcnn()

    # Create Preview Window
    vis = BBoxVisualization(cls_dict)

    imageNum = 10

    # Enter Detection Mode
    while True:
        # Get Image
        imageName = "/home/nvidia/Pictures/test13.jpg"
        #imageName = "/media/E76F-73E0/Faces/1 (" + str(imageNum) + ").jpg"
        #imageName = "/home/nvidia/Pictures/Benchmarks/Pedestrians/PennPed000" + str(imageNum) + ".png"
        imageNum = imageNum + 1
        #print(imageName)
        img = cv2.imread(imageName)

        cv2.imshow(WINDOW_NAME, img)

        # Run Neural Networks
        img, nn1_results, nn2_results, nn3_results = loop_and_detect(
            img, mtcnn, args.minsize, trt_ssd, conf_th=0.3, vis=vis)

        # Display Results
        cv2.imshow(WINDOW_NAME, img)
        #cv2.waitKey(0)

        # User/Keyboard Input
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break

    # Clean up and exit
    cv2.destroyAllWindows()
    serial_port.close()
Example #9
def main(args, cam):
    # args = parse_args()
    is_open = up()
    time.sleep(60)
    # cam = Camera(args)
    if is_open:
        cam.open()
        if not cam.is_opened:
            sys.exit('Failed to open camera!')

        cls_dict = get_cls_dict(args.model.split('_')[-1])
        trt_ssd = TrtSSD(args.model, INPUT_HW)

        cam.start()
        vis = BBoxVisualization(cls_dict)
        loop_and_detect(cam, trt_ssd, conf_th=0.9, vis=vis)

        cam.stop()
        cam.release()
        cv2.destroyAllWindows()
Example #10
def main():
    args = parse_args()
    cam = Camera(args)
    is_open = up()
    #time.sleep(60)
    if is_open:
        cam.open()
        if not cam.is_opened:
            sys.exit('Failed to open camera!')

        cls_dict = get_cls_dict(args.model.split('_')[-1])
        trt_ssd = TrtSSD(args.model, INPUT_HW)

        cam.start()
        open_window(WINDOW_NAME, args.image_width, args.image_height,
                    'Camera TensorRT SSD Demo for Jetson Nano')
        vis = BBoxVisualization(cls_dict)
        loop_and_detect(cam, trt_ssd, conf_th=0.9, vis=vis)

        cam.stop()
        cam.release()
        cv2.destroyAllWindows()
Example #11
def main():
    args = parse_args()
    check_args(args)

    results_file = 'ssd/results_%s_%s.json' % (args.model, args.mode)
    if args.mode == 'trt':
        ssd = TrtSSD(args.model, INPUT_HW)
    else:
        ssd = TfSSD(args.model, INPUT_HW)

    jpgs = [j for j in os.listdir(args.imgs_dir) if j.endswith('.jpg')]
    generate_results(ssd, args.imgs_dir, jpgs, results_file)

    # Run COCO mAP evaluation
    # Reference: https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
    cocoGt = COCO(args.annotations)
    cocoDt = cocoGt.loadRes(results_file)
    imgIds = sorted(cocoGt.getImgIds())
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()  # prints the mAP table itself and returns None
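
cocoGt.loadRes(results_file) expects the file to be a JSON list of per-detection records in the standard COCO results format, so generate_results() presumably writes entries like the sketch below (the field values are purely illustrative):

# Each element of the JSON list in results_file describes one detection;
# bbox is [x, y, width, height] in pixels and category_id uses the COCO label ids.
# Illustrative entry:
#   {"image_id": 42, "category_id": 1, "bbox": [10.0, 20.0, 50.0, 80.0], "score": 0.92}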
Example #12
from utils.ssd_classes import get_cls_dict
from utils.ssd import TrtSSD
from utils.visualization import BBoxVisualization
from utils.display import open_window, set_display, show_fps
import cv2
import time
import numpy as np

model = "ssd_mobilenet_v2_coco"
filename = "./dogs.jpg"
conf_th = 0.3
INPUT_HW = (300, 300)
cls_dict = get_cls_dict("coco")
vis = BBoxVisualization(cls_dict)
img = cv2.imread(filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
trt_ssd = TrtSSD(model, INPUT_HW)
# Kick start the model.
for _ in range(20):
    boxes, confs, clss = trt_ssd.detect(img, conf_th)
print([cls_dict[c] for c in clss])
img = vis.draw_bboxes(img, boxes, confs, clss)
cv2.imwrite("result.jpg", img[..., ::-1])

times = []
for _ in range(20):
    start_time = time.time()
    boxes, confs, clss = trt_ssd.detect(img, conf_th)
    delta = time.time() - start_time
    times.append(delta)
mean_delta = np.array(times).mean()
fps = 1 / mean_delta
print('mean latency: %.4f s (%.1f FPS)' % (mean_delta, fps))  # report the benchmark result
Example #13
def parse_args():
    '''parse args'''
    parser = argparse.ArgumentParser()

    parser = add_camera_args(parser)
    parser.add_argument('--model', type=str, default='ssd_mobilenet_v1_digger')
    parser.add_argument('--image_resize', default=300, type=int)
    parser.add_argument('--det_conf_thresh', default=0.8, type=float)
    parser.add_argument('--seq_dir', default="sequence/")
    parser.add_argument('--sort_max_age', default=5, type=int)
    parser.add_argument('--sort_min_hit', default=3, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    trt_ssd = TrtSSD(args.model, (args.image_resize, args.image_resize))

    mot_tracker = Sort(args.sort_max_age, args.sort_min_hit)
    video = '/home/mengjun/xianlu/data/digger.mp4'
    cap = cv2.VideoCapture(video)
    colours = np.random.rand(32, 3) * 255
    fps = 0.0
    tic = time.time()

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        boxes, confs, clss = trt_ssd.detect(frame, args.det_conf_thresh)
        #print(boxes, confs, clss)
        if len(boxes) != 0:
            result = []
            for bb, cf, cl in zip(boxes, confs, clss):