def detect(q1, q2):
    """Pull (frame, timestamp) pairs from q1, run TRT SSD detection and, if
    requested, display the annotated frames."""
    model_config = parser.parse_args()  # `parser` is expected to be a module-level argparse parser
    print("Loading detector")
    model = "ssd_mobilenet_v2_coco"
    conf_th = 0.3
    INPUT_HW = (300, 300)
    cls_dict = get_cls_dict("coco")
    vis = BBoxVisualization(cls_dict)
    trt_ssd = TrtSSD(model, INPUT_HW)
    print("Loading detector complete")
    if model_config.ui == 1:
        cv2.startWindowThread()
        cv2.namedWindow("window")

    while True:
        try:
            frame, frame_time = q1.get()
            delay = time.time() - frame_time
            if delay > 0.4:
                # Drop frames that are already stale by the time they are dequeued.
                print("Skipping frame")
                continue
            boxes, confs, clss = trt_ssd.detect(frame, conf_th)
            print([cls_dict[c] for c in clss])
            if model_config.ui == 1:
                # draw_bboxes expects RGB; imshow expects BGR, hence the flip back below.
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                img = vis.draw_bboxes(frame, boxes, confs, clss)
                cv2.imshow('window', img[..., ::-1])
        except Exception:
            traceback.print_exc()
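

# --- Added sketch (not from the source): one way to wire up detect() above with
# multiprocessing queues. A capture process pushes (frame, capture_time) tuples
# onto q1 and detect() consumes them; q2 is passed through unused, matching the
# signature above. The names `capture_frames`, `run_detector_sketch` and
# `frame_queue` are hypothetical, and the module-level `parser` used by detect()
# is assumed to be defined.
import multiprocessing as mp


def capture_frames(q, src=0):
    """Read frames from a camera/video source and push (frame, capture_time)."""
    cap = cv2.VideoCapture(src)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        q.put((frame, time.time()))
    cap.release()


def run_detector_sketch():
    frame_queue = mp.Queue(maxsize=4)   # small queue so stale frames get dropped
    result_queue = mp.Queue()           # placeholder for the unused q2
    mp.Process(target=capture_frames, args=(frame_queue,), daemon=True).start()
    detect(frame_queue, result_queue)
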
class TrtThread(threading.Thread):
    """TrtThread

    This implements the child thread which continuously reads images
    from cam (input) and runs TRT engine inference.  The child
    thread stores the input image and detection results in global
    variables and uses a condition variable to inform the main thread.
    In other words, the TrtThread acts as the producer while the
    main thread is the consumer.
    """
    def __init__(self, condition, cam, model, conf_th):
        """__init__

        # Arguments
            condition: the condition variable used to notify main
                       thread about new frame and detection result
            cam: the camera object for reading input image frames
            model: a string, specifying the TRT SSD model
            conf_th: confidence threshold for detection
        """
        threading.Thread.__init__(self)
        self.condition = condition
        self.cam = cam
        self.model = model
        self.conf_th = conf_th
        self.cuda_ctx = None  # to be created when run
        self.trt_ssd = None  # to be created when run
        self.running = False

    def run(self):
        """Run until 'running' flag is set to False by main thread.

        NOTE: CUDA context is created here, i.e. inside the thread
        which calls CUDA kernels.  In other words, creating CUDA
        context in __init__() doesn't work.
        """
        global s_img, s_boxes, s_confs, s_clss

        print('TrtThread: loading the TRT SSD engine...')
        self.cuda_ctx = cuda.Device(0).make_context()  # GPU 0
        self.trt_ssd = TrtSSD(self.model, INPUT_HW)
        print('TrtThread: start running...')
        self.running = True
        while self.running:
            img = self.cam.read()
            if img is None:
                break
            boxes, confs, clss = self.trt_ssd.detect(img, self.conf_th)
            with self.condition:
                s_img, s_boxes, s_confs, s_clss = img, boxes, confs, clss
                self.condition.notify()
        del self.trt_ssd
        self.cuda_ctx.pop()
        del self.cuda_ctx
        print('TrtThread: stopped...')

    def stop(self):
        self.running = False
        self.join()
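

# --- Added sketch (not from the source): the consumer side described in the
# TrtThread docstring above. The main thread waits on the shared condition
# variable, then reads the latest frame/detections that TrtThread stored in the
# globals s_img/s_boxes/s_confs/s_clss. `cam` is assumed to be a camera object
# with a read() method, set up elsewhere; the function and window names are
# hypothetical.
def loop_and_display_sketch(cam):
    condition = threading.Condition()
    vis = BBoxVisualization(get_cls_dict('coco'))
    trt_thread = TrtThread(condition, cam, 'ssd_mobilenet_v2_coco', conf_th=0.3)
    trt_thread.start()
    try:
        while True:
            with condition:
                condition.wait()  # block until TrtThread publishes a new result
                img, boxes, confs, clss = s_img, s_boxes, s_confs, s_clss
            img = vis.draw_bboxes(img, boxes, confs, clss)
            cv2.imshow('detections', img)
            if cv2.waitKey(1) == 27:  # ESC quits
                break
    finally:
        trt_thread.stop()
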
Example #3
def main(config):
    args = parse_args()
    data_sys = Data_sys()
    pub = Publish(host=config['Mqtt_pub'])

    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)

    img_list = []

    # Read frames from the video file and run detection on each one.
    cap = cv2.VideoCapture('xiasha.avi')
    i = 0  # processed-frame counter
    while cap.isOpened():
        ret, img = cap.read()
        if img is not None:
            img_list.append([img])
            result = trt_ssd.detect(img, conf_th=0.3)  # detect expects a single image
            # print(result)
            data_sys.dataSynchronization(result, img_list, args.model,
                                         ['boundary_intrude', None],
                                         config['Zone'], config['Channel'][0],
                                         config['device_id'], pub,
                                         config['Polygon'])
            img_list = []
            i += 1
            print(i)
        else:
            msg = json.dumps({
                "nvr_id": config['Zone'],
                "device_id": config['device_id'],
                "channel_id": config['Channel'][0]
            })
            pub.send_msg(topic="zs/ai_spxwfx/rtsp/" + config['Zone'] + "/" +
                         config['Channel'][0],
                         msg=msg,
                         Zone=config['Zone'],
                         device_id=config['device_id'])
Example #4
import time

import cv2
import numpy as np

from utils.visualization import BBoxVisualization
from utils.display import open_window, set_display, show_fps
# The next two imports assume the utils/ layout of jkjung-avt/tensorrt_demos.
from utils.ssd_classes import get_cls_dict
from utils.ssd import TrtSSD

model = "ssd_mobilenet_v2_coco"
filename = "./dogs.jpg"
conf_th = 0.3
INPUT_HW = (300, 300)
cls_dict = get_cls_dict("coco")
vis = BBoxVisualization(cls_dict)
img = cv2.imread(filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
trt_ssd = TrtSSD(model, INPUT_HW)
# Warm up the model: the first few inferences are slower while TensorRT initializes.
for _ in range(20):
    boxes, confs, clss = trt_ssd.detect(img, conf_th)
print([cls_dict[c] for c in clss])
img = vis.draw_bboxes(img, boxes, confs, clss)
cv2.imwrite("result.jpg", img[..., ::-1])  # flip RGB back to BGR for OpenCV

# Time 20 detections and report the mean latency and throughput.
times = []
for _ in range(20):
    start_time = time.time()
    boxes, confs, clss = trt_ssd.detect(img, conf_th)
    delta = time.time() - start_time
    times.append(delta)
mean_delta = np.array(times).mean()
fps = 1 / mean_delta
print("Average (sec): {:.4f}, FPS: {:.2f}".format(mean_delta, fps))
Example #5

if __name__ == "__main__":
    args = parse_args()
    trt_ssd = TrtSSD(args.model, (args.image_resize, args.image_resize))

    mot_tracker = Sort(args.sort_max_age, args.sort_min_hit)
    video = '/home/mengjun/xianlu/data/digger.mp4'
    cap = cv2.VideoCapture(video)
    colours = np.random.rand(32, 3) * 255  # a random colour per track id
    fps = 0.0
    tic = time.time()

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of video or read failure
        boxes, confs, clss = trt_ssd.detect(frame, args.det_conf_thresh)
        #print(boxes, confs, clss)
        if len(boxes) != 0:
            # Build [x1, y1, x2, y2, class] rows for the SORT tracker.
            result = []
            for bb, cf, cl in zip(boxes, confs, clss):
                result.append([bb[0], bb[1], bb[2], bb[3], cl])
            result = np.array(result, dtype=object)
            #print('result:',result)
            height = frame.shape[0]
            width = frame.shape[1]

            if len(clss) == 0:
                continue
            else:
                det = result[:, 0:5]
                print('det:', det)