Example #1
 def __init__(self, args):
     self.args = args
     self.open_video()
     #self.yolo3 = YOLOv3(args.yolo_cfg, args.yolo_weights, args.yolo_names,use_cuda=args.ctx, is_xywh=True, conf_thresh=args.conf_thresh, nms_thresh=args.nms_thresh)
     self.command_type = args.mot_type
     threshold = np.array([0.7, 0.8, 0.9])
     crop_size = [112, 112]
     if self.command_type == 'face':
         self.mtcnn = MtcnnDetector(threshold, crop_size, args.detect_model)
     elif self.command_type == 'person':
         self.person_detect = RetinanetDetector(args)
     elif self.command_type == 'head':
         self.head_detect = HeadDetect(args)
     self.kf = KalmanFilter()
     self.deepsort = DeepSort(args.feature_model,
                              args.face_load_num,
                              mot_type=args.mot_type)
     self.meanes_track = []
     self.convariances_track = []
     self.id_cnt_dict = dict()
     self.tracker_run = TrackerRun(args.tracker_type)
     self.moveTrack = MoveTrackerRun(self.kf)
     self.img_clarity = BlurDetection()
     self.score = 60.0
     self.in_num = 0
     self.out_num = 0
Example #2
    def __init__(
            self,
            cfg,
            weights,
            video_path,
            deep_checkpoint="deep_sort/deep/checkpoint/ckpt.t7",
            data="dataset1.data",
            output_file=None,
            img_size=416,
            display=False,
            nms_thres=0.4,
            conf_thres=0.5,
            max_dist=0.2,
            display_width=800,
            display_height=600,
            save_path=None):
        device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.vidCap = cv2.VideoCapture()
        self.yolov3 = InferYOLOv3(cfg, img_size, weights, data, device,
                                  conf_thres, nms_thres)
        self.deepsort = DeepSort(deep_checkpoint,
                                 max_dist)
        self.display = display
        self.video_path = video_path
        self.output_file = output_file
        self.save_path = save_path

        if self.display:
            cv2.namedWindow("Test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("Test", display_width, display_height)
Example #3
    def __init__(self, args):
        self.args = args
        use_cuda = bool(strtobool(self.args.use_cuda))
        params = Params(f'projects/{self.args.project}.yml')
        self.submit = True
        self.cam_id = 1
        self.object_list = []
        self.object_list_tracks = []
        if args.display:
            pass
            # cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            # cv2.resizeWindow("test", args.display_width, args.display_height)

        self.vdo = cv2.VideoCapture()
        self.efficientdet = EfficientDetBackbone(
            num_classes=len(params.obj_list),
            compound_coef=self.args.compound_coef,
            ratios=eval(params.anchors_ratios),
            scales=eval(params.anchors_scales)).cuda()
        # self.yolo3 = YOLOv3(args.yolo_cfg, args.yolo_weights, args.yolo_names, is_xywh=True, conf_thresh=args.conf_thresh, nms_thresh=args.nms_thresh, use_cuda=use_cuda)

        self.deepsort = DeepSort(args.deepsort_checkpoint, use_cuda=use_cuda)
        # self.class_names = self.yolo3.class_names
        self.efficientdet.load_state_dict(torch.load(
            args.detector_weights_path),
                                          strict=False)
Example #4
 def __init__(self, args):
     self.args = args
     # if args.display:
     #     cv2.namedWindow("test", cv2.WINDOW_NORMAL)
     #     cv2.resizeWindow("test", args.display_width, args.display_height)
     self.open_video()
     #self.yolo3 = YOLOv3(args.yolo_cfg, args.yolo_weights, args.yolo_names,use_cuda=args.use_cuda, is_xywh=True, conf_thresh=args.conf_thresh, nms_thresh=args.nms_thresh)
     self.command_type = args.mot_type
     threshold = np.array([0.7, 0.8, 0.9])
     crop_size = [112, 112]
     if self.command_type == 'face':
         self.mtcnn = MtcnnDetector(threshold, crop_size, args.detect_model)
     elif self.command_type == 'person':
         self.person_detect = RetinanetDetector(args)
     self.deepsort = DeepSort(args.feature_model,
                              args.face_load_num,
                              use_cuda=args.use_cuda,
                              mot_type=self.command_type)
     self.kf = KalmanFilter()
     self.meanes_track = []
     self.convariances_track = []
     self.id_cnt_dict = dict()
     self.moveTrack = MoveTrackerRun(self.kf)
     self.img_clarity = BlurDetection()
     self.score = 60.0
Example #5
 def __init__(self, centernet_opt, args):
     # CenterNet detector
     self.detector = detector_factory[centernet_opt.task](centernet_opt)
     # Deep SORT
     self.deepsort = DeepSort(args.deepsort_checkpoint,
                              args.max_cosine_distance, args.use_cuda)
     self.args = args
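Examples #5, #8, #11, and #12 feed CenterNet detections into DeepSort, whose update() expects center-based xywh boxes plus per-box confidences. Example #18 below calls a helper named bbox_to_xywh_cls_conf for exactly this step; the original helper is not included in this collection, so the following is a hedged reconstruction that assumes the detector returns per-class arrays of [x1, y1, x2, y2, score] rows, as CenterNet's demo code does.

    import numpy as np

    def bbox_to_xywh_cls_conf(results, class_id, conf_thresh=0.3):
        # Hedged reconstruction, not the original helper from Example #18.
        dets = results[class_id]               # rows of [x1, y1, x2, y2, score]
        dets = dets[dets[:, 4] > conf_thresh]  # drop low-confidence boxes
        if len(dets) == 0:
            return None, None
        xywh = dets[:, :4].copy()
        xywh[:, 2] = dets[:, 2] - dets[:, 0]      # width
        xywh[:, 3] = dets[:, 3] - dets[:, 1]      # height
        xywh[:, 0] = dets[:, 0] + xywh[:, 2] / 2  # center x
        xywh[:, 1] = dets[:, 1] + xywh[:, 3] / 2  # center y
        return xywh, dets[:, 4]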
Example #6
 def __init__(self):
     self.vdo = cv2.VideoCapture()
     self.yolo3 = YOLO3("YOLO3/cfg/yolo_v3.cfg","YOLO3/yolov3.weights","YOLO3/cfg/coco.names", is_xywh=True)
     self.yolo3.net= self.yolo3.net.cuda()
     self.deepsort = DeepSort("deep/checkpoint/ckpt.t7")
     self.class_names = self.yolo3.class_names
     self.write_video = True
Example #7
    def __init__(self,
                 cfg,
                 weights,
                 video_path,
                 deep_checkpoint="deep_sort/deep/checkpoint/resnet50_last.pt",
                 output_file=None,
                 img_size=512,
                 display=True,
                 max_dist=0.2,
                 display_width=800,
                 display_height=600,
                 save_path=None,
                 json_path='./data/pascal_voc_classes.json'):
        device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')
        # init opencv video capturer
        self.vidCap = cv2.VideoCapture()
        # init a detector
        self.yolov3 = InferYOLOv3(cfg, img_size, weights, device, json_path)
        # init a deepsort tracker
        self.deepsort = DeepSort(deep_checkpoint, max_dist)
        # settings
        self.display = display
        self.video_path = video_path
        self.output_file = output_file
        self.save_path = save_path

        if self.display:
            cv2.namedWindow("Test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("Test", display_width, display_height)
Example #8
    def __init__(self, opt):
        self.vdo = cv2.VideoCapture()

        #centerNet detector
        self.detector = detector_factory[opt.task](opt)
        self.deepsort = DeepSort("deep/checkpoint/ckpt.t7")

        self.write_video = True
Example #9
 def __init__(self):
     self.vdo = cv2.VideoCapture()
     self.yolo3 = YOLO3("YOLO3/cfg/yolo_v3.cfg",
                        "/local/b/cam2/data/HumanBehavior/yolov3.weights",
                        "YOLO3/cfg/coco.names",
                        is_xywh=True)
     self.deepsort = DeepSort("/local/b/cam2/data/HumanBehavior/ckpt.t7")
     self.class_names = self.yolo3.class_names
     self.write_video = True
Example #10
    def __init__(self, args):
        self.args = args
        use_cuda = bool(strtobool(self.args.use_cuda))

        self.vdo = cv2.VideoCapture()
        self.yolo3 = YOLOv3(args.yolo_cfg, args.yolo_weights, args.yolo_names, is_xywh=True,
                            conf_thresh=args.conf_thresh, nms_thresh=args.nms_thresh, use_cuda=use_cuda)
        self.deepsort = DeepSort(args.deepsort_checkpoint, use_cuda=use_cuda)
        self.class_names = self.yolo3.class_names
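Most constructors in this collection only wire a detector to a DeepSort instance; the per-frame loop that drives them follows the same shape everywhere. A minimal sketch of such a loop for Example #10's class is below. The detector call signature (bbox_xywh, cls_conf, cls_ids) is an assumption; DeepSort.update follows the usage visible in Example #18, draw_bboxes is the helper imported in Example #30 (its three-argument call here is an assumption), and self.vdo is assumed to have been opened beforehand.

    def detect(self):
        # Hedged sketch of the common detect-then-track loop; not taken
        # verbatim from any example in this collection.
        while self.vdo.grab():
            _, img = self.vdo.retrieve()
            bbox_xywh, cls_conf, cls_ids = self.yolo3(img)  # assumed detector API
            if bbox_xywh is not None:
                mask = cls_ids == 0  # keep only the 'person' class
                outputs = self.deepsort.update(bbox_xywh[mask], cls_conf[mask], img)
                if len(outputs) > 0:
                    # each row of outputs is [x1, y1, x2, y2, track_id]
                    img = draw_bboxes(img, outputs[:, :4], outputs[:, -1])
            cv2.imshow("test", img)
            if cv2.waitKey(1) == 27:  # Esc quits
                break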
Example #11
 def __init__(self, centernet_opt, args):
     # CenterNet detector
     self.detector = detector_factory[centernet_opt.task](centernet_opt)
     # Deep SORT
     self.deepsort = DeepSort(args.deepsort_checkpoint,
                              args.max_cosine_distance, args.use_cuda, args.use_original_model)
     self.debug = args.debug
     if self.debug and not os.path.exists(args.debug_dir):
         os.mkdir(args.debug_dir)
     self.args = args
Example #12
    def __init__(self, opt):
        self.vdo = cv2.VideoCapture()
        #self.yolo_info = YOLO3("YOLO3/cfg/yolo_v3.cfg", "YOLO3/yolov3.weights", "YOLO3/cfg/coco.names", is_xywh=True)

        #centerNet detector
        self.detector = detector_factory[opt.task](opt)
        self.deepsort = DeepSort("deep/checkpoint/ckpt.t7")
        # self.deepsort = DeepSort("deep/checkpoint/ori_net_last.pth")

        self.write_video = True
Example #13
 def __init__(self, args):
     self.args = args
     use_cuda = bool(strtobool(self.args.use_cuda))
     #use_cuda=False
     if args.display:
         cv2.namedWindow("test", cv2.WINDOW_NORMAL)
         cv2.resizeWindow("test", args.display_width, args.display_height)
     self.vdo = cv2.VideoCapture()
     self.detectron2 = Detectron2()
     self.deepsort = DeepSort(args.deepsort_checkpoint, use_cuda=use_cuda)
Example #14
    def __init__(self, args):
        self.args = args

        use_cuda = bool(strtobool(self.args.use_cuda))

        self.detectron2 = Detectron2(self.args.detectron_cfg,
                                     self.args.detectron_ckpt)
        if self.args.deep_sort:
            self.deepsort = DeepSort(args.deepsort_checkpoint,
                                     use_cuda=use_cuda)
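Since Example #14 only constructs the tracker when args.deep_sort is set, its callers need a fallback path. A hypothetical companion method (name and return convention assumed, sketch only):

    def track(self, img, bbox_xywh, cls_conf):
        # Sketch only: with DeepSort disabled, pass detections through
        # unchanged; otherwise associate them into tracked identities.
        if self.args.deep_sort:
            return self.deepsort.update(bbox_xywh, cls_conf, img)
        return bbox_xywh  # raw detections, no track ids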
Example #15
    def __init__(self, args):
        self.args = args
        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        self.vdo = cv2.VideoCapture()
        self.yolo3 = YOLOv3(args.yolo_cfg, args.yolo_weights, args.yolo_names, is_xywh=True, conf_thresh=args.conf_thresh, nms_thresh=args.nms_thresh)
        self.deepsort = DeepSort(args.deepsort_checkpoint)
        self.class_names = self.yolo3.class_names
Example #16
    def __init__(self, args):
        self.args = args
        args.display = False
        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        self.vdo = cv2.VideoCapture()
        self.centernet = detector_factory[opt.task](opt)
        # self.yolo3 = YOLOv3(args.yolo_cfg, args.yolo_weights, args.yolo_names, is_xywh=True, conf_thresh=args.conf_thresh, nms_thresh=args.nms_thresh)
        self.deepsort = DeepSort(args.deepsort_checkpoint, args.model_name)
Example #17
    def __init__(self, args):
        self.args = args
        use_cuda = bool(strtobool(self.args.use_cuda))

        self.vdo = cv2.VideoCapture()
        self.detectron2 = Detectron2()

        # Initialize coordinate mapper
        myCoordMapper = coord_mapper.CoordMapper(coord_mapper.ISSIA_kozep_elorol)

        self.deepsort = DeepSort(args.deepsort_checkpoint, lambdaParam=1.0, coordMapper=myCoordMapper, max_dist=1.0, min_confidence=0.1, 
                        nms_max_overlap=0.7, max_iou_distance=0.7, max_age=75, n_init=3, nn_budget=50, use_cuda=use_cuda)
Example #18
def tracking(queue_items: mp.Queue, area):
    txt_writer = open(txt_path, 'wt')
    deepsorts = []
    for i in range(5):
        deepsort = DeepSort("deep/checkpoint/ckpt.t7")
        deepsort.extractor.net.share_memory()
        deepsorts.append(deepsort)
    xmin, ymin, xmax, ymax = area
    while True:
        try:
            queue_item = queue_items.get(block=True, timeout=3)
        except queue.Empty:
            print('Empty queue. End?')
            break

        batch_results = queue_item.detect_results
        imgs = queue_item.imgs
        ori_imgs = queue_item.ori_imgs
        frame_ids = queue_item.frame_ids
        for batch_idx, results in enumerate(batch_results):  # frame by frame
            for class_id in [1, 2, 3, 4]:
                bbox_xywh, cls_conf = bbox_to_xywh_cls_conf(results, class_id)
                if (bbox_xywh is not None) and (len(bbox_xywh) > 0):
                    outputs = deepsorts[class_id].update(
                        bbox_xywh, cls_conf, imgs[batch_idx])
                    if len(outputs) > 0:
                        bbox_xyxy = outputs[:, :4]
                        identities = outputs[:, -1]

                        offset = (xmin, ymin)
                        if is_write:
                            ori_im = draw_bboxes(ori_imgs[batch_idx],
                                                 bbox_xyxy,
                                                 identities,
                                                 class_id,
                                                 offset=(xmin, ymin))
                        for i, box in enumerate(bbox_xyxy):
                            x1, y1, x2, y2 = [int(v) for v in box]
                            x1 += offset[0]
                            x2 += offset[0]
                            y1 += offset[1]
                            y2 += offset[1]
                            idx = int(
                                identities[i]) if identities is not None else 0
                            txt_writer.write(
                                f'{frame_ids[batch_idx]} {class_id} {idx} {x1} {y1} {x2} {y2}\n'
                            )
    txt_writer.close()
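The worker above keeps one DeepSort per class id so identities never mix across classes, and it reads txt_path and is_write as module-level globals, which must therefore exist before it starts. A minimal hypothetical launcher for it (the QueueItem container is an assumption matching the four attributes the worker reads):

    import multiprocessing as mp
    from collections import namedtuple

    # Assumed container; the worker only touches these four fields.
    QueueItem = namedtuple('QueueItem',
                           ['detect_results', 'imgs', 'ori_imgs', 'frame_ids'])

    if __name__ == '__main__':
        queue_items = mp.Queue(maxsize=8)
        worker = mp.Process(target=tracking, args=(queue_items, (0, 0, 1920, 1080)))
        worker.start()
        # a detector process would put QueueItem batches onto queue_items here
        worker.join()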
Example #19
    def __init__(self,
                 detections_file: str,
                 resolution: tuple,
                 fps: int,
                 input_images_dir: str,
                 output_video_path: str,
                 output_result_path: str,
                 use_cuda: bool,
                 lambdaParam: float,
                 max_dist: float,
                 min_confidence: float,
                 nms_max_overlap: float,
                 max_iou_distance: float,
                 max_age: int,
                 n_init: int,
                 nn_budget: int,
                 model_path='deep_sort/deep/checkpoint/ckpt.t7',
                 early_stopping=None):

        self.detections_file = detections_file  # Pickle file containing all detections
        self.input_images_dir = input_images_dir  # Directory with the 2.5K images, named {frameNum}.jpg
        self.output_video_path = output_video_path  # Where the visualization video is saved
        self.output_result_path = output_result_path  # Where the output is saved as CSV
        self.early_stopping = early_stopping

        assert self.output_result_path is not None and self.detections_file is not None

        self._use_cuda = use_cuda
        self.fps = fps
        self.resolution = resolution
        # Initialize coordinate mapper
        self.myCoordMapper = coord_mapper.CoordMapperCSG(
            match_code='HUN-BEL 1. Half')

        self.deepsort = DeepSort(model_path=model_path,
                                 lambdaParam=lambdaParam,
                                 coordMapper=self.myCoordMapper,
                                 max_dist=max_dist,
                                 min_confidence=min_confidence,
                                 nms_max_overlap=nms_max_overlap,
                                 max_iou_distance=max_iou_distance,
                                 max_age=max_age,
                                 n_init=n_init,
                                 nn_budget=nn_budget,
                                 use_cuda=self._use_cuda,
                                 resolution=(self.resolution[0] * 2,
                                             self.resolution[1]),
                                 fps=self.fps)
Example #20
    def __init__(self, args):
        self.args = args
        use_cuda = bool(strtobool(self.args.use_cuda))
        params = Params(f'projects/{self.args.project}.yml')
        self.cam_id = 1
        if args.display:
            pass
            # cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            # cv2.resizeWindow("test", args.display_width, args.display_height)

        self.vdo = cv2.VideoCapture()

        self.deepsort = DeepSort(args.deepsort_checkpoint, use_cuda=use_cuda)
        self.class_names = load_class_names('data/coco.names')
        self.submit = True
        self.object_list = []
Example #21
 def __init__(self, args):
     self.args = args
     if args.display:
         cv2.namedWindow("test", cv2.WINDOW_NORMAL)
         cv2.resizeWindow("test", args.display_width, args.display_height)
     device = torch.device(
         'cuda') if torch.cuda.is_available() else torch.device('cpu')
     self.vdo = cv2.VideoCapture()
     self.yolo3 = InferYOLOv3(args.yolo_cfg,
                              args.img_size,
                              args.yolo_weights,
                              args.data_cfg,
                              device,
                              conf_thres=args.conf_thresh,
                              nms_thres=args.nms_thresh)
     self.deepsort = DeepSort(args.deepsort_checkpoint)
     self.class_names = self.yolo3.classes
Example #22
    def __init__(self, args):
        self.args = args
        use_cuda = bool(strtobool(self.args.use_cuda))
        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        self.yolo3 = YOLOv3(args.yolo_cfg,
                            args.yolo_weights,
                            args.yolo_names,
                            is_xywh=True,
                            conf_thresh=args.conf_thresh,
                            nms_thresh=args.nms_thresh,
                            use_cuda=use_cuda)
        self.deepsort = DeepSort(args.deepsort_checkpoint, use_cuda=use_cuda)
        self.class_names = self.yolo3.class_names

        self.resnet3d_model, self.resnet_spatial_transform = get_resnet_model()
Example #23
    def __init__(self, args):
        self.args = args
        use_cuda = bool(strtobool(self.args.use_cuda))
        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if not args.image_input:
            self.vdo = cv2.VideoCapture()
        cfg = get_cfg()
        #cfg.merge_from_file("detectron2_repo/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml")
        #cfg.MODEL.WEIGHTS = "detectron2://COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x/139686956/model_final_5ad38f.pkl"
        cfg.merge_from_file("../detectron2_repo/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml")
        cfg.MODEL.WEIGHTS = args.detectron2_weights
        #"detectron2://Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv/18131413/model_0039999_e76410.pkl"
        cfg.MODEL.MASK_ON = False
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 
        #cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5

        self.predictor = DefaultPredictor(cfg)
        self.deepsort = DeepSort(args.deepsort_checkpoint, use_cuda=use_cuda, extractor_type=args.extractor_type, game_id=args.game_id, team_0=args.team_0)
Example #24
    def __init__(self, args):
        self.args = args
        use_cuda = bool(strtobool(self.args.use_cuda))

        #self.vdo = cv2.VideoCapture()
        self.imgList = natsort.natsorted(glob.glob(self.args.imgs_path))
        self.detectron2 = Detectron2()

        # Initialize coordinate mapper
        self.myCoordMapper = coord_mapper.CoordMapperCSG(
            match_code='HUN-BEL 2. Half')
        self.fps = 6

        self.deepsort = DeepSort(args.deepsort_checkpoint,
                                 lambdaParam=0.6,
                                 coordMapper=self.myCoordMapper,
                                 max_dist=1.0,
                                 min_confidence=0.1,
                                 nms_max_overlap=0.7,
                                 max_iou_distance=0.7,
                                 max_age=self.fps * 3,
                                 n_init=3,
                                 nn_budget=50,
                                 use_cuda=use_cuda)
Example #25
def main():
    print('Connecting to camera')
    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture('rtsp://*****:*****@[email protected]/H264?ch=1&subtype=0')  #  - rtsp://admin:comvis@[email protected]:554/H.264
    assert cap.isOpened(), 'Unable to connect to camera'
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    print('Loading models')
    detector = Detector('weights/yolov5s.pt',
                        img_size=(640, 640),
                        conf_thresh=0.4,
                        iou_thresh=0.5,
                        agnostic_nms=False,
                        device=device)
    deepsort = DeepSort('weights/ckpt.t7',
                        max_dist=0.2,
                        min_confidence=0.3,
                        nms_max_overlap=0.5,
                        max_iou_distance=0.7,
                        max_age=70,
                        n_init=3,
                        nn_budget=100,
                        device=device)
    bboxes_visualizer = BoundingBoxesVisualizer()
    fps_estimator = MeanEstimator()
    person_cls_id = detector.names.index('person')  # get id of 'person' class

    width, height = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(
        cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cam_fps = int(cap.get(cv2.CAP_PROP_FPS))
    print(f'Starting capture, camera_fps={cam_fps}')

    # Start of demo
    win_name = 'MICA ReID Demo'
    cv2.namedWindow(win_name, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_FREERATIO)
    cv2.resizeWindow(win_name, width, height)
    frame_id = 0
    while True:
        start_it = time.time()
        ret, img = cap.read()
        if not ret:
            print('Unable to read camera')
            break
        detections = detector.detect([img])[0]

        num_people = 0
        if detections is not None:
            detections = detections[detections[:, -1].eq(
                person_cls_id)]  # filter person
            xywh, confs = parse_detection(detections)
            outputs = deepsort.update(xywh, confs, img)
            num_people = len(outputs)
            bboxes_visualizer.remove([
                t.track_id for t in deepsort.tracker.tracks
                if t.time_since_update > 3 or t.is_deleted()
            ])
            bboxes_visualizer.update(outputs)
            # draw detections
            for pid in outputs[:, -1]:
                bboxes_visualizer.plot(img,
                                       pid,
                                       label=f'Person {pid}',
                                       line_thickness=5,
                                       trail_trajectory=True,
                                       trail_bbox=False)
        # draw counting
        overlay = img.copy()
        count_str = f'Number of people: {num_people}'
        text_size = cv2.getTextSize(count_str, 0, fontScale=0.5,
                                    thickness=1)[0]
        cv2.rectangle(overlay, (10, 10 + 10),
                      (15 + text_size[0], 10 + 20 + text_size[1]),
                      (255, 255, 255), -1)
        img = cv2.addWeighted(overlay, 0.4, img, 0.6, 0)
        cv2.putText(img,
                    count_str, (12, 10 + 15 + text_size[1]),
                    0,
                    0.5, (0, 0, 0),
                    thickness=1,
                    lineType=cv2.LINE_AA)

        # show
        cv2.imshow(win_name, img)
        key = cv2.waitKey(1)
        elapsed_time = time.time() - start_it
        fps = fps_estimator.update(1 / elapsed_time)
        print(
            f'[{frame_id:06d}] num_detections={num_people} fps={fps:.02f} elapsed_time={elapsed_time:.03f}'
        )
        # check key pressed
        if key == ord('q') or key == 27:  # q or esc to quit
            break
        elif key == ord('r'):  # r to reset tracking
            deepsort.reset()
            bboxes_visualizer.clear()
        elif key == 32:  # space to pause
            key = cv2.waitKey(0)
            if key == ord('q') or key == 27:
                break
        frame_id += 1
    cv2.destroyAllWindows()
    cap.release()
Example #26
    num_classes = 80

    CUDA = torch.cuda.is_available()

    bbox_attrs = 5 + num_classes

    #change the path accordingly
    print("Loading network.....")
    model = Darknet("/content/Slow-Fast-pytorch-implementation/cfg/yolov3.cfg")
    model.load_weights("/content/Slow-Fast-pytorch-implementation/yolov3.weights")
    print("Network successfully loaded")

    #change the path accordingly
    print("load deep sort network....")
    deepsort = DeepSort("/content/Slow-Fast-pytorch-implementation/deep/checkpoint/ckpt.t7")
    print("Deep Sort Network successfully loaded")

    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        model.cuda()


    # model(get_test_input(inp_dim, CUDA), CUDA)

    model.eval()
Example #27
def cameraDetectionDemo(dim, pls, real_xy, jsonName, sensorID, udpClient, addr,
                        camIP):
    global isRunning, Frame, is_show
    onnx_file_path = 'weights/yolov3-416-new_best.onnx'
    engine_file_path = "weights/yolov3-416-new_best.trt"
    init_dict = {'trt': engine_file_path, 'onnx': onnx_file_path}
    onnx2trt = OnnxTensorrtModule(init_dict)

    classes = load_classes('data/new.names')
    colors = pkl.load(open("pallete", "rb"))

    # jsonName = 'cfg.json'
    solve_homegraphy = pnp_object_location(jsonName)
    object_2d_points, object_3d_point = solve_homegraphy.com_cfg()
    h, h_inv = solve_homegraphy.solve_Hom(object_2d_points, object_3d_point)

    scale = 1080 / 630
    pls = pls * scale
    M_real = get_perspective(pls, real_xy)

    ic = is_cross(pls)
    if ic == 1:
        tmp = [pls[2][0], pls[2][1]]
        pls[2], pls[3] = pls[3], tmp
    if ic == 2:
        tmp = [pls[2][0], pls[2][1]]
        pls[2], pls[1] = pls[1], tmp
    pls = pls.astype(np.int32)  # np.int was removed in recent NumPy versions

    deepsort = DeepSort('./deep_sort/deep/checkpoint/ckpt.t7')

    is_show = False
    if is_show:
        cv2.namedWindow(sensorID, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(sensorID, (900, 600))

    frameNo = 1
    while isRunning:
        start = time.time()

        detect_time = time.time()
        if Ret and Frame is not None:
            frame = Frame

            detect_time = time.time()
            rgb_image, outputs_tracking = video_demo(frame, dim, pls, onnx2trt,
                                                     deepsort, classes, colors,
                                                     h_inv)

            detect_time = time.time() - detect_time
            detect_time *= 1000
            print(detect_time)

            resultSender(outputs_tracking, h_inv, sensorID, udpClient, addr,
                         detect_time)
            rgb_image = drawM(rgb_image, pls)

            if is_show:
                cv2.imshow(sensorID, rgb_image)
                if cv2.waitKey(1) == 27:
                    break
            # time.sleep(0.05)

            frameNo += 1

            end = max(1, (time.time() - start) * 1000)
            s = '{}: detect:{:.2f} ms total:{:.2f} ms fps:{:.1f}'.format(
                frameNo, detect_time, end, 1000 / end)
            # print(s)
            # print(frameNo)

    isRunning = False
    if is_show:
        cv2.destroyWindow(sensorID)
Example #28

    num_classes = 80

    CUDA = torch.cuda.is_available()

    bbox_attrs = 5 + num_classes

    print("Loading network.....")
    model = Darknet("cfg/yolov3.cfg")
    model.load_weights("yolov3.weights")
    print("Network successfully loaded")

    print("load deep sort network....")
    deepsort = DeepSort("deep/checkpoint/ckpt.t7")
    print("Deep Sort Network successfully loaded")

    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        model.cuda()

    # model(get_test_input(inp_dim, CUDA), CUDA)

    model.eval()

    #######for sp detec##########
Example #29
import logging

from deep_sort import DeepSort
from yolo3.detect.video_detect import VideoDetector
from yolo3.models import Darknet

if __name__ == '__main__':
    LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)

    model = Darknet("config/yolov4.cfg", img_size=(608, 608))
    model.load_darknet_weights("weights/yolov4.weights")
    model.to("cuda:0")

    # Tracker
    tracker = DeepSort("weights/ckpt.t7",
                       min_confidence=1,
                       use_cuda=True,
                       nn_budget=30,
                       n_init=3,
                       max_iou_distance=0.7,
                       max_dist=0.3,
                       max_age=30)

    # Action Identify
    # action_id = ActionIdentify(actions=[TakeOff(4, delta=(0, 1)),
    #                                     Landing(4, delta=(2, 2)),
    #                                     Glide(4, delta=(1, 2)),
    #                                     FastCrossing(4, speed=0.2),
    #                                     BreakInto(0, timeout=2)],
    #                            max_age=30,
    #                            max_size=8)

    video_detector = VideoDetector(
        model,
Example #30
if not exists('yolov3.weights'):
  !wget -q https://pjreddie.com/media/files/yolov3.weights
    
if not exists('ckpt.t7'):
  file_id = '1_qwTWdzT9dWNudpusgKavj_4elGgbkUN'
  !curl -Lb ./cookie "https://drive.google.com/uc?export=download&id={file_id}" -o ckpt.t7

import cv2
import time

from YOLOv3 import YOLOv3
from deep_sort import DeepSort
from util import draw_bboxes

yolo3 = YOLOv3("deep_sort_pytorch/YOLOv3/cfg/yolo_v3.cfg","yolov3.weights","deep_sort_pytorch/YOLOv3/cfg/coco.names", is_xywh=True)
deepsort = DeepSort("ckpt.t7")

VIDEO_URL = 'http://www.robots.ox.ac.uk/ActiveVision/Research/Projects/2009bbenfold_headpose/Datasets/TownCentreXVID.avi'
DURATION_S = 20  # process only the first 20 seconds



video_file_name = 'video.mp4'
if not exists(video_file_name):
  !wget -q $VIDEO_URL
  downloaded_file_name = basename(VIDEO_URL)
  # convert to MP4, because we can show only MP4 videos in the colab notebook
  !ffmpeg -y -loglevel info -t $DURATION_S -i $downloaded_file_name $video_file_name
  

def show_local_mp4_video(file_name, width=640, height=480):