Example #1
    def __init__(self, opt, polygon, paths, polygon2=None, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
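        # EfficientDet anchor configuration and the input resolution used for
        # each compound coefficient (d0-d7).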
        anchor_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
        anchor_scales = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
        input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
        self.input_size = input_sizes[opt.compound_coef]
        if opt.detection_model == 'Efficient':
            self.obj_list =['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
                'fire hydrant', '', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
                'cow', 'elephant', 'bear', 'zebra', 'giraffe', '', 'backpack', 'umbrella', '', '', 'handbag', 'tie',
                'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
                'skateboard', 'surfboard', 'tennis racket', 'bottle', '', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
                'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut',
                'cake', 'chair', 'couch', 'potted plant', 'bed', '', 'dining table', '', '', 'toilet', '', 'tv',
                'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
                'refrigerator', '', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
                'toothbrush']
            self.person_or_motorcycle = ['person']
            self.obj_interest = ['motorcycle', 'bicycle', 'bus', 'truck', 'car'] if self.person_or_motorcycle[0] != 'person' else ['person', 'bus', 'truck', 'car']
            self.detetection_model = EfficientDetBackbone(compound_coef=opt.compound_coef, num_classes=len(self.obj_list),
                                                          ratios=anchor_ratios, scales=anchor_scales)
        
            self.detetection_model.load_state_dict(torch.load(f'EfficientDet/weights/efficientdet-d{opt.compound_coef}.pth'))
            self.detetection_model.eval()
            # Use the device selected from opt.gpus (CPU or CUDA).
            self.detetection_model = self.detetection_model.to(opt.device)
        elif opt.detection_model == 'FasterRcnn':
            config_file = "Drone_FasterRCNN/drone_demo/e2e_faster_rcnn_X_101_32x8d_FPN_1x_visdrone.yaml"
            cfg.merge_from_file(config_file)
            cfg.merge_from_list(["MODEL.WEIGHT", "Drone_FasterRCNN/drone_demo/visdrone_model_0360000.pth"])
            self.detetection_model = COCODemo(
                cfg,
                min_image_size=opt.min_img_size,
                confidence_threshold=opt.conf_thres,
            )
            label_of_interest=[
                    # "__background",
                    # "unused",
                    # "pedestrian",
                    # "person",
                    # "bicycle",
                    "car",
                    "van",
                    "truck",
                    # "tricycle",
                    # "awning-tricycle",
                    "bus",
                    "motor"
            ]
            self.person_or_motorcycle = ['motor']
            # 'bicycle'
            self.obj_interest = ['motor', 'bus', 'truck', 'car', 'van', 'tricycle'] if self.person_or_motorcycle[0] != 'person' else ['person', 'bus', 'truck', 'car', 'van', 'tricycle']
        else:
            raise ValueError('Unsupported detector model')

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

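        # Detection threshold, lost-track buffer (scaled from a 30 fps baseline)
        # and per-image detection cap.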
        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K

        self.kalman_filter = KalmanFilter()

        self.polygon = polygon
        self.paths = paths
        self.polygon2 = polygon2
        # Edges of the 5-point counting polygon, stored as individual line segments.
        self.line1 = [polygon[0], polygon[1]]
        self.line2 = [polygon[1], polygon[2]]
        self.line3 = [polygon[2], polygon[3]]
        self.line4 = [polygon[0], polygon[4]]
        self.two_polygon_system = True
        self.warmup_frame = 0
        self.virtual_polygon = [[0, 680], [0, 149], [1270, 149], [1270, 680]]
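
For orientation, a minimal usage sketch follows. The class name Tracker and the concrete values placed on opt are assumptions inferred from the attributes this constructor reads; only the call shape is illustrated, not a real configuration.

# Hypothetical usage sketch -- `Tracker` is an assumed name for the class that
# owns the __init__ above, and the opt fields mirror what the constructor reads.
from types import SimpleNamespace

opt = SimpleNamespace(
    gpus=[0],                     # first entry >= 0 selects CUDA, else CPU
    detection_model='Efficient',  # or 'FasterRcnn'
    compound_coef=4,              # index into input_sizes (512 ... 1536)
    conf_thres=0.4,               # detection / new-track confidence threshold
    track_buffer=30,              # frames a lost track is kept alive
    K=100,                        # max detections kept per image
    min_img_size=800,             # only used by the FasterRcnn branch
)

# A 5-point counting polygon (line4 indexes polygon[4]) with example coordinates,
# plus application-specific movement paths.
polygon = [[0, 680], [0, 149], [1270, 149], [1270, 680], [635, 710]]
paths = {}

tracker = Tracker(opt, polygon, paths, polygon2=None, frame_rate=30)
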
Example #2
    def __init__(self, opt, polygon, paths, polygon2=None, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        anchor_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
        anchor_scales = [2**0, 2**(1.0 / 3.0), 2**(2.0 / 3.0)]
        input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
        self.input_size = input_sizes[opt.compound_coef]
        self.obj_list = [
            'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
            'train', 'truck', 'boat', 'traffic light', 'fire hydrant', '',
            'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
            'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
            '', 'backpack', 'umbrella', '', '', 'handbag', 'tie', 'suitcase',
            'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
            'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
            'tennis racket', 'bottle', '', 'wine glass', 'cup', 'fork',
            'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
            'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
            'couch', 'potted plant', 'bed', '', 'dining table', '', '',
            'toilet', '', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
            'cell phone', 'microwave', 'oven', 'toaster', 'sink',
            'refrigerator', '', 'book', 'clock', 'vase', 'scissors',
            'teddy bear', 'hair drier', 'toothbrush'
        ]
        self.person_or_motorcycle = ['motorcycle', 'bicycle']
        self.obj_interest = [
            'motorcycle', 'bicycle', 'bus', 'truck', 'car'
        ] if self.person_or_motorcycle[0] != 'person' else [
            'person', 'bus', 'truck', 'car'
        ]
        print(self.obj_interest)
        self.detetection_model = EfficientDetBackbone(
            compound_coef=opt.compound_coef,
            num_classes=len(self.obj_list),
            ratios=anchor_ratios,
            scales=anchor_scales)

        self.detetection_model.load_state_dict(
            torch.load(
                f'EfficientDet/weights/efficientdet-d{opt.compound_coef}.pth'))
        self.detetection_model.eval()
        # Use the device selected from opt.gpus (CPU or CUDA).
        self.detetection_model = self.detetection_model.to(opt.device)

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K

        self.kalman_filter = KalmanFilter()

        self.polygon = polygon
        self.paths = paths
        self.polygon2 = polygon2
        self.line2 = [self.polygon[1], self.polygon[2]]
        self.line1 = [self.polygon[4], self.polygon[3]] if len(
            self.polygon) == 5 else [self.polygon[0], self.polygon[3]] if len(
                self.polygon) == 4 else None
        self.two_polygon_system = False
        self.warmup_frame = 6 if self.two_polygon_system else 0
        self.virtual_polygon = [[0, 573], [0, 109], [1270, 109], [1270, 573]]
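
The chained conditional expression that selects self.line1 in Example #2 is compact but hard to scan; the sketch below restates the same choice as a plain helper function (an illustration only, not part of the original class).

# Equivalent, unrolled form of the line1 selection in Example #2.
def select_line1(polygon):
    """Pick the counting line opposite line2, depending on polygon size."""
    if len(polygon) == 5:
        return [polygon[4], polygon[3]]
    if len(polygon) == 4:
        return [polygon[0], polygon[3]]
    return None  # other polygon sizes are not handled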