def __init__(self, args):
    """Set up the detector, tracker, and optional preview window from CLI args.

    Args:
        args: parsed command-line namespace; must provide use_cuda,
            display, display_width, display_height, deepsort_checkpoint.
    """
    self.args = args
    # use_cuda arrives as a yes/no style string; strtobool maps it to 1/0.
    use_cuda = strtobool(self.args.use_cuda) != 0
    if self.args.display:
        # Resizable preview window for visualizing tracking output.
        cv2.namedWindow("test", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("test", self.args.display_width, self.args.display_height)
    self.vdo = cv2.VideoCapture()
    self.detectron2 = Detectron2()
    self.deepsort = DeepSort(args.deepsort_checkpoint, use_cuda=use_cuda)
Exemple #2
0
    def __init__(self, args):
        """Build the Detectron2 detector and, when enabled, a DeepSORT tracker.

        Args:
            args: parsed command-line namespace; must provide use_cuda,
                detectron_cfg, detectron_ckpt, deep_sort and, when
                deep_sort is truthy, deepsort_checkpoint.
        """
        self.args = args

        # The CLI flag is a yes/no style string; normalize it to a bool once.
        cuda_enabled = strtobool(self.args.use_cuda) != 0

        self.detectron2 = Detectron2(self.args.detectron_cfg,
                                     self.args.detectron_ckpt)

        # The tracker is optional — only construct it when requested.
        if self.args.deep_sort:
            self.deepsort = DeepSort(args.deepsort_checkpoint,
                                     use_cuda=cuda_enabled)
Exemple #3
0
    def __init__(self, args):
        """Wire up capture device, detector, coordinate mapper, and tracker.

        Args:
            args: parsed command-line namespace; must provide use_cuda
                and deepsort_checkpoint.
        """
        self.args = args
        cuda_on = strtobool(self.args.use_cuda) != 0

        self.vdo = cv2.VideoCapture()
        self.detectron2 = Detectron2()

        # Maps image coordinates to pitch coordinates for the ISSIA camera setup.
        mapper = coord_mapper.CoordMapper(coord_mapper.ISSIA_kozep_elorol)

        # Tracker tuning gathered in one place for readability.
        tracker_opts = dict(
            lambdaParam=1.0,
            coordMapper=mapper,
            max_dist=1.0,
            min_confidence=0.1,
            nms_max_overlap=0.7,
            max_iou_distance=0.7,
            max_age=75,
            n_init=3,
            nn_budget=50,
            use_cuda=cuda_on,
        )
        self.deepsort = DeepSort(args.deepsort_checkpoint, **tracker_opts)
Exemple #4
0
    def __init__(self, args):
        """Collect input frames and build the detector, mapper, and tracker.

        Args:
            args: parsed command-line namespace; must provide use_cuda,
                imgs_path (glob pattern), and deepsort_checkpoint.
        """
        self.args = args
        cuda_flag = strtobool(self.args.use_cuda) != 0

        # Frames come from still images on disk, sorted in natural order.
        self.imgList = natsort.natsorted(glob.glob(self.args.imgs_path))
        self.detectron2 = Detectron2()

        # Image-to-pitch coordinate mapping for this specific match half.
        self.myCoordMapper = coord_mapper.CoordMapperCSG(
            match_code='HUN-BEL 2. Half')
        self.fps = 6

        # max_age = fps * 3: a track survives up to three seconds unmatched.
        self.deepsort = DeepSort(
            args.deepsort_checkpoint,
            lambdaParam=0.6,
            coordMapper=self.myCoordMapper,
            max_dist=1.0,
            min_confidence=0.1,
            nms_max_overlap=0.7,
            max_iou_distance=0.7,
            max_age=self.fps * 3,
            n_init=3,
            nn_budget=50,
            use_cuda=cuda_flag,
        )
    def __init__(self):
        """Load config.yaml and initialize the detector plus dataset-specific
        calibration, odometry, and threshold parameters (KITTI or TUM).

        Raises:
            FileNotFoundError: if config.yaml or any configured data file
                is missing.
            KeyError: if the config file lacks an expected section.
        """
        self.use_cuda = True
        self.display = True
        # safe_load never constructs arbitrary Python objects; the previous
        # bare yaml.load(open(...)) was deprecated/unsafe and also leaked
        # the file handle.
        with open('config.yaml', 'r') as cfg_file:
            self.config = yaml.safe_load(cfg_file)
        self.dataset = self.config['DATASET']['NAME']
        self.detectron2 = Detectron2()

        if self.dataset == 'kitti':
            kitti = self.config['DATASET']['KITTI']
            self.rgb_path = kitti['DATA_PATH']
            self.sequence_name = kitti['SEQUENCE_NAME']

            self.sequence_list = os.listdir(self.rgb_path)
            # LiDAR-to-camera extrinsic transform.
            self.velo2cam = np.array(kitti['TRANSFORMS']['Velo2cam'])

            # Context managers close the handles; the previous
            # open(...).readlines() calls leaked file descriptors.
            with open(kitti['TIMESTAMPS']) as f:
                self.kitti_timestamps = f.readlines()
            with open(kitti['ODOM_PATH']) as f:
                self.kitti_odom = f.readlines()

            self.matches = sorted(self.sequence_list)
            self.max_vel = kitti['MAX_VEL']
            self.max_iou = kitti['MAX_IOU']
            self.max_depth = kitti['MAX_DEPTH']
            self.min_depth = kitti['MIN_DEPTH']
            self.mask_points = kitti['MASK_POINTS']

            # Camera intrinsics: focal lengths and optical center.
            camera = kitti['CAMERA']
            self.fx = camera['focal_length_x']
            self.fy = camera['focal_length_y']
            self.cx = camera['optical_center_x']
            self.cy = camera['optical_center_y']

        elif self.dataset == 'tum':
            tum = self.config['DATASET']['TUM']
            self.sequence_name = tum['SEQUENCE_NAME']
            self.rgb_path = tum['RGB_PATH']
            self.depth_path = tum['DEPTH_PATH']
            self.odom_path = tum['ODOM_PATH']

            self.first_list = read_file_list(self.rgb_path + '.txt')
            self.second_list = read_file_list(self.depth_path + '.txt')
            self.third_list = read_file_list(self.odom_path + '.txt')

            # Associate RGB, depth, and odometry entries by timestamp
            # (offset 0.0 s, max difference 0.02 s).
            self.matches = associate(self.first_list, self.second_list,
                                     self.third_list, 0.0, 0.02)

            self.max_vel = tum['MAX_VEL']
            self.max_iou = tum['MAX_IOU']
            self.max_depth = tum['MAX_DEPTH']
            self.min_depth = tum['MIN_DEPTH']
            self.depth_factor = tum['DEPTH_FACTOR']
            self.mask_points = tum['MASK_POINTS']

            # Camera intrinsics: focal lengths and optical center.
            camera = tum['CAMERA']
            self.fx = camera['focal_length_x']
            self.fy = camera['focal_length_y']
            self.cx = camera['optical_center_x']
            self.cy = camera['optical_center_y']

        self.class_names = self.config['CLASSES']['ALL']
        self.rigid = self.config['CLASSES']['RIGID']
        self.not_rigid = self.config['CLASSES']['NON_RIGID']
        self.save_mask = self.config['SAVE_MASK']

        # All values are unpacked into attributes; drop the raw config dict.
        del self.config
            # Use the caller-supplied label for this shape index.
            # (The guarding `if` is above this visible chunk — confirm upstream.)
            label = labels[idx]
        # Fall back to "human" when no explicit label was provided.
        else:
            label = "human"
        shape_jsons_l.append(_get_shape_j(point, label))

    # Bundle all shapes into a LabelMe-format annotation for image `im`.
    return _get_labelme_template(im, shape_jsons_l, imp, h, w)


if __name__ == "__main__":
    from detectron2_detection import Detectron2
    import json

    # Model weights and config for the VisDrone-trained detector.
    w_p = "/nfs/gpu14_datasets/surveillance_weights/visdrone_t1/model_0111599.pth"
    cfg_p = "/nfs/gpu14_datasets/surveillance_weights/visdrone_t1/test.yaml"
    det = Detectron2(cfg_path=cfg_p, weights_path=w_p)

    ### Params to be changed
    # presumably: run detection every `process_freq` frames — TODO confirm
    # against the (not visible here) frame-processing loop.
    process_freq = 100
    root = "/data/client_datasets/idea_forge/videos/29_may/"
    # vs = ["03April202017_42_05.mp4", "07April202012_40_03.mp4", "10April202019_02_52.mp4", "11April202016_23_55.mp4", "13April202017_22_58.mp4", "14April202009_16_45.mp4"]
    vs = ["06April202012_06_22.mp4"]
    vids = [root + i for i in vs]
    save_root = "/data/client_datasets/idea_forge/annotations/model_preds"

    # One output folder per video, named after the video file.
    # NOTE(review): os.mkdir raises if the folder already exists, and the
    # loop body continues beyond this visible chunk.
    for v in vids:
        print(v)
        base_name = os.path.basename(v).replace(".mp4", "_")
        save_f = os.path.join(save_root, base_name)
        os.mkdir(save_f)
        cap = cv2.VideoCapture(v)
Exemple #7
0
    def __init__(self):
        """Load config.yaml, set up the detector and ROS pub/sub, then read
        dataset-specific calibration and odometry (KITTI or TUM).

        Raises:
            FileNotFoundError: if config.yaml or any configured data file
                is missing.
            KeyError: if the config file lacks an expected section.
        """
        self.use_cuda = True
        self.display = True
        # safe_load never constructs arbitrary Python objects; the previous
        # bare yaml.load(open(...)) was deprecated/unsafe and also leaked
        # the file handle.
        with open('config.yaml', 'r') as cfg_file:
            self.config = yaml.safe_load(cfg_file)
        self.dataset = self.config['DATASET']['NAME']
        self.detectron2 = Detectron2()
        # ROS wiring: publish segmentation masks, subscribe to camera pose.
        self.image_pub = rospy.Publisher("/segmentation_mask",
                                         Image_ros,
                                         queue_size=10,
                                         latch=True)
        self.pose_sub = rospy.Subscriber("/camera_pose", Image_ros,
                                         self.pose_callback)
        # Identity pose until the first /camera_pose message arrives.
        self.pose = np.eye(4)
        self.bridge = CvBridge()

        if self.dataset == 'kitti':
            kitti = self.config['DATASET']['KITTI']

            self.sequence_list = os.listdir(kitti['DATA_PATH'])
            # LiDAR-to-camera extrinsic transform.
            self.velo2cam = np.array(kitti['TRANSFORMS']['Velo2cam'])

            # Context managers close the handles; the previous
            # open(...).readlines() calls leaked file descriptors.
            with open(kitti['TIMESTAMPS']) as f:
                self.kitti_timestamps = f.readlines()
            with open(kitti['ODOM_PATH']) as f:
                self.kitti_odom = f.readlines()

            self.matches = sorted(self.sequence_list)
            self.max_vel = kitti['MAX_VEL']

            # Camera intrinsics: focal lengths and optical center.
            camera = kitti['CAMERA']
            self.fx = camera['focal_length_x']
            self.fy = camera['focal_length_y']
            self.cx = camera['optical_center_x']
            self.cy = camera['optical_center_y']

        elif self.dataset == 'tum':
            tum = self.config['DATASET']['TUM']
            self.first_list = read_file_list(tum['RGB_PATH'] + '.txt')
            self.second_list = read_file_list(tum['DEPTH_PATH'] + '.txt')
            self.third_list = read_file_list(tum['ODOM_PATH'] + '.txt')

            # Associate RGB, depth, and odometry entries by timestamp
            # (offset 0.0 s, max difference 0.02 s).
            self.matches = associate(self.first_list, self.second_list,
                                     self.third_list, 0.0, 0.02)

            self.max_vel = tum['MAX_VEL']
            # Camera intrinsics: focal lengths and optical center.
            camera = tum['CAMERA']
            self.fx = camera['focal_length_x']
            self.fy = camera['focal_length_y']
            self.cx = camera['optical_center_x']
            self.cy = camera['optical_center_y']

        self.class_names = self.config['CLASSES']['ALL']
        self.rigid = self.config['CLASSES']['RIGID']
        self.not_rigid = self.config['CLASSES']['NON_RIGID']
 def __init__(self, args):
     # Minimal setup: store args, create an (unopened) capture device and
     # the detector. NOTE(review): likely truncated — the original snippet
     # may continue past the last visible line.
     self.args = args
     self.vdo = cv2.VideoCapture()
     self.detectron2 = Detectron2()