Ejemplo n.º 1
0
    def read_config(self):
        """Load the detector config, build the network, and set up voxelization.

        Reads ``self.config_path`` and ``self.model_path`` (set by the caller),
        moves the model to GPU when available, and constructs the
        ``VoxelGenerator`` plus the lidar<->IMU transform matrices used later.
        """
        # Fix: dropped the unused local alias `config_path`.
        cfg = Config.fromfile(self.config_path)
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.net = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        self.net.load_state_dict(torch.load(self.model_path)["state_dict"])
        self.net = self.net.to(self.device).eval()

        self.range = cfg.voxel_generator.range
        self.voxel_size = cfg.voxel_generator.voxel_size
        self.max_points_in_voxel = cfg.voxel_generator.max_points_in_voxel
        self.max_voxel_num = cfg.voxel_generator.max_voxel_num
        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )
        # nuscenes dataset extrinsics (translation + quaternion rotation).
        lidar2imu_t = np.array([0.985793, 0.0, 1.84019])
        lidar2imu_r = Quaternion([0.706749235, -0.01530099378, 0.0173974518, -0.7070846])

        ## UDI dataset
        # lidar2imu_t = np.array([1.50, 0., 1.42])
        # lidar2imu_r = Quaternion([1., 0., 0., 0.])
        self.lidar2imu = transform_matrix(lidar2imu_t, lidar2imu_r, inverse=True)
        self.imu2lidar = transform_matrix(lidar2imu_t, lidar2imu_r, inverse=False)
Ejemplo n.º 2
0
def getDataFromPcd(path, cfg):
    """Load a PCD file, rotate the points by ``cfg.rotMat``, and voxelize.

    Returns the dict of network inputs: points, voxels, shape (voxel grid
    size), num_points (per voxel), num_voxels, and coordinates.
    """
    raw = pcl.load(path).to_array()
    points = np.matmul(cfg.rotMat, raw.T).T

    # Build the voxel generator from the config section.
    vgcfg = cfg.voxel_generator
    vg = VoxelGenerator(
        voxel_size=vgcfg.voxel_size,
        point_cloud_range=vgcfg.range,
        max_num_points=vgcfg.max_points_in_voxel,
        max_voxels=vgcfg.max_voxel_num,
    )

    voxels, coordinates, num_points = vg.generate(points)

    return dict(points=points,
                voxels=voxels,
                shape=vg.grid_size,
                num_points=num_points,
                num_voxels=np.array([voxels.shape[0]], dtype=np.int64),
                coordinates=coordinates)
Ejemplo n.º 3
0
class Voxelization(object):
    """Convert lidar points into a fixed-capacity voxel grid.

    In train mode, ground-truth boxes outside the bird's-eye-view (BEV)
    range are filtered out before voxelization. When
    ``include_pt_to_voxel`` is set in the config, the generator also
    returns the point-to-voxel index map, which is stored alongside the
    voxels.
    """

    def __init__(self, **kwargs):
        cfg = kwargs.get("cfg", None)
        self.range = cfg.range
        self.voxel_size = cfg.voxel_size
        self.max_points_in_voxel = cfg.max_points_in_voxel
        self.max_voxel_num = cfg.max_voxel_num
        # Optionally ask the generator for the point->voxel index map.
        self.include_pt_to_voxel = cfg.get("include_pt_to_voxel", False)

        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
            return_pt_to_voxel=self.include_pt_to_voxel,
        )

    def __call__(self, res, info):
        # Fix: removed the unused `voxel_size` local and the dead
        # `valid_idx` read (the value was assigned but never used).
        pc_range = self.voxel_generator.point_cloud_range
        grid_size = self.voxel_generator.grid_size

        if res["mode"] == "train":
            gt_dict = res["lidar"]["annotations"]
            # Keep only boxes inside the BEV (x/y) extent of the range.
            bv_range = pc_range[[0, 1, 3, 4]]
            mask = prep.filter_gt_box_outside_range(gt_dict["gt_boxes"],
                                                    bv_range)
            _dict_select(gt_dict, mask)

            res["lidar"]["annotations"] = gt_dict

        points = res["lidar"]["points"]

        if self.include_pt_to_voxel:
            voxels, coordinates, num_points, pt_to_voxel = \
                self.voxel_generator.generate(points)
        else:
            voxels, coordinates, num_points = self.voxel_generator.generate(
                points)

        num_voxels = np.array([voxels.shape[0]], dtype=np.int64)

        res["lidar"]["voxels"] = dict(
            voxels=voxels,
            coordinates=coordinates,
            num_points=num_points,
            num_voxels=num_voxels,
            shape=grid_size,
        )

        if self.include_pt_to_voxel:
            res["lidar"]["voxels"]["pt_to_voxel"] = pt_to_voxel

        return res, info
Ejemplo n.º 4
0
class Voxelization(object):
    """Voxelize lidar points and, when present, the raw (untransformed) points."""

    def __init__(self, **kwargs):
        cfg = kwargs.get("cfg", None)
        self.range = cfg.range  # [0, -40.0, -3.0, 70.4, 40.0, 1.0]
        self.voxel_size = cfg.voxel_size  # [0.05, 0.05, 0.1]
        self.max_points_in_voxel = cfg.max_points_in_voxel  # 5
        self.max_voxel_num = cfg.max_voxel_num  # 20000
        # NOTE(review): far_points_first is stored but never consulted in
        # this variant's __call__ — confirm whether the pre-voxelization
        # reordering was intentionally dropped.
        self.far_points_first = cfg.far_points_first
        self.shuffle = False

        self.voxel_generator = VoxelGenerator(
            point_cloud_range=self.range,
            voxel_size=self.voxel_size,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )

    def _voxelize(self, pts, grid_size):
        """Run the generator on *pts* and pack the standard voxel dict."""
        voxels, coords, per_voxel_counts = self.voxel_generator.generate(pts)
        return dict(
            voxels=voxels,
            coordinates=coords,
            num_points=per_voxel_counts,
            num_voxels=np.array([voxels.shape[0]], dtype=np.int64),
            shape=grid_size,
        )

    def __call__(self, res, info):
        pc_range = self.voxel_generator.point_cloud_range
        grid_size = self.voxel_generator.grid_size

        if res["mode"] == "train" and res['labeled']:
            # Drop ground-truth boxes outside the BEV range.
            gt_dict = res["lidar"]["annotations"]
            bv_range = pc_range[[0, 1, 3, 4]]
            mask = prep.filter_gt_box_outside_range(gt_dict["gt_boxes"],
                                                    bv_range)
            _dict_select(gt_dict, mask)
            res["lidar"]["annotations"] = gt_dict
            self.shuffle = True

        res["lidar"]["voxels"] = self._voxelize(res["lidar"]["points"],
                                                grid_size)

        # Voxelize the untransformed raw points as well when available.
        if "points_raw" in res["lidar"].keys():
            res["lidar"]["voxels_raw"] = self._voxelize(
                res["lidar"]["points_raw"], grid_size)

        return res, info
Ejemplo n.º 5
0
    def __init__(self, **kwargs):
        """Cache voxelization settings from ``cfg`` and build the generator."""
        cfg = kwargs.get("cfg", None)
        pc_range = cfg.range
        vsize = cfg.voxel_size
        pts_per_voxel = cfg.max_points_in_voxel
        voxel_cap = cfg.max_voxel_num

        self.range = pc_range
        self.voxel_size = vsize
        self.max_points_in_voxel = pts_per_voxel
        self.max_voxel_num = voxel_cap

        self.voxel_generator = VoxelGenerator(
            voxel_size=vsize,
            point_cloud_range=pc_range,
            max_num_points=pts_per_voxel,
            max_voxels=voxel_cap,
        )
Ejemplo n.º 6
0
    def __init__(self, **kwargs):
        """Read voxelization settings; normalize ``max_voxel_num`` to a pair.

        A scalar ``max_voxel_num`` is duplicated into a two-element list;
        the generator itself is constructed with the first entry.
        """
        cfg = kwargs.get("cfg", None)
        self.range = cfg.range
        self.voxel_size = cfg.voxel_size
        self.max_points_in_voxel = cfg.max_points_in_voxel
        cap = cfg.max_voxel_num
        if isinstance(cap, int):
            cap = [cap, cap]
        self.max_voxel_num = cap

        self.double_flip = cfg.get('double_flip', False)

        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num[0],
        )
Ejemplo n.º 7
0
    def __init__(self, **kwargs):
        """Store voxelization settings and construct the point-cloud voxelizer."""
        cfg = kwargs.get("cfg", None)
        # Detection range / grid resolution, e.g. [0, -40, -3, 70.4, 40, 1]
        # with [0.05, 0.05, 0.1] voxels, at most 5 points per voxel and a
        # 20000-voxel budget.
        self.range = cfg.range
        self.voxel_size = cfg.voxel_size
        self.max_points_in_voxel = cfg.max_points_in_voxel
        self.max_voxel_num = cfg.max_voxel_num
        self.far_points_first = cfg.far_points_first
        # Flipped to True once a training sample has been processed.
        self.shuffle = False

        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )
Ejemplo n.º 8
0
    def read_config(self):
        """Build the detector network and voxel generator from the config file.

        Uses ``self.config_path`` / ``self.model_path`` set by the caller;
        selects CUDA when available and puts the network in eval mode.
        """
        # Fix: dropped the unused local alias `config_path`.
        cfg = Config.fromfile(self.config_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.net = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        self.net.load_state_dict(torch.load(self.model_path)["state_dict"])
        self.net = self.net.to(self.device).eval()

        self.range = cfg.voxel_generator.range
        self.voxel_size = cfg.voxel_generator.voxel_size
        self.max_points_in_voxel = cfg.voxel_generator.max_points_in_voxel
        self.max_voxel_num = cfg.voxel_generator.max_voxel_num
        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )
Ejemplo n.º 9
0
def build_voxel_generator(voxel_config, max_voxels=20000):
    """Create a ``VoxelGenerator`` from *voxel_config*.

    Parameters
    ----------
    voxel_config : object
        Config exposing ``VOXEL_SIZE``, ``RANGE`` and
        ``MAX_POINTS_NUM_PER_VOXEL`` attributes.
    max_voxels : int, optional
        Upper bound on the number of voxels kept. Previously a hard-coded
        constant; exposed as a backward-compatible keyword (default 20000).
    """
    return VoxelGenerator(
        voxel_size=voxel_config.VOXEL_SIZE,
        point_cloud_range=voxel_config.RANGE,
        max_num_points=voxel_config.MAX_POINTS_NUM_PER_VOXEL,
        max_voxels=max_voxels,
    )
Ejemplo n.º 10
0
def getDataFromDepth(path, cfg):
    '''
    Build network-ready voxel data from a depth image.
        path: depth image path
        cfg: config providing the camera intrinsic file path, maxDepth,
             rotMat, and the voxel_generator settings
    '''
    # Read the camera intrinsics (fx, fy, cx, cy) from the calibration file.
    fs = cv.FileStorage(cfg.intrinsic, cv.FILE_STORAGE_READ)
    cameraMat = fs.getNode('CameraMat').mat()
    # distCoeff = fs.getNode('DistCoeff').mat()
    fs.release()
    fx, fy = cameraMat[0, 0], cameraMat[1, 1]
    cx, cy = cameraMat[0, 2], cameraMat[1, 2]

    # Back-project depth pixels to 3-D points, then rotate by cfg.rotMat.
    depth_img = cv.imread(path, -1)
    points = depth2points(depth_img, fx, fy, cx, cy,
                          cfg.maxDepth).reshape((-1, 3))
    points = np.matmul(cfg.rotMat, points.T).T

    vgcfg = cfg.voxel_generator
    vg = VoxelGenerator(
        voxel_size=vgcfg.voxel_size,
        point_cloud_range=vgcfg.range,
        max_num_points=vgcfg.max_points_in_voxel,
        max_voxels=vgcfg.max_voxel_num,
    )

    voxels, coordinates, num_points = vg.generate(points)

    return dict(points=points,
                voxels=voxels,
                shape=vg.grid_size,
                num_points=num_points,
                num_voxels=np.array([voxels.shape[0]], dtype=np.int64),
                coordinates=coordinates)
Ejemplo n.º 11
0
class Voxelization(object):
    '''
    Voxelize the lidar point cloud of one sample.

    configs:
        range=[0, -40.0, -3.0, 70.4, 40.0, 1.0],
        voxel_size=[0.05, 0.05, 0.1],
        max_points_in_voxel=5,
        max_voxel_num=20000,
    '''
    def __init__(self, **kwargs):
        cfg = kwargs.get("cfg", None)
        self.range = cfg.range
        self.voxel_size = cfg.voxel_size
        self.max_points_in_voxel = cfg.max_points_in_voxel
        self.max_voxel_num = cfg.max_voxel_num

        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )

    def __call__(self, res, info):
        gen = self.voxel_generator
        pc_range = gen.point_cloud_range  # [0, -40, -3, 70.4, 40, 1]
        grid_size = gen.grid_size  # [1408, 1600, 40]

        # remove gt_boxes out of bv_range
        if res["mode"] == "train":
            gt_dict = res["lidar"]["annotations"]
            bv_range = pc_range[[0, 1, 3, 4]]  # [0., -40., 70.4, 40.]
            # todo: try get_valid_mask_by_pc_valid_range in preprocess.py, how about z-axis
            keep = prep.filter_gt_box_outside_range(gt_dict["gt_boxes"],
                                                    bv_range)
            _dict_select(gt_dict, keep)
            res["lidar"]["annotations"] = gt_dict

        # todo: how about the points out of pc_valid_range
        voxels, coordinates, num_points = gen.generate(res["lidar"]["points"])

        # pack voxelization result
        res["lidar"]["voxels"] = dict(
            voxels=voxels,
            coordinates=coordinates,
            num_points=num_points,
            num_voxels=np.array([voxels.shape[0]], dtype=np.int64),
            shape=grid_size,
        )

        return res, info
Ejemplo n.º 12
0
class Voxelization(object):
    """Voxelization with optional far-points-first reordering of the cloud."""

    def __init__(self, **kwargs):
        cfg = kwargs.get("cfg", None)
        self.range = cfg.range  # [0, -40.0, -3.0, 70.4, 40.0, 1.0]
        self.voxel_size = cfg.voxel_size  # [0.05, 0.05, 0.1]
        self.max_points_in_voxel = cfg.max_points_in_voxel  # 5
        self.max_voxel_num = cfg.max_voxel_num  # 20000
        # Presumably reorders distant points to the front so they survive
        # the voxel budget — see box_np_ops.far_points_first for semantics.
        self.far_points_first = cfg.far_points_first
        self.shuffle = False

        self.voxel_generator = VoxelGenerator(
            point_cloud_range=self.range,
            voxel_size=self.voxel_size,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )

    def __call__(self, res, info):
        pc_range = self.voxel_generator.point_cloud_range
        grid_size = self.voxel_generator.grid_size

        if res["mode"] == "train":
            # Drop (augmented) gt boxes that left the valid BEV range.
            gt_dict = res["lidar"]["annotations"]
            bv_range = pc_range[[0, 1, 3, 4]]
            # todo: try get_valid_mask_by_pc_valid_range in preprocess.py, how about z-axis
            keep = prep.filter_gt_box_outside_range(gt_dict["gt_boxes"],
                                                    bv_range)
            _dict_select(gt_dict, keep)
            res["lidar"]["annotations"] = gt_dict
            self.shuffle = True

        points = res["lidar"]["points"]
        if self.far_points_first:
            points = box_np_ops.far_points_first(points, 40.0,
                                                 self.max_voxel_num,
                                                 self.shuffle)

        voxels, coords, counts = self.voxel_generator.generate(points)

        # pack voxelization result
        res["lidar"]["voxels"] = dict(
            voxels=voxels,
            coordinates=coords,
            num_points=counts,
            num_voxels=np.array([voxels.shape[0]], dtype=np.int64),
            shape=grid_size,
        )
        return res, info
Ejemplo n.º 13
0
class Voxelization(object):
    """Standard point-cloud voxelization step of the data pipeline."""

    def __init__(self, **kwargs):
        cfg = kwargs.get("cfg", None)
        self.range = cfg.range
        self.voxel_size = cfg.voxel_size
        self.max_points_in_voxel = cfg.max_points_in_voxel
        self.max_voxel_num = cfg.max_voxel_num

        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )

    def __call__(self, res, info):
        gen = self.voxel_generator
        pc_range = gen.point_cloud_range  # e.g. [0, -40, -3, 70.4, 40, 1]
        grid_size = gen.grid_size

        if res["mode"] == "train":
            # Filter ground-truth boxes that fall outside the BEV range.
            gt_dict = res["lidar"]["annotations"]
            bv_range = pc_range[[0, 1, 3, 4]]
            keep = prep.filter_gt_box_outside_range(gt_dict["gt_boxes"],
                                                    bv_range)
            _dict_select(gt_dict, keep)

            res["lidar"]["annotations"] = gt_dict

        voxels, coords, counts = gen.generate(res["lidar"]["points"])

        res["lidar"]["voxels"] = dict(
            voxels=voxels,
            coordinates=coords,
            num_points=counts,
            num_voxels=np.array([voxels.shape[0]], dtype=np.int64),
            shape=grid_size,
        )

        return res, info
class Processor_ROS:
    """ROS-facing inference wrapper: load a detector once and run it on
    incoming point clouds, returning scores, boxes, and labels."""

    def __init__(self, config_path, model_path):
        self.points = None
        self.config_path = config_path
        self.model_path = model_path
        self.device = None
        self.net = None
        self.voxel_generator = None
        self.inputs = None

    def initialize(self):
        """Entry point called by the ROS node before the first `run`."""
        self.read_config()

    def read_config(self):
        """Build the network and voxel generator from the config file."""
        # Fix: dropped the unused local alias `config_path`.
        cfg = Config.fromfile(self.config_path)
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.net = build_detector(cfg.model,
                                  train_cfg=None,
                                  test_cfg=cfg.test_cfg)
        self.net.load_state_dict(torch.load(self.model_path)["state_dict"])
        self.net = self.net.to(self.device).eval()

        self.range = cfg.voxel_generator.range
        self.voxel_size = cfg.voxel_generator.voxel_size
        self.max_points_in_voxel = cfg.voxel_generator.max_points_in_voxel
        self.max_voxel_num = cfg.voxel_generator.max_voxel_num
        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )

    def run(self, points):
        """Voxelize *points*, run the detector, and return
        ``(scores, boxes_lidar, types)`` as numpy arrays."""
        t_t = time.time()
        print(f"input points shape: {points.shape}")
        num_features = 5
        self.points = points.reshape([-1, num_features])
        self.points[:, 4] = 0  # zero the per-point timestamp channel

        voxels, coords, num_points = self.voxel_generator.generate(self.points)
        num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
        grid_size = self.voxel_generator.grid_size
        # Prepend a batch-index column (all zeros: single sample).
        coords = np.pad(coords, ((0, 0), (1, 0)),
                        mode='constant',
                        constant_values=0)

        voxels = torch.tensor(voxels, dtype=torch.float32, device=self.device)
        coords = torch.tensor(coords, dtype=torch.int32, device=self.device)
        num_points = torch.tensor(num_points,
                                  dtype=torch.int32,
                                  device=self.device)
        num_voxels = torch.tensor(num_voxels,
                                  dtype=torch.int32,
                                  device=self.device)

        self.inputs = dict(voxels=voxels,
                           num_points=num_points,
                           num_voxels=num_voxels,
                           coordinates=coords,
                           shape=[grid_size])
        # Fix: synchronize only on GPU — unconditional
        # torch.cuda.synchronize() fails on the CPU fallback path.
        if self.device.type == "cuda":
            torch.cuda.synchronize()
        t = time.time()

        with torch.no_grad():
            outputs = self.net(self.inputs, return_loss=False)[0]

        if self.device.type == "cuda":
            torch.cuda.synchronize()
        print("  network predict time cost:", time.time() - t)

        # Drop low-confidence detections.
        outputs = remove_low_score_nu(outputs, 0.45)

        boxes_lidar = outputs["box3d_lidar"].detach().cpu().numpy()
        print("  predict boxes:", boxes_lidar.shape)

        scores = outputs["scores"].detach().cpu().numpy()
        types = outputs["label_preds"].detach().cpu().numpy()

        # Convert yaw into the downstream consumer's convention.
        boxes_lidar[:, -1] = -boxes_lidar[:, -1] - np.pi / 2

        print(f"  total cost time: {time.time() - t_t}")

        return scores, boxes_lidar, types
Ejemplo n.º 15
0
class Voxelization(object):
    """Voxelization with per-mode voxel budgets and optional double-flip
    test-time augmentation.

    A scalar ``max_voxel_num`` is normalized to ``[train_cap, test_cap]``.
    At test time, when ``double_flip`` is enabled, the y-flipped, x-flipped
    and doubly-flipped point clouds are voxelized as well.
    """

    def __init__(self, **kwargs):
        cfg = kwargs.get("cfg", None)
        self.range = cfg.range
        self.voxel_size = cfg.voxel_size
        self.max_points_in_voxel = cfg.max_points_in_voxel
        # Normalize to [train_cap, test_cap]; a scalar applies to both.
        self.max_voxel_num = (
            [cfg.max_voxel_num, cfg.max_voxel_num]
            if isinstance(cfg.max_voxel_num, int) else cfg.max_voxel_num)

        self.double_flip = cfg.get('double_flip', False)

        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num[0],
        )

    def _pack(self, voxels, coordinates, num_points):
        """Assemble the standard voxel dict from generator output.

        Factored out of __call__, which previously repeated this stanza
        four times (main cloud + three flipped copies).
        """
        return dict(
            voxels=voxels,
            coordinates=coordinates,
            num_points=num_points,
            num_voxels=np.array([voxels.shape[0]], dtype=np.int64),
            shape=self.voxel_generator.grid_size,
            range=self.voxel_generator.point_cloud_range,
            size=self.voxel_generator.voxel_size,
        )

    def __call__(self, res, info):
        if res["mode"] == "train":
            # Drop gt boxes outside the BEV range, and use the train budget.
            gt_dict = res["lidar"]["annotations"]
            bv_range = self.voxel_generator.point_cloud_range[[0, 1, 3, 4]]
            mask = prep.filter_gt_box_outside_range(gt_dict["gt_boxes"],
                                                    bv_range)
            _dict_select(gt_dict, mask)

            res["lidar"]["annotations"] = gt_dict
            max_voxels = self.max_voxel_num[0]
        else:
            max_voxels = self.max_voxel_num[1]

        res["lidar"]["voxels"] = self._pack(*self.voxel_generator.generate(
            res["lidar"]["points"], max_voxels=max_voxels))

        # Test-time augmentation: voxelize the flipped copies as well.
        if self.double_flip and res["mode"] != 'train':
            for key in ("yflip", "xflip", "double_flip"):
                res["lidar"][key + "_voxels"] = self._pack(
                    *self.voxel_generator.generate(
                        res["lidar"][key + "_points"]))

        return res, info
Ejemplo n.º 16
0
class CenterPoint:
    """Batch inference wrapper: run a detector over every point-cloud file
    in a folder and write the predictions via ``pred2string``."""

    def __init__(self, config_path, model_path, savingpath):
        self.points = None
        self.config_path = config_path
        self.model_path = model_path
        self.device = None
        self.net = None
        self.voxel_generator = None
        self.inputs = None
        self.savepth = savingpath

    def initialize(self):
        """Entry point to call before `run`."""
        self.read_config()

    def read_config(self):
        """Build the network and voxel generator from the config file."""
        # Fix: dropped the unused local alias `config_path`.
        cfg = Config.fromfile(self.config_path)
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.net = build_detector(cfg.model,
                                  train_cfg=None,
                                  test_cfg=cfg.test_cfg)
        self.net.load_state_dict(torch.load(self.model_path)["state_dict"])
        self.net = self.net.to(self.device).eval()

        self.range = cfg.voxel_generator.range
        self.voxel_size = cfg.voxel_generator.voxel_size
        self.max_points_in_voxel = cfg.voxel_generator.max_points_in_voxel
        self.max_voxel_num = cfg.voxel_generator.max_voxel_num
        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )

    def run(self, pc_folder):
        """Run inference on every file in *pc_folder* and save results.

        Each file is read as float32, reshaped to 5 features per point
        (nuScenes layout; custom data may need a different reshape — see
        the notes below), voxelized, and fed through the network.
        """
        for ipfile in os.listdir(pc_folder):
            t_t = time.time()
            print("Processing File is {}".format(ipfile))
            self.points = np.fromfile(os.path.join(pc_folder, ipfile),
                                      dtype=np.float32)
            ##The below lines are for default pc
            num_features = 5
            # BUG FIX: was `points.reshape(...)` on an undefined name
            # (NameError at runtime); the loaded array is self.points.
            self.points = self.points.reshape([-1, num_features])
            self.points[:, 4] = 0  # timestamp value
            ##The above modification is required on custom data
            # For 4-feature clouds: reshape([-1, 4]) and append a zero
            # timestamp column with np.hstack instead.

            voxels, coords, num_points = self.voxel_generator.generate(
                self.points)
            num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
            grid_size = self.voxel_generator.grid_size
            # Prepend a batch-index column (all zeros: single sample).
            coords = np.pad(coords, ((0, 0), (1, 0)),
                            mode='constant',
                            constant_values=0)

            voxels = torch.tensor(voxels,
                                  dtype=torch.float32,
                                  device=self.device)
            coords = torch.tensor(coords,
                                  dtype=torch.int32,
                                  device=self.device)
            num_points = torch.tensor(num_points,
                                      dtype=torch.int32,
                                      device=self.device)
            num_voxels = torch.tensor(num_voxels,
                                      dtype=torch.int32,
                                      device=self.device)

            self.inputs = dict(voxels=voxels,
                               num_points=num_points,
                               num_voxels=num_voxels,
                               coordinates=coords,
                               shape=[grid_size])
            # Fix: synchronize only when actually on GPU — unconditional
            # torch.cuda.synchronize() fails on the CPU fallback path.
            if self.device.type == "cuda":
                torch.cuda.synchronize()
            t = time.time()

            with torch.no_grad():
                outputs = self.net(self.inputs, return_loss=False)[0]

            if self.device.type == "cuda":
                torch.cuda.synchronize()
            print("  network predict time cost:", time.time() - t)

            # Drop low-confidence detections.
            outputs = remove_low_score_nu(outputs, 0.45)

            boxes_lidar = outputs["box3d_lidar"].detach().cpu().numpy()
            print("  predict boxes:", boxes_lidar.shape)

            scores = outputs["scores"].detach().cpu().numpy()
            types = outputs["label_preds"].detach().cpu().numpy()

            # Convert yaw into the downstream consumer's convention.
            boxes_lidar[:, -1] = -boxes_lidar[:, -1] - np.pi / 2

            print(f"  total cost time: {time.time() - t_t}")

            pred2string(scores, types, boxes_lidar, ipfile, self.savepth)

    def log_predkitti(self, scores, detbox, types):
        # Placeholder: KITTI-format prediction logging not implemented yet.
        pass
Ejemplo n.º 17
0
def crop2assign(example,
                predictions,
                cfg=None,
                device=torch.device("cpu"),
                training=True):
    """Crop points inside (enlarged) first-stage boxes and build a second-stage batch.

    For every predicted box, the points falling inside it are shifted to the
    box center, rotated into a canonical (yaw-aligned) frame, and voxelized
    onto a fixed ``cfg.dense_shape`` grid.  During training, each kept
    prediction is matched to its best-overlapping ground-truth box
    (BEV IoU >= 0.7) and a regression target ``[batch_idx, box_idx, dz, dh,
    dg]`` is produced; unmatched (false-positive) detections are dropped.

    Args:
        example: batch dict with at least ``points`` (column 0 is the batch
            index) and, when training, ``annos`` and ``ground_plane``.
        predictions: per-sample dicts holding a ``box3d_lidar`` tensor.
        cfg: second-stage config (``roi_context``, ``dense_shape``, ``anchor``).
        device: device the returned tensors are placed on.
        training: when True also build matched regression targets.

    Returns:
        dict with ``voxels``, ``coordinates``, ``num_points``, ``shape`` and,
        depending on the mode, ``targets``/``ground_plane`` (training) or
        ``stage_one_output_boxes`` (inference).

    Raises:
        RuntimeError: on an empty cropped point set or an inconsistent batch.
        ValueError: on a non-positive computed voxel size.
    """
    # 2.1 Prepare batch targets, filter invalid target
    batch_targets = []
    # 2.1 Prepare batch data
    batch_data = defaultdict(list)

    if training:
        # Collect per-sample GT boxes, dropping all-zero padding rows.
        gt_boxes3d = defaultdict(list)

        for idx, sample in enumerate(example["annos"]):
            for branch in sample["gt_boxes"]:
                mask = np.ma.masked_equal(branch, 0).mask
                if np.any(mask):
                    # Keep only rows that contain no zero entries (real boxes).
                    gt_boxes3d[idx].extend(
                        branch[np.where(mask.sum(axis=1) == 0)])
                else:
                    gt_boxes3d[idx].extend(branch)

    # Number of kept (matched, non-empty) boxes across the whole batch.
    cnt = 0
    for idx, dt_boxes3d in enumerate(predictions):
        sample_points = (
            example["points"][example["points"][:,
                                                0] == idx][:,
                                                           1:].cpu().numpy())
        # NOTE(review): presumably the last column is intensity normalized to
        # [-0.5, 0.5] — shifted into [0, 1] here; confirm against the loader.
        if sample_points[:, -1].min() < 0:
            sample_points[:, -1] += 0.5

        if training:
            gp = example["ground_plane"][idx]

            sample_gt_boxes3d = np.array(gt_boxes3d[idx])
            if not sample_gt_boxes3d.shape[0] == 0:
                # BEV boxes: x, y, w, l, yaw.
                gt_bevs = sample_gt_boxes3d[:, [0, 1, 3, 4, -1]]
            else:
                gt_bevs = np.zeros((0, 5))

        sample_dt_boxed3d = dt_boxes3d["box3d_lidar"].cpu().numpy()

        if training:
            dt_bevs = sample_dt_boxed3d[:, [0, 1, 3, 4, -1]]

            # Find max match gt
            ious = riou_cc(gt_bevs, dt_bevs)
            dt_max_matched_gt = ious.argmax(axis=0)
            max_ious = ious.max(axis=0)

            # Keep only well-matched detections; remove false positives.
            selected = np.where(max_ious >= 0.7)
            dt_max_matched_gt = dt_max_matched_gt[selected]
            sample_dt_boxed3d = sample_dt_boxed3d[selected]

        # enlarge box
        sample_dt_boxed3d[:, [3, 4]] += cfg.roi_context

        indices = points_in_rbbox(sample_points, sample_dt_boxed3d)
        num_points_in_gt = indices.sum(0)
        # remove empty dt boxes
        selected_by_points = np.where(num_points_in_gt > 0)

        boxes = sample_dt_boxed3d[selected_by_points]
        indices = indices.transpose()[selected_by_points].transpose()

        if training:
            dt_max_matched_gt = dt_max_matched_gt[selected_by_points]
            cnt += len(boxes)

        # voxel_generators to form fixed_size batch input
        fixed_size = cfg.dense_shape  # w, l, h

        for i, box in enumerate(boxes):

            if training:
                batch_data["ground_plane"].append(gp)

                # 1. generate regression targets
                matched_gt = sample_gt_boxes3d[dt_max_matched_gt[i]]

                height_a = cfg.anchor.height
                z_center_a = cfg.anchor.center
                # z_g = matched_gt[2] + matched_gt[5]/2. - (-0.14) # z top
                z_g = matched_gt[2] - z_center_a
                h_g = matched_gt[5] - height_a
                # Gap between the GT box bottom and the ground plane.
                g_g = (matched_gt[2] - matched_gt[5] / 2) - (float(gp))
                dt_target = np.array([idx, i, z_g, h_g, g_g])

                batch_targets.append(dt_target)

            if not training:
                batch_data["stage_one_output_boxes"].append(box)

            # 2. prepare data

            box_points = sample_points[indices[:, i]].copy()

            # move to center
            box_center = box[:2]
            box_points[:, :2] -= box_center
            # rotate to canonical
            box_yaw = box[-1:]
            box_points_canonical = rotation_3d_in_axis(
                box_points[:, :3][np.newaxis, ...], -box_yaw, axis=2)[0]
            box_points_canonical = np.hstack(
                (box_points_canonical, box_points[:, -1:]))

            if box_points_canonical.shape[0] == 0:
                # Boxes were already filtered to contain >= 1 point, so an
                # empty crop here indicates an upstream bug.
                raise RuntimeError(
                    f"empty point set for box {i} of sample {idx}")

            point_cloud_range = [
                -box[3] / 2,
                -box[4] / 2,
                -3.5,
                box[3] / 2,
                box[4] / 2,
                1.5,
            ]

            voxel_size = [
                (point_cloud_range[3] - point_cloud_range[0]) / fixed_size[0],
                (point_cloud_range[4] - point_cloud_range[1]) / fixed_size[1],
                (point_cloud_range[5] - point_cloud_range[2]) / fixed_size[2],
            ]
            for vs in voxel_size:
                if not vs > 0:
                    raise ValueError(
                        f"non-positive voxel size {voxel_size} for box {box}")

            vg = VoxelGenerator(
                voxel_size=voxel_size,
                point_cloud_range=point_cloud_range,
                max_num_points=20,
                max_voxels=2000,
            )

            voxels, coordinates, num_points = vg.generate(box_points_canonical,
                                                          max_voxels=2000)
            batch_data["voxels"].append(voxels)
            batch_data["coordinates"].append(coordinates)
            batch_data["num_points"].append(num_points)

    if training:
        batch_targets = torch.tensor(np.array(batch_targets),
                                     dtype=torch.float32,
                                     device=device)
        batch_data["targets"] = batch_targets

        batch_data["ground_plane"] = torch.tensor(batch_data["ground_plane"],
                                                  dtype=torch.float32,
                                                  device=device)

    # Concatenate per-box arrays into batch arrays; each coordinate block
    # gets a leading column holding its box index within the batch.
    for k, v in batch_data.items():
        if k in ["voxels", "num_points"]:
            batch_data[k] = np.concatenate(v, axis=0)
        elif k in ["stage_one_output_boxes"]:
            batch_data[k] = np.array(v)
        elif k in ["coordinates"]:
            coors = []
            for i, coor in enumerate(v):
                coor_pad = np.pad(coor, ((0, 0), (1, 0)),
                                  mode="constant",
                                  constant_values=i)
                coors.append(coor_pad)
            batch_data[k] = np.concatenate(coors, axis=0)

    for k, v in batch_data.items():
        if k in ["coordinates", "num_points"]:
            batch_data[k] = torch.tensor(v, dtype=torch.int32, device=device)
        elif k in ["voxels", "stage_one_output_boxes"]:
            batch_data[k] = torch.tensor(v, dtype=torch.float32, device=device)

    if training:
        # Sanity check: kept-box count, coordinate batch indices, and target
        # rows must all agree.
        if (not cnt == (batch_data["coordinates"][:, 0].max() + 1) ==
                batch_data["targets"].shape[0]):
            raise RuntimeError(
                "inconsistent batch: cnt={}, coord batches={}, targets={}".format(
                    cnt,
                    int(batch_data["coordinates"][:, 0].max() + 1),
                    batch_data["targets"].shape[0]))

    batch_data["shape"] = fixed_size

    return batch_data
Ejemplo n.º 18
0
class Processor_ROS:
    def __init__(self, config_path, model_path):
        self.points = None
        self.config_path = config_path
        self.model_path = model_path
        self.device = None
        self.net = None
        self.voxel_generator = None

        self.lidar_deque = deque(maxlen=5)
        self.current_frame = {
            "lidar_stamp": None,
            "lidar_seq": None,
            "points": None,
            "odom_seq": None,
            "odom_stamp": None,
            "translation": None,
            "rotation": None
        }
        self.pc_list = deque(maxlen=5)
        self.inputs = None

    def initialize(self):
        """Load the config, build the detector, and set up the voxel generator."""
        self.read_config()

    def read_config(self):
        """Build the detector and voxel generator from the config file.

        Loads the config at ``self.config_path``, instantiates the network,
        restores weights from ``self.model_path``, moves the model to the
        selected device in eval mode, and precomputes the lidar<->IMU
        extrinsic transforms used for multi-frame accumulation.
        """
        cfg = Config.fromfile(self.config_path)
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.net = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        # map_location lets a CUDA-trained checkpoint load on a CPU-only host.
        self.net.load_state_dict(
            torch.load(self.model_path,
                       map_location=self.device)["state_dict"])
        self.net = self.net.to(self.device).eval()

        self.range = cfg.voxel_generator.range
        self.voxel_size = cfg.voxel_generator.voxel_size
        self.max_points_in_voxel = cfg.voxel_generator.max_points_in_voxel
        self.max_voxel_num = cfg.voxel_generator.max_voxel_num
        self.voxel_generator = VoxelGenerator(
            voxel_size=self.voxel_size,
            point_cloud_range=self.range,
            max_num_points=self.max_points_in_voxel,
            max_voxels=self.max_voxel_num,
        )
        # nuscenes dataset: lidar->IMU translation [m] and w-x-y-z quaternion.
        lidar2imu_t = np.array([0.985793, 0.0, 1.84019])
        lidar2imu_r = Quaternion([0.706749235, -0.01530099378, 0.0173974518, -0.7070846])

        ## UDI dataset
        # lidar2imu_t = np.array([1.50, 0., 1.42])
        # lidar2imu_r = Quaternion([1., 0., 0., 0.])
        self.lidar2imu = transform_matrix(lidar2imu_t, lidar2imu_r, inverse=True)
        self.imu2lidar = transform_matrix(lidar2imu_t, lidar2imu_r, inverse=False)

    def run(self):
        """Voxelize ``self.points``, run the detector, and post-process boxes.

        Returns:
            Tuple of numpy arrays ``(scores, boxes_lidar, types)``; the last
            box column (yaw) is remapped to the output convention.
        """
        voxels, coords, num_points = self.voxel_generator.generate(self.points)
        num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
        grid_size = self.voxel_generator.grid_size
        # Prepend a batch-index column (single sample, so all zeros).
        coords = np.pad(coords, ((0, 0), (1, 0)),
                        mode='constant', constant_values=0)

        voxels = torch.tensor(voxels, dtype=torch.float32, device=self.device)
        coords = torch.tensor(coords, dtype=torch.int32, device=self.device)
        num_points = torch.tensor(
            num_points, dtype=torch.int32, device=self.device)
        num_voxels = torch.tensor(
            num_voxels, dtype=torch.int32, device=self.device)

        self.inputs = dict(
            voxels=voxels,
            num_points=num_points,
            num_voxels=num_voxels,
            coordinates=coords,
            shape=grid_size
        )
        torch.cuda.synchronize()
        t = time.time()

        # Inference only: no_grad avoids building the autograd graph
        # (consistent with the other inference path in this file).
        with torch.no_grad():
            outputs = self.net(self.inputs)[0]

        torch.cuda.synchronize()
        print("  network predict time cost:", time.time() - t)

        # Drop detections below the confidence threshold.
        outputs = remove_low_score_nu(outputs, 0.45)

        boxes_lidar = outputs["box3d_lidar"].detach().cpu().numpy()
        print("  predict boxes:", boxes_lidar.shape)

        scores = outputs["scores"].detach().cpu().numpy()
        types = outputs["label_preds"].detach().cpu().numpy()

        # Remap yaw to the output convention.
        boxes_lidar[:, -1] = -boxes_lidar[:, -1] - np.pi / 2

        return scores, boxes_lidar, types

    def get_lidar_data(self, input_points: dict):
        """Buffer one lidar frame; once 5 frames are queued, fuse them.

        Each buffered frame is motion-compensated into the newest frame's
        lidar coordinates using the odometry pose stored by get_odom_data(),
        then all five clouds are concatenated into ``self.points`` as an
        (N, 5) array of x, y, z, intensity, and time offset relative to the
        newest frame.  The fused cloud is also published for visualization.

        Returns True once a fused cloud was produced (implicitly None while
        the buffer is still filling).
        """
        print("get one frame lidar data.")
        self.current_frame["lidar_stamp"] = input_points['stamp']
        self.current_frame["lidar_seq"] = input_points['seq']
        # Incoming cloud is stored transposed (channels as rows).
        self.current_frame["points"] = input_points['points'].T   
        self.lidar_deque.append(deepcopy(self.current_frame))
        if len(self.lidar_deque) == 5:

            # Reference chain: newest lidar <- IMU <- global frame.
            ref_from_car = self.imu2lidar
            car_from_global = transform_matrix(self.lidar_deque[-1]['translation'], self.lidar_deque[-1]['rotation'], inverse=True)

            ref_from_car_gpu = cp.asarray(ref_from_car)
            car_from_global_gpu = cp.asarray(car_from_global)

            # Warp each of the four older frames into the newest frame.
            for i in range(len(self.lidar_deque) - 1):
                last_pc = self.lidar_deque[i]['points']
                last_pc_gpu = cp.asarray(last_pc)

                global_from_car = transform_matrix(self.lidar_deque[i]['translation'], self.lidar_deque[i]['rotation'], inverse=False)
                car_from_current = self.lidar2imu
                global_from_car_gpu = cp.asarray(global_from_car)
                car_from_current_gpu = cp.asarray(car_from_current)

                # Compose: old lidar -> old IMU -> global -> new IMU -> new lidar.
                transform = reduce(
                    cp.dot,
                    [ref_from_car_gpu, car_from_global_gpu, global_from_car_gpu, car_from_current_gpu],
                )
                # tmp_1 = cp.dot(global_from_car_gpu, car_from_current_gpu)
                # tmp_2 = cp.dot(car_from_global_gpu, tmp_1)
                # transform = cp.dot(ref_from_car_gpu, tmp_2)

                # Homogeneous coordinates: xyz rows plus a row of ones.
                last_pc_gpu = cp.vstack((last_pc_gpu[:3, :], cp.ones(last_pc_gpu.shape[1])))
                last_pc_gpu = cp.dot(transform, last_pc_gpu)

                self.pc_list.append(last_pc_gpu[:3, :])

            # The newest frame is already in the reference frame.
            current_pc = self.lidar_deque[-1]['points']
            current_pc_gpu = cp.asarray(current_pc)
            self.pc_list.append(current_pc_gpu[:3,:])

            # Build the fused (5, N) array: xyz + intensity + relative time.
            all_pc = np.zeros((5, 0), dtype=float)
            for i in range(len(self.pc_list)):
                tmp_pc = cp.vstack((self.pc_list[i], cp.zeros((2, self.pc_list[i].shape[1]))))
                tmp_pc = cp.asnumpy(tmp_pc)
                ref_timestamp = self.lidar_deque[-1]['lidar_stamp'].to_sec()
                timestamp = self.lidar_deque[i]['lidar_stamp'].to_sec()
                # Row 3: original per-point intensity; row 4: age vs newest frame.
                tmp_pc[3, ...] = self.lidar_deque[i]['points'][3, ...]
                tmp_pc[4, ...] = ref_timestamp - timestamp
                all_pc = np.hstack((all_pc, tmp_pc))
            
            all_pc = all_pc.T
            print(f" concate pointcloud shape: {all_pc.shape}")

            self.points = all_pc
            # Publish the fused cloud for visualization/debugging.
            sync_cloud = xyz_array_to_pointcloud2(all_pc[:, :3], stamp=self.lidar_deque[-1]["lidar_stamp"], frame_id="lidar_top")
            pub_sync_cloud.publish(sync_cloud)
            return True

    def get_odom_data(self, input_odom):

        self.current_frame["odom_stamp"] = input_odom.header.stamp
        self.current_frame["odom_seq"] = input_odom.header.seq
        x_t = input_odom.pose.pose.position.x
        y_t = input_odom.pose.pose.position.y
        z_t = input_odom.pose.pose.position.z
        self.current_frame["translation"] = np.array([x_t, y_t, z_t])
        x_r = input_odom.pose.pose.orientation.x
        y_r = input_odom.pose.pose.orientation.y
        z_r = input_odom.pose.pose.orientation.z
        w_r = input_odom.pose.pose.orientation.w
        self.current_frame["rotation"] = Quaternion([w_r, x_r, y_r, z_r])