Example 1
def sponv_process_pointcloud(point_cloud, template=False, is_testset=False):
    if not is_testset:
        point_cloud = shuffle_points(point_cloud)
        max_number_of_voxel = 16000
    else:
        max_number_of_voxel = 40000
    try:
        from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
    except ImportError:
        from spconv.utils import VoxelGenerator
    if template:
        point_cloud_range = [cfg.TEMPLATE_X_MIN, cfg.TEMPLATE_Y_MIN, cfg.TEMPLATE_Z_MIN, cfg.TEMPLATE_X_MAX, cfg.TEMPLATE_Y_MAX, cfg.TEMPLATE_Z_MAX]
    else:
        point_cloud_range = [cfg.SCENE_X_MIN, cfg.SCENE_Y_MIN, cfg.SCENE_Z_MIN, cfg.SCENE_X_MAX, cfg.SCENE_Y_MAX, cfg.SCENE_Z_MAX]
    voxel_size = np.array([cfg.VOXEL_X_SIZE, cfg.VOXEL_Y_SIZE, cfg.VOXEL_Z_SIZE], dtype=np.float32)
    max_point_number = cfg.VOXEL_POINT_COUNT


    voxel_generator = VoxelGenerator(
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range,
        max_num_points=max_point_number,
        max_voxels=max_number_of_voxel
    )
    voxel_output = voxel_generator.generate(point_cloud)
    if isinstance(voxel_output, dict):
        voxels, coordinates, num_points = \
            voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
    else:
        voxels, coordinates, num_points = voxel_output

    voxel_dict = {'feature_buffer': voxels,
                  'coordinate_buffer': coordinates,
                  'number_buffer': num_points}
    return voxel_dict
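
The helper above simply wraps spconv's voxel generator and repackages its output into a dictionary. A minimal usage sketch follows; the random point cloud, the is_testset=True flag (which skips the shuffle_points call), and the assumption that a suitable cfg module is already imported are all illustrative and not taken from the original project.

import numpy as np

# Hedged usage sketch: cfg (and shuffle_points for the non-test path) are assumed
# to be provided by the surrounding project; the point cloud here is random.
points = np.random.uniform(-40.0, 40.0, size=(20000, 4)).astype(np.float32)
voxel_dict = sponv_process_pointcloud(points, template=False, is_testset=True)

# Expected shapes (num_voxels varies per cloud):
#   feature_buffer:    (num_voxels, cfg.VOXEL_POINT_COUNT, 4)
#   coordinate_buffer: (num_voxels, 3) integer voxel indices
#   number_buffer:     (num_voxels,)  points actually filled per voxel
print({k: v.shape for k, v in voxel_dict.items()})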
Example 2
    def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
        if data_dict is None:
            try:
                from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
            except ImportError:
                from spconv.utils import VoxelGenerator

            voxel_generator = VoxelGenerator(
                voxel_size=config.VOXEL_SIZE,
                point_cloud_range=self.point_cloud_range,
                max_num_points=config.MAX_POINTS_PER_VOXEL,
                max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
            )
            grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
            self.grid_size = np.round(grid_size).astype(np.int64)
            self.voxel_size = config.VOXEL_SIZE
            return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)

        points = data_dict['points']
        voxel_output = voxel_generator.generate(points)
        if isinstance(voxel_output, dict):
            voxels, coordinates, num_points = \
                voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
        else:
            voxels, coordinates, num_points = voxel_output

        if not data_dict['use_lead_xyz']:
            voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

        data_dict['voxels'] = voxels
        data_dict['voxel_coords'] = coordinates
        data_dict['voxel_num_points'] = num_points
        return data_dict
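
transform_points_to_voxels follows a two-phase data-processor convention: the first call, with data_dict=None, builds the voxel generator once, caches grid_size and voxel_size, and returns a functools.partial of itself; that partial is then invoked per sample with a real data_dict. The self-contained sketch below illustrates only this control flow; ToyProcessor and its SCALE key are invented for the example.

import numpy as np
from functools import partial


class ToyProcessor:
    def scale_points(self, data_dict=None, config=None, scale=None):
        if data_dict is None:
            scale = config['SCALE']                         # one-time setup
            return partial(self.scale_points, scale=scale)
        data_dict['points'] = data_dict['points'] * scale   # per-sample work
        return data_dict


fn = ToyProcessor().scale_points(config={'SCALE': 2.0})  # build phase returns a partial
print(fn({'points': np.ones(3)})['points'])              # apply phase -> [2. 2. 2.]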
Example 3
    def transform_augmented_lidar_points_to_voxels(self,
                                                   data_dict=None,
                                                   config=None,
                                                   voxel_generator=None):
        if data_dict is None:
            try:
                from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
            except ImportError:
                from spconv.utils import VoxelGenerator
            voxel_generator = VoxelGenerator(
                voxel_size=config.VOXEL_SIZE,
                point_cloud_range=self.point_cloud_range,
                max_num_points=config.MAX_POINTS_PER_VOXEL,
                max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode])
            grid_size = (self.point_cloud_range[3:6] -
                         self.point_cloud_range[0:3]) / np.array(
                             config.VOXEL_SIZE)
            self.grid_size = np.round(grid_size).astype(np.int64)
            self.voxel_size = config.VOXEL_SIZE
            self.voxel_augmented_lidar_labels = config.VOXEL_AUGMENTED_LIDAR_LABELS
            return partial(self.transform_augmented_lidar_points_to_voxels,
                           voxel_generator=voxel_generator)

        points = data_dict['points']
        voxel_output = voxel_generator.generate(points)
        if isinstance(voxel_output, dict):
            voxels, coordinates, num_points = \
                voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
        else:
            voxels, coordinates, num_points = voxel_output

        assert data_dict['use_lead_xyz'], 'use_lead_xyz is set False! xyz should be used for voxelization!'

        if self.mode == 'train':
            voxel_labels = voxels[..., 3:]  # [M, max_points, ndim] float tensor. only contain points
            assert voxel_labels.shape[-1] == len(self.voxel_augmented_lidar_labels), \
                'voxel_labels.shape[-1]=' + str(voxel_labels.shape[-1]) + \
                ' len(self.voxel_augmented_lidar_labels)=' + str(len(self.voxel_augmented_lidar_labels))
            for idx, x in enumerate(self.voxel_augmented_lidar_labels):
                if x == 'drivable_area_bool':
                    data_dict['voxels_drivable_cls_labels'] = np.max(
                        voxel_labels[:, :, idx],
                        axis=1).reshape(-1, 1)  #binary 1 or 0
                elif x == 'lidar_ground_bool':
                    data_dict['voxels_ground_cls_labels'] = np.max(
                        voxel_labels[:, :, idx],
                        axis=1).reshape(-1, 1)  #binary 1 or 0
                elif x == 'ground_height':
                    data_dict['voxels_ground_reg_labels'] = np.min(
                        voxel_labels[:, :, idx], axis=1).reshape(-1, 1)  #float

        voxels = voxels[..., :3]  # use xyz as voxel data

        data_dict['voxels'] = voxels
        data_dict['voxel_coords'] = coordinates
        data_dict['voxel_num_points'] = num_points

        return data_dict
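
In the training branch, the extra channels appended after xyz are reduced per voxel: np.max turns any positive per-point flag into a binary voxel label, while np.min keeps the lowest ground height seen in the voxel. The toy numpy example below (shapes and values invented) shows the same reductions in isolation.

import numpy as np

# Two voxels, two points each, three label channels:
# [drivable_area_bool, lidar_ground_bool, ground_height]
voxel_labels = np.array([[[1.0, 0.0, -1.6],
                          [1.0, 1.0, -1.5]],
                         [[0.0, 0.0, -1.2],
                          [0.0, 0.0, -1.4]]])

drivable = np.max(voxel_labels[:, :, 0], axis=1).reshape(-1, 1)  # any drivable point -> 1
ground = np.max(voxel_labels[:, :, 1], axis=1).reshape(-1, 1)    # any ground point -> 1
height = np.min(voxel_labels[:, :, 2], axis=1).reshape(-1, 1)    # lowest height in voxel
print(drivable.ravel(), ground.ravel(), height.ravel())          # [1. 0.] [1. 0.] [-1.6 -1.4]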
Example 4
    def transform_points_to_voxels(self,
                                   data_dict=None,
                                   config=None,
                                   voxel_generator=None):
        if data_dict is None:
            try:
                from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
            except ImportError:
                from spconv.utils import VoxelGenerator
            voxel_generator = VoxelGenerator(
                voxel_size=config.VOXEL_SIZE,
                point_cloud_range=self.point_cloud_range,
                max_num_points=config.MAX_POINTS_PER_VOXEL,
                max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode])
            grid_size = (self.point_cloud_range[3:6] -
                         self.point_cloud_range[0:3]) / np.array(
                             config.VOXEL_SIZE)
            self.grid_size = np.round(grid_size).astype(np.int64)
            self.voxel_size = config.VOXEL_SIZE
            self.max_voxels = config.MAX_NUMBER_OF_VOXELS[self.mode]
            self.skip_voxel_generator = config.get('SKIP_VOXEL_GENERATOR',
                                                   False)
            return partial(self.transform_points_to_voxels,
                           voxel_generator=voxel_generator)

        if self.skip_voxel_generator:
            points = data_dict['points']
            pc_range_min = np.array(self.point_cloud_range[:3]).reshape(-1, 3)
            voxel_size_array = np.array(self.voxel_size).reshape(-1, 3)
            keep = common_utils.mask_points_by_range_hard(
                points, self.point_cloud_range)
            chosen_points = points[keep]
            chosen_points = chosen_points[:self.max_voxels, :]
            coords = (chosen_points[:, :3] - pc_range_min) // voxel_size_array
            coords = coords.astype(int)
            num_points = np.ones(chosen_points.shape[0])
            data_dict['voxels'] = chosen_points
            data_dict['voxel_coords'] = coords[:, [2, 1, 0]]
            data_dict['voxel_num_points'] = num_points
        else:
            points = data_dict['points']
            voxel_output = voxel_generator.generate(points)
            if isinstance(voxel_output, dict):
                voxels, coordinates, num_points = \
                    voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
            else:
                voxels, coordinates, num_points = voxel_output

            if not data_dict['use_lead_xyz']:
                voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

            data_dict['voxels'] = voxels
            data_dict['voxel_coords'] = coordinates
            data_dict['voxel_num_points'] = num_points
        return data_dict
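
When SKIP_VOXEL_GENERATOR is set, every in-range point becomes its own pseudo-voxel: integer grid indices are obtained by floor-dividing the offset from the range minimum by the voxel size, and the columns are then reordered from (x, y, z) to the (z, y, x) convention used downstream. A small numpy sketch of just that coordinate arithmetic, with arbitrary range and size values:

import numpy as np

point_cloud_range = np.array([0.0, -40.0, -3.0, 70.4, 40.0, 1.0])
voxel_size = np.array([0.1, 0.1, 0.2])
points = np.array([[10.05, -5.21, -0.30, 0.4]])       # (x, y, z, intensity)

coords = (points[:, :3] - point_cloud_range[:3].reshape(-1, 3)) // voxel_size.reshape(-1, 3)
coords = coords.astype(int)
print(coords)                # [[100 347  13]]  voxel indices in (x, y, z) order
print(coords[:, [2, 1, 0]])  # [[ 13 347 100]]  reordered to (z, y, x)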
Example 5
    def transform_points_to_voxels(self,
                                   data_dict=None,
                                   config=None,
                                   voxel_generator=None):
        if data_dict is None:
            try:
                from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
            except ImportError:
                from spconv.utils import VoxelGenerator

            voxel_generator = VoxelGenerator(
                voxel_size=config.VOXEL_SIZE,
                point_cloud_range=self.point_cloud_range,
                max_num_points=config.MAX_POINTS_PER_VOXEL,
                max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode])
            grid_size = (self.point_cloud_range[3:6] -
                         self.point_cloud_range[0:3]) / np.array(
                             config.VOXEL_SIZE)
            self.grid_size = np.round(grid_size).astype(np.int64)
            self.voxel_size = config.VOXEL_SIZE
            return partial(self.transform_points_to_voxels,
                           voxel_generator=voxel_generator)

        points = data_dict['points']
        voxel_output = voxel_generator.generate(points)
        if isinstance(voxel_output, dict):
            voxels, coordinates, num_points = \
                voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
        else:
            voxels, coordinates, num_points = voxel_output

        if not data_dict['use_lead_xyz']:
            voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

        data_dict['voxels'] = voxels
        data_dict['voxel_coords'] = coordinates

        data_dict['voxel_num_points'] = num_points
        # Mean-VFE style feature: average the per-voxel points, clipping the
        # divisor to at least 1 so empty voxels do not divide by zero.
        voxel_features, voxel_num_points = data_dict['voxels'], data_dict['voxel_num_points']
        points_mean = np.sum(voxel_features, axis=1, keepdims=False)
        num_points = voxel_num_points.astype(voxels.dtype).reshape((-1, 1))
        normalizer = np.clip(num_points, a_min=1.0, a_max=6.0)
        points_mean = points_mean / normalizer
        data_dict['voxel_features'] = points_mean

        return data_dict
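
The final block precomputes a mean-style voxel feature: per-voxel point features are summed (padding slots are zero, so they add nothing) and divided by the point count, clipped to at least 1.0 to avoid division by zero; the upper clip of 6.0 presumably mirrors the configured maximum points per voxel. A tiny numpy illustration with made-up values:

import numpy as np

voxels = np.array([[[1.0, 2.0, 0.0, 0.5],
                    [3.0, 4.0, 0.0, 0.5],
                    [0.0, 0.0, 0.0, 0.0]]])   # one voxel, third slot is zero padding
voxel_num_points = np.array([2])

points_mean = np.sum(voxels, axis=1, keepdims=False)
normalizer = np.clip(voxel_num_points.astype(voxels.dtype).reshape(-1, 1), a_min=1.0, a_max=6.0)
print(points_mean / normalizer)               # [[2.  3.  0.  0.5]]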
Example 6
class VoxelGeneratorWrapper:
    def __init__(self, vsize_xyz, coors_range_xyz, num_point_features, max_num_points_per_voxel, max_num_voxels):
        try:
            from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
            self.spconv_ver = 1
        except ImportError:
            try:
                from spconv.utils import VoxelGenerator
                self.spconv_ver = 1
            except ImportError:
                from spconv.utils import Point2VoxelCPU3d as VoxelGenerator
                self.spconv_ver = 2

        if self.spconv_ver == 1:
            self._voxel_generator = VoxelGenerator(
                voxel_size=vsize_xyz,
                point_cloud_range=coors_range_xyz,
                max_num_points=max_num_points_per_voxel,
                max_voxels=max_num_voxels
            )
        else:
            self._voxel_generator = VoxelGenerator(
                vsize_xyz=vsize_xyz,
                coors_range_xyz=coors_range_xyz,
                num_point_features=num_point_features,
                max_num_points_per_voxel=max_num_points_per_voxel,
                max_num_voxels=max_num_voxels
            )

    def generate(self, points):
        if self.spconv_ver == 1:
            voxel_output = self._voxel_generator.generate(points)
            if isinstance(voxel_output, dict):
                voxels, coordinates, num_points = \
                    voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
            else:
                voxels, coordinates, num_points = voxel_output
        else:
            assert tv is not None, "Unexpected error, library: 'cumm' wasn't imported properly."
            voxel_output = self._voxel_generator.point_to_voxel(tv.from_numpy(points))
            tv_voxels, tv_coordinates, tv_num_points = voxel_output
            # make copy with numpy(), since numpy_view() will disappear as soon as the generator is deleted
            voxels = tv_voxels.numpy()
            coordinates = tv_coordinates.numpy()
            num_points = tv_num_points.numpy()
        return voxels, coordinates, num_points
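
VoxelGeneratorWrapper hides the spconv 1.x versus 2.x API difference behind a single generate() call; in the 2.x branch, tv is expected to be cumm's tensorview module imported elsewhere in the same file. A hedged usage sketch with made-up KITTI-like parameters is shown below.

import numpy as np

gen = VoxelGeneratorWrapper(
    vsize_xyz=[0.05, 0.05, 0.1],
    coors_range_xyz=[0.0, -40.0, -3.0, 70.4, 40.0, 1.0],
    num_point_features=4,
    max_num_points_per_voxel=5,
    max_num_voxels=16000,
)

# Random points drawn inside the range above (x, y, z, intensity).
points = np.stack([
    np.random.uniform(0.0, 70.4, 5000),
    np.random.uniform(-40.0, 40.0, 5000),
    np.random.uniform(-3.0, 1.0, 5000),
    np.random.uniform(0.0, 1.0, 5000),
], axis=1).astype(np.float32)

voxels, coords, num_points_per_voxel = gen.generate(points)
print(voxels.shape, coords.shape, num_points_per_voxel.shape)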
Example 7
    def transform_points_to_rangevoxels(self,
                                        data_dict=None,
                                        config=None,
                                        voxel_generator=None):
        if data_dict is None:
            from sphconv import VoxelGenerator

            voxel_generator = VoxelGenerator(config.v_res, config.h_res,
                                             config.d_res, config.v_range,
                                             config.h_range, config.d_range,
                                             config.log)

            self.grid_size = np.array(
                [config.d_res, config.h_res, config.v_res]).astype(np.int64)
            return partial(self.transform_points_to_rangevoxels,
                           voxel_generator=voxel_generator)

        points = data_dict['points']
        rangeV = voxel_generator.generate(points)
        data_dict['rangeV'] = rangeV
        return data_dict
class DepthContrastDataset(Dataset):
    """Base Self Supervised Learning Dataset Class."""
    def __init__(self, cfg):
        self.split = "train"  ### Default is training
        self.label_objs = []
        self.data_paths = []
        self.label_paths = []
        self.cfg = cfg
        self.batchsize_per_replica = cfg["BATCHSIZE_PER_REPLICA"]
        self.label_sources = []  #cfg["LABEL_SOURCES"]
        self.dataset_names = cfg["DATASET_NAMES"]
        self.label_type = cfg["LABEL_TYPE"]
        self.AUGMENT_COORDS_TO_FEATS = False  #optional
        self._labels_init = False
        self._get_data_files("train")
        self.data_objs = np.load(
            self.data_paths[0])  ### Only load the first one for now

        #### Add the voxelizer here
        if ("Lidar" in cfg) and cfg["VOX"]:
            self.VOXEL_SIZE = [0.1, 0.1, 0.2]

            self.point_cloud_range = POINT_RANGE  #np.array([  0. , -75. ,  -3. ,  75.0,  75. ,   3. ], dtype=np.float32)
            self.MAX_POINTS_PER_VOXEL = 5
            self.MAX_NUMBER_OF_VOXELS = 16000
            self.voxel_generator = VoxelGenerator(
                voxel_size=self.VOXEL_SIZE,
                point_cloud_range=self.point_cloud_range,
                max_num_points=self.MAX_POINTS_PER_VOXEL,
                max_voxels=self.MAX_NUMBER_OF_VOXELS)
            grid_size = (self.point_cloud_range[3:6] -
                         self.point_cloud_range[0:3]) / np.array(
                             self.VOXEL_SIZE)
            self.grid_size = np.round(grid_size).astype(np.int64)
            self.voxel_size = self.VOXEL_SIZE
        elif cfg["VOX"]:
            augment_data = (self.split == "TRAIN")
            #### Vox parameters here
            self.VOXEL_SIZE = 0.05  #0.02 # 5cm
            self.CLIP_BOUND = None  #(-1000, -1000, -1000, 1000, 1000, 1000)

            self.data_aug_color_trans_ratio = 0.1
            self.data_aug_color_jitter_std = 0.05
            self.ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6))

            if augment_data:
                self.prevoxel_transform_train = []
                self.prevoxel_transform_train.append(
                    transforms.ElasticDistortion(self.ELASTIC_DISTORT_PARAMS))
                self.prevoxel_transform = transforms.Compose(
                    self.prevoxel_transform_train)

                self.input_transforms = []
                self.input_transforms += [
                    transforms.RandomDropout(0.2),
                    transforms.RandomHorizontalFlip('z', False),
                    #transforms.ChromaticAutoContrast(),
                    transforms.ChromaticTranslation(
                        self.data_aug_color_trans_ratio),
                    transforms.ChromaticJitter(self.data_aug_color_jitter_std),
                    # t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max),
                ]
                self.input_transforms = transforms.Compose(
                    self.input_transforms)

            # Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate
            # augmentation has to be done before voxelization
            self.SCALE_AUGMENTATION_BOUND = (0.9, 1.1)
            self.ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64),
                                                (-np.pi / 64,
                                                 np.pi / 64), (-np.pi, np.pi))
            self.TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2),
                                                         (-0.2, 0.2), (0, 0))

            self.voxelizer = Voxelizer(
                voxel_size=self.VOXEL_SIZE,
                clip_bound=self.CLIP_BOUND,
                use_augmentation=augment_data,
                scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND,
                rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND,
                translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND,
                ignore_label=True)

    def _get_data_files(self, split):
        local_rank = int(os.environ.get("LOCAL_RANK", 0))

        self.data_paths = self.cfg["DATA_PATHS"]
        self.label_paths = []

        logging.info(f"Rank: {local_rank} Data files:\n{self.data_paths}")
        logging.info(f"Rank: {local_rank} Label files:\n{self.label_paths}")

    def _augment_coords_to_feats(self, coords, feats, labels=None):
        # Center x,y
        coords_center = coords.mean(0, keepdims=True)
        coords_center[0, 2] = 0
        norm_coords = coords - coords_center
        feats = np.concatenate((feats, norm_coords), 1)
        return coords, feats, labels

    def toVox(self, coords, feats, labels):
        if "Lidar" in self.cfg:
            voxel_output = self.voxel_generator.generate(coords)
            if isinstance(voxel_output, dict):
                voxels, coordinates, num_points = \
                                                  voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
            else:
                voxels, coordinates, num_points = voxel_output

            data_dict = {}
            data_dict['voxels'] = voxels
            data_dict['voxel_coords'] = coordinates
            data_dict['voxel_num_points'] = num_points
            return data_dict
        else:
            precoords = np.copy(coords)
            prefeats = np.copy(feats)
            if (self.split == "TRAIN") and (self.prevoxel_transform is not None):
                coords, feats, labels = self.prevoxel_transform(
                    coords, feats, labels)
            coords, feats, labels, transformation = self.voxelizer.voxelize(
                coords, feats, labels)
            if (self.split == "TRAIN") and (self.input_transforms is not None):
                try:
                    coords, feats, labels = self.input_transforms(
                        coords, feats, labels)
                except Exception:
                    print("error with: ", coords.shape)
                    coords = np.zeros((100, 3), dtype=np.int32)
                    feats = np.zeros((100, 3), dtype=np.float64)
                    labels = np.zeros((100, ), dtype=np.int32)
            if (self.split == "TRAIN") and (self.AUGMENT_COORDS_TO_FEATS):
                coords, feats, labels = self._augment_coords_to_feats(
                    coords, feats, labels)
            return (coords, feats, labels)

    def load_data(self, idx):
        is_success = True
        point_path = self.data_objs[idx]
        try:
            if "Lidar" in self.cfg:
                #point = np.load(point_path)
                point = np.fromfile(str(point_path), dtype=np.float32).reshape(
                    -1, 4)  #np.load(point_path)
                if point.shape[1] != 4:
                    temp = np.zeros((point.shape[0], 4))
                    temp[:, :3] = point
                    point = np.copy(temp)

                upper_idx = np.sum(
                    (point[:, 0:3] <= POINT_RANGE[3:6]).astype(np.int32),
                    1) == 3
                lower_idx = np.sum(
                    (point[:, 0:3] >= POINT_RANGE[0:3]).astype(np.int32),
                    1) == 3

                new_pointidx = (upper_idx) & (lower_idx)
                point = point[new_pointidx, :]
            else:
                point = np.load(point_path)
                ### Add height
                floor_height = np.percentile(point[:, 2], 0.99)
                height = point[:, 2] - floor_height
                point = np.concatenate([point, np.expand_dims(height, 1)], 1)
        except Exception as e:
            logging.warning(
                f"Couldn't load: {point_path}. Exception: \n{e}")
            point = np.zeros([50000, 7])
            is_success = False
        return point, is_success

    def __getitem__(self, idx):

        cfg = self.cfg
        # TODO: this doesn't yet handle the case where the length of datasets
        # could be different.
        if cfg["DATA_TYPE"] == "point_vox":
            item = {
                "data": [],
                "data_valid": [],
                "data_moco": [],
                "vox": [],
                "vox_moco": []
            }

            data, valid = self.load_data(idx)
            item["data"].append(data)
            item["data_moco"].append(np.copy(data))
            item["vox"].append(np.copy(data))
            item["vox_moco"].append(np.copy(data))
            item["data_valid"].append(1 if valid else -1)
        else:
            item = {
                "data": [],
                "data_moco": [],
                "data_valid": [],
                "data_idx": []
            }

            data, valid = self.load_data(idx)
            item["data"].append(data)
            item["data_moco"].append(np.copy(data))
            item["data_valid"].append(1 if valid else -1)

        ### Make copies for moco setting
        item["label"] = []
        item["label"].append(idx)

        ### Apply the transformation here
        if (cfg["DATA_TYPE"] == "point_vox"):
            tempitem = {"data": item["data"]}
            tempdata = get_transform3d(tempitem, cfg["POINT_TRANSFORMS"])
            item["data"] = tempdata["data"]

            tempitem = {"data": item["data_moco"]}
            tempdata = get_transform3d(tempitem, cfg["POINT_TRANSFORMS"])
            item["data_moco"] = tempdata["data"]

            tempitem = {"data": item["vox"]}
            tempdata = get_transform3d(tempitem,
                                       cfg["POINT_TRANSFORMS"],
                                       vox=True)
            coords = tempdata["data"][0][:, :3]
            feats = tempdata["data"][0][:, 3:6] * 255.0  # np.ones(coords.shape)*255.0
            labels = np.zeros(coords.shape[0]).astype(np.int32)
            item["vox"] = [self.toVox(coords, feats, labels)]

            tempitem = {"data": item["vox_moco"]}
            tempdata = get_transform3d(tempitem,
                                       cfg["POINT_TRANSFORMS"],
                                       vox=True)
            coords = tempdata["data"][0][:, :3]
            feats = tempdata["data"][0][:, 3:6] * 255.0  # np.ones(coords.shape)*255.0
            labels = np.zeros(coords.shape[0]).astype(np.int32)
            item["vox_moco"] = [self.toVox(coords, feats, labels)]
        else:
            tempitem = {"data": item["data"]}
            tempdata = get_transform3d(tempitem,
                                       cfg["POINT_TRANSFORMS"],
                                       vox=cfg["VOX"])
            if cfg["VOX"]:
                coords = tempdata["data"][0][:, :3]
                feats = tempdata["data"][0][:, 3:6] * 255.0
                labels = np.zeros(coords.shape[0]).astype(np.int32)
                item["data"] = [self.toVox(coords, feats, labels)]
            else:
                item["data"] = tempdata["data"]

            tempitem = {"data": item["data_moco"]}
            tempdata = get_transform3d(tempitem,
                                       cfg["POINT_TRANSFORMS"],
                                       vox=cfg["VOX"])
            if cfg["VOX"]:
                coords = tempdata["data"][0][:, :3]
                feats = tempdata["data"][0][:, 3:6] * 255.0  # np.ones(coords.shape)*255.0
                labels = np.zeros(coords.shape[0]).astype(np.int32)
                item["data_moco"] = [self.toVox(coords, feats, labels)]
            else:
                item["data_moco"] = tempdata["data"]

        return item

    def __len__(self):
        return len(self.data_objs)

    def get_available_splits(self, dataset_config):
        return [
            key for key in dataset_config if key.lower() in ["train", "test"]
        ]

    def num_samples(self, source_idx=0):
        return len(self.data_objs)

    def get_batchsize_per_replica(self):
        # this searches for batchsize_per_replica in self and then in self.dataset
        return getattr(self, "batchsize_per_replica", 1)

    def get_global_batchsize(self):
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            world_size = torch.distributed.get_world_size()
        else:
            world_size = 1
        return self.get_batchsize_per_replica() * world_size
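
DepthContrastDataset is driven entirely by its cfg dictionary, plus several names (POINT_RANGE, VoxelGenerator, transforms, Voxelizer, get_transform3d) that must already exist at module level. The sketch below lists only the keys the code above actually reads; every value is a placeholder, not taken from the original project.

cfg = {
    "BATCHSIZE_PER_REPLICA": 8,
    "DATASET_NAMES": ["dummy_lidar"],            # placeholder
    "LABEL_TYPE": "none",                        # placeholder
    "DATA_PATHS": ["/path/to/points_list.npy"],  # only the first entry is loaded
    "DATA_TYPE": "point_vox",                    # anything else selects the point-only branch
    "VOX": True,
    "Lidar": True,                               # key presence (not value) selects the spconv path
    "POINT_TRANSFORMS": [],                      # forwarded to get_transform3d
}
# dataset = DepthContrastDataset(cfg)            # also needs the module-level dependencies above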
Example 9
    def transform_points_to_voxels(self,
                                   data_dict=None,
                                   config=None,
                                   voxel_generator=None,
                                   voxel_generator_2=None):
        if data_dict is None:
            try:
                from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
            except ImportError:
                from spconv.utils import VoxelGenerator

            voxel_generator = VoxelGenerator(
                voxel_size=config.VOXEL_SIZE,
                point_cloud_range=self.point_cloud_range,
                max_num_points=config.MAX_POINTS_PER_VOXEL,
                max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode])
            #voxel_generator_2 = VoxelGenerator(
            #    voxel_size=config.VOXEL_SIZE,
            #    point_cloud_range=self.point_cloud_range,
            #    max_num_points=config.MAX_POINTS_PER_VOXEL,
            #    max_voxels=60000
            #)
            grid_size = (self.point_cloud_range[3:6] -
                         self.point_cloud_range[0:3]) / np.array(
                             config.VOXEL_SIZE)
            self.grid_size = np.round(grid_size).astype(np.int64)
            self.voxel_size = config.VOXEL_SIZE
            return partial(self.transform_points_to_voxels,
                           voxel_generator=voxel_generator,
                           voxel_generator_2=voxel_generator_2)

        points = data_dict['points']
        points = data_dict['points_sp']
        indices = data_dict['indices']
        #print(points.shape)
        """
           add code for visibility
        """
        ori_points = points[:, [0, 1, 2, 4]]
        #print(points[:,-1])
        voxel_size = self.voxel_size
        pc_range = self.point_cloud_range
        #print(pc_range)
        #print(self.voxel_size)
        origins = data_dict['origins']
        num_points = points.shape[0]
        num_original = num_points
        time_stamps = np.array([0], dtype=np.float32)
        # counting on the fact we do not miss points from any intermediate time_stamps
        time_stamps = points[indices[:-1], -1]
        time_stamps = (time_stamps[:-1] + time_stamps[1:]) / 2
        time_stamps = [-1000.0] + time_stamps.tolist() + [1000.0]  # add boundaries
        time_stamps = np.array(time_stamps)
        num_original = indices[-1]
        #print(time_stamps)
        #print(points.shape)
        #sys.exit()
        if num_points > num_original:
            #print("this is test sample")
            original_points = ori_points[:num_original, :]
            sampled_points = ori_points[num_original:, :]
            visibility, original_mask, sampled_mask = mapping.compute_logodds_and_masks(
                original_points, sampled_points, origins, time_stamps,
                pc_range, min(voxel_size))
            points = np.concatenate(
                (original_points[original_mask], sampled_points[sampled_mask]))
        else:

            visibility = mapping.compute_logodds(ori_points, origins,
                                                 time_stamps, pc_range, 0.2)

        np.set_printoptions(threshold=sys.maxsize)
        visi_map = np.zeros([512, 512, 3])
        visibility = np.int64(visibility)
        visibility = np.reshape(visibility, (40, 512, 512))[0:40, :, :]
        visibility = np.transpose(visibility, (2, 1, 0))
        #print(visibility)
        #sys.exit()
        mask_occ = (visibility >= 1).nonzero()
        #print(mask_occ)
        mask_free = (visibility == 0).nonzero()
        mask_unknown = (visibility == -1).nonzero()
        visi_map[np.int64(mask_free[0]),
                 np.int64(mask_free[1]), :] = np.array([255, 0, 0]) / 255
        visi_map[np.int64(mask_occ[0]),
                 np.int64(mask_occ[1]), :] = np.array([0, 255, 0]) / 255
        visi_map[mask_unknown[0],
                 mask_unknown[1], :] = np.array([0, 0, 255]) / 255
        #print(.shape)
        #visibility = np.pad(visibility, ((0,2),(0,0)), 'edge')
        data_dict['vis'] = visibility
        #print(data_dict.keys())
        dense_points = data_dict['dense_point']
        points = data_dict['points']
        #print(points.shape)
        voxel_output = voxel_generator.generate(points)
        #voxel_dense = voxel_generator.generate(dense_points)
        #print(voxel_dense[...,-1])
        #print(voxel_output['voxels'].shape)
        #sys.exit()
        if isinstance(voxel_output, dict):
            voxels, coordinates, num_points = \
                voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
        else:
            voxels, coordinates, num_points = voxel_output

        if not data_dict['use_lead_xyz']:
            voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)
        #data_dict['dense_pillar'] = voxel_dense['voxels']
        #data_dict['dense_pillar_coords'] = voxel_dense['coordinates']
        data_dict['voxels'] = voxels
        data_dict['voxel_coords'] = coordinates
        data_dict['voxel_num_points'] = num_points
        return data_dict
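
Beyond the shared voxelization tail, the distinctive part of this example is the visibility map: mapping.compute_logodds (from the surrounding project) returns per-cell log-odds values that are reshaped to a 3-D grid and color-coded into free, occupied and unknown cells. The reduced 2-D sketch below reproduces only that color-coding step on a toy grid; the 40x512x512 shape and the mapping module itself are not part of the sketch.

import numpy as np

visibility = np.array([[1, 0],
                       [-1, 1]])                 # toy grid: >=1 occupied, 0 free, -1 unknown
visi_map = np.zeros(visibility.shape + (3,))

occ = (visibility >= 1).nonzero()
free = (visibility == 0).nonzero()
unknown = (visibility == -1).nonzero()
visi_map[free[0], free[1]] = np.array([255, 0, 0]) / 255        # free -> red
visi_map[occ[0], occ[1]] = np.array([0, 255, 0]) / 255          # occupied -> green
visi_map[unknown[0], unknown[1]] = np.array([0, 0, 255]) / 255  # unknown -> blue
print(visi_map[:, :, 1])                         # green channel marks occupied cells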