def __init__(self, vsize_xyz, coors_range_xyz, num_point_features, max_num_points_per_voxel, max_num_voxels):
    # Resolve the voxel generator across spconv versions:
    # spconv 1.x exposes VoxelGeneratorV2 / VoxelGenerator, spconv 2.x exposes Point2VoxelCPU3d.
    try:
        from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
        self.spconv_ver = 1
    except:
        try:
            from spconv.utils import VoxelGenerator
            self.spconv_ver = 1
        except:
            from spconv.utils import Point2VoxelCPU3d as VoxelGenerator
            self.spconv_ver = 2

    if self.spconv_ver == 1:
        self._voxel_generator = VoxelGenerator(
            voxel_size=vsize_xyz,
            point_cloud_range=coors_range_xyz,
            max_num_points=max_num_points_per_voxel,
            max_voxels=max_num_voxels
        )
    else:
        self._voxel_generator = VoxelGenerator(
            vsize_xyz=vsize_xyz,
            coors_range_xyz=coors_range_xyz,
            num_point_features=num_point_features,
            max_num_points_per_voxel=max_num_points_per_voxel,
            max_num_voxels=max_num_voxels
        )
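# The __init__ above only builds the generator; the spconv 2.x Point2VoxelCPU3d path works on
# cumm tensorview tensors rather than numpy arrays, so a matching generate() is needed.
# The following is a minimal sketch of such a method, assuming cumm.tensorview is importable
# (it ships alongside spconv 2.x); the exact error handling and naming here are illustrative,
# not code from this file.
def generate(self, points):
    if self.spconv_ver == 1:
        # spconv 1.x: VoxelGeneratorV2 returns a dict, older VoxelGenerator returns a tuple.
        voxel_output = self._voxel_generator.generate(points)
        if isinstance(voxel_output, dict):
            voxels, coordinates, num_points = (
                voxel_output['voxels'],
                voxel_output['coordinates'],
                voxel_output['num_points_per_voxel'],
            )
        else:
            voxels, coordinates, num_points = voxel_output
    else:
        # spconv 2.x: Point2VoxelCPU3d consumes/produces tensorview tensors.
        from cumm import tensorview as tv  # assumed available with spconv 2.x
        tv_voxels, tv_coordinates, tv_num_points = \
            self._voxel_generator.point_to_voxel(tv.from_numpy(points))
        # Copy back to numpy so downstream code sees a uniform interface.
        voxels = tv_voxels.numpy()
        coordinates = tv_coordinates.numpy()
        num_points = tv_num_points.numpy()
    return voxels, coordinates, num_points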
def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
    if data_dict is None:
        from spconv.utils import VoxelGenerator
        voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=config.MAX_POINTS_PER_VOXEL,
            max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
        )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = config.VOXEL_SIZE
        return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)

    points = data_dict['points']
    voxels, coordinates, num_points = voxel_generator.generate(points)

    if not data_dict['use_lead_xyz']:
        voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

    data_dict['voxels'] = voxels
    data_dict['voxel_coords'] = coordinates
    data_dict['voxel_num_points'] = num_points
    return data_dict
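# Usage sketch for the partial-based pattern above: the method is called once at
# dataset-init time with data_dict=None to build the VoxelGenerator and cache
# grid_size/voxel_size on self, and the returned functools.partial is then applied
# per sample. The names `dataset` and `voxel_cfg` below are illustrative assumptions
# (an OpenPCDet-style DatasetTemplate and its voxelization config entry), not code
# from this file.
#
#   process_fn = dataset.transform_points_to_voxels(data_dict=None, config=voxel_cfg)
#   data_dict = process_fn(data_dict=data_dict)
#   # data_dict now contains 'voxels', 'voxel_coords', and 'voxel_num_points'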
def dataset_init(self, class_names, logger):
    self.db_sampler = None
    db_sampler_cfg = cfg.DATA_CONFIG.AUGMENTATION.DB_SAMPLER
    if self.training and db_sampler_cfg.ENABLED:
        db_infos = []
        for db_info_path in db_sampler_cfg.DB_INFO_PATH:
            db_info_path = cfg.ROOT_DIR / db_info_path
            with open(str(db_info_path), 'rb') as f:
                infos = pickle.load(f)
                if len(db_infos) == 0:
                    db_infos = infos
                else:
                    [db_infos[cls].extend(infos[cls]) for cls in db_infos.keys()]
        self.db_sampler = DataBaseSampler(
            db_infos=db_infos, sampler_cfg=db_sampler_cfg, class_names=class_names, logger=logger
        )

    voxel_generator_cfg = cfg.DATA_CONFIG.VOXEL_GENERATOR
    self.voxel_generator = VoxelGenerator(
        voxel_size=voxel_generator_cfg.VOXEL_SIZE,
        point_cloud_range=cfg.DATA_CONFIG.POINT_CLOUD_RANGE,
        max_num_points=voxel_generator_cfg.MAX_POINTS_PER_VOXEL
    )
def transform_augmented_lidar_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
    if data_dict is None:
        try:
            from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
        except:
            from spconv.utils import VoxelGenerator

        voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=config.MAX_POINTS_PER_VOXEL,
            max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
        )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = config.VOXEL_SIZE
        self.voxel_augmented_lidar_labels = config.VOXEL_AUGMENTED_LIDAR_LABELS
        return partial(self.transform_augmented_lidar_points_to_voxels, voxel_generator=voxel_generator)

    points = data_dict['points']
    voxel_output = voxel_generator.generate(points)
    if isinstance(voxel_output, dict):
        voxels, coordinates, num_points = \
            voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
    else:
        voxels, coordinates, num_points = voxel_output

    assert data_dict['use_lead_xyz'], 'use_lead_xyz is set False! xyz should be used for voxelization!'

    if self.mode == 'train':
        voxel_labels = voxels[..., 3:]  # [M, max_points, ndim] float tensor, only contains points
        assert voxel_labels.shape[-1] == len(self.voxel_augmented_lidar_labels), \
            'voxel_labels.shape[-1]=' + str(voxel_labels.shape[-1]) + \
            ' len(self.voxel_augmented_lidar_labels)=' + str(len(self.voxel_augmented_lidar_labels))
        for idx, x in enumerate(self.voxel_augmented_lidar_labels):
            if x == 'drivable_area_bool':
                data_dict['voxels_drivable_cls_labels'] = np.max(voxel_labels[:, :, idx], axis=1).reshape(-1, 1)  # binary 1 or 0
            elif x == 'lidar_ground_bool':
                data_dict['voxels_ground_cls_labels'] = np.max(voxel_labels[:, :, idx], axis=1).reshape(-1, 1)  # binary 1 or 0
            elif x == 'ground_height':
                data_dict['voxels_ground_reg_labels'] = np.min(voxel_labels[:, :, idx], axis=1).reshape(-1, 1)  # float

    voxels = voxels[..., :3]  # use xyz as voxel data
    data_dict['voxels'] = voxels
    data_dict['voxel_coords'] = coordinates
    data_dict['voxel_num_points'] = num_points
    return data_dict
def dataset_init(self, class_names, logger):
    self.db_sampler = None
    db_sampler_cfg = cfg.DATA_CONFIG.AUGMENTATION.DB_SAMPLER
    if self.training and db_sampler_cfg.ENABLED:
        db_infos = []
        for db_info_path in db_sampler_cfg.DB_INFO_PATH:
            db_info_path = cfg.ROOT_DIR / db_info_path
            with open(str(db_info_path), 'rb') as f:
                infos = pickle.load(f)
                if len(db_infos) == 0:
                    db_infos = infos
                else:
                    [db_infos[cls].extend(infos[cls]) for cls in db_infos.keys()]
        self.db_sampler = DataBaseSampler(
            db_infos=db_infos, sampler_cfg=db_sampler_cfg, class_names=class_names, logger=logger
        )

    voxel_generator_cfg = cfg.DATA_CONFIG.VOXEL_GENERATOR
    self.voxel_generator = VoxelGenerator(
        voxel_size=voxel_generator_cfg.VOXEL_SIZE,
        point_cloud_range=cfg.DATA_CONFIG.POINT_CLOUD_RANGE,
        max_num_points=voxel_generator_cfg.MAX_POINTS_PER_VOXEL
    )

    if cfg.MODEL.NAME == 'MVF':
        self.mvf_feature = PointCloudVoxel(
            0,
            voxel_generator_cfg.BEV_FEATURE_SIZE_XY[0],
            voxel_generator_cfg.BEV_FEATURE_SIZE_XY[1],
            1,
            voxel_generator_cfg.BEV_RANGE[0],
            voxel_generator_cfg.BEV_RANGE[1],
            voxel_generator_cfg.BEV_RANGE[2],
            voxel_generator_cfg.BEV_RANGE[3],
            voxel_generator_cfg.BEV_RANGE[4],
            voxel_generator_cfg.BEV_RANGE[5],
            0,
            voxel_generator_cfg.FV_FEATURE_SIZE_ROW_COL[0],
            voxel_generator_cfg.FV_FEATURE_SIZE_ROW_COL[1],
            voxel_generator_cfg.FV_RANGE_THETA_PHI[0],
            voxel_generator_cfg.FV_RANGE_THETA_PHI[1],
            voxel_generator_cfg.FV_RANGE_THETA_PHI[2],
            voxel_generator_cfg.FV_RANGE_THETA_PHI[3]
        )
def sponv_process_pointcloud(point_cloud, template=False, is_testset=False):
    if not is_testset:
        point_cloud = shuffle_points(point_cloud)
        max_number_of_voxel = 16000
    else:
        max_number_of_voxel = 40000

    try:
        from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
    except:
        from spconv.utils import VoxelGenerator

    if template:
        point_cloud_range = [cfg.TEMPLATE_X_MIN, cfg.TEMPLATE_Y_MIN, cfg.TEMPLATE_Z_MIN,
                             cfg.TEMPLATE_X_MAX, cfg.TEMPLATE_Y_MAX, cfg.TEMPLATE_Z_MAX]
    else:
        point_cloud_range = [cfg.SCENE_X_MIN, cfg.SCENE_Y_MIN, cfg.SCENE_Z_MIN,
                             cfg.SCENE_X_MAX, cfg.SCENE_Y_MAX, cfg.SCENE_Z_MAX]

    voxel_size = np.array([cfg.VOXEL_X_SIZE, cfg.VOXEL_Y_SIZE, cfg.VOXEL_Z_SIZE], dtype=np.float32)
    max_point_number = cfg.VOXEL_POINT_COUNT

    voxel_generator = VoxelGenerator(
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range,
        max_num_points=max_point_number,
        max_voxels=max_number_of_voxel
    )

    voxel_output = voxel_generator.generate(point_cloud)
    if isinstance(voxel_output, dict):
        voxels, coordinates, num_points = \
            voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
    else:
        voxels, coordinates, num_points = voxel_output

    voxel_dict = {
        'feature_buffer': voxels,
        'coordinate_buffer': coordinates,
        'number_buffer': num_points
    }
    return voxel_dict
def initialize_model(args):
    global model, voxel_generator
    cfg = Config.fromfile(args.config)
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    if args.checkpoint is not None:
        load_checkpoint(model, args.checkpoint, map_location="cpu")
    # print(model)
    if args.fp16:
        print("cast model to fp16")
        model = model.half()
    model = model.cuda()
    model.eval()

    global device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    range = cfg.voxel_generator.range
    voxel_size = cfg.voxel_generator.voxel_size
    max_points_in_voxel = cfg.voxel_generator.max_points_in_voxel
    max_voxel_num = cfg.voxel_generator.max_voxel_num[1]
    voxel_generator = VoxelGenerator(
        voxel_size=voxel_size,
        point_cloud_range=range,
        max_num_points=max_points_in_voxel,
        max_voxels=max_voxel_num
    )
    return model
def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
    if data_dict is None:
        try:
            from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
        except:
            from spconv.utils import VoxelGenerator

        voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=config.MAX_POINTS_PER_VOXEL,
            max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
        )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = config.VOXEL_SIZE
        return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)

    points = data_dict['points']
    color = get_color(data_dict)
    points = np.concatenate([points, color], axis=1)

    voxel_output = voxel_generator.generate(points)
    if isinstance(voxel_output, dict):
        voxels, coordinates, num_points = \
            voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
    else:
        voxels, coordinates, num_points = voxel_output

    if not data_dict['use_lead_xyz']:
        voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

    data_dict['voxels'] = voxels
    data_dict['voxel_coords'] = coordinates
    data_dict['voxel_num_points'] = num_points
    return data_dict
def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
    if data_dict is None:
        try:
            from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
        except:
            from spconv.utils import VoxelGenerator

        voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=config.MAX_POINTS_PER_VOXEL,
            max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
        )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = config.VOXEL_SIZE
        self.max_voxels = config.MAX_NUMBER_OF_VOXELS[self.mode]
        self.skip_voxel_generator = config.get('SKIP_VOXEL_GENERATOR', False)
        return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)

    if self.skip_voxel_generator:
        # Skip spconv voxelization: keep each in-range point as its own "voxel" and
        # compute its integer grid coordinate directly from the point cloud range.
        points = data_dict['points']
        pc_range_min = np.array(self.point_cloud_range[:3]).reshape(-1, 3)
        voxel_size_array = np.array(self.voxel_size).reshape(-1, 3)
        keep = common_utils.mask_points_by_range_hard(points, self.point_cloud_range)
        chosen_points = points[keep]
        chosen_points = chosen_points[:self.max_voxels, :]
        coords = (chosen_points[:, :3] - pc_range_min) // voxel_size_array
        coords = coords.astype(int)
        num_points = np.ones(chosen_points.shape[0])
        data_dict['voxels'] = chosen_points
        data_dict['voxel_coords'] = coords[:, [2, 1, 0]]  # reorder xyz -> zyx
        data_dict['voxel_num_points'] = num_points
    else:
        points = data_dict['points']
        voxel_output = voxel_generator.generate(points)
        if isinstance(voxel_output, dict):
            voxels, coordinates, num_points = \
                voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
        else:
            voxels, coordinates, num_points = voxel_output

        if not data_dict['use_lead_xyz']:
            voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

        data_dict['voxels'] = voxels
        data_dict['voxel_coords'] = coordinates
        data_dict['voxel_num_points'] = num_points

    return data_dict
def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
    # Called at dataset/class initialization time (data_dict is None on the first call).
    if data_dict is None:
        try:
            from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
        except:
            from spconv.utils import VoxelGenerator

        voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=config.MAX_POINTS_PER_VOXEL,
            max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
        )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = config.VOXEL_SIZE
        return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)

    points = data_dict['points']

    # VoxelGeneratorV2.generate() returns a dict of the form:
    # res = {
    #     "voxels": voxels,
    #     "coordinates": coors,
    #     "num_points_per_voxel": num_points_per_voxel,
    #     "voxel_point_mask": res["voxel_point_mask"].reshape(-1, max_points, 1),
    #     "voxel_num": voxel_num,
    # }
    voxel_output = voxel_generator.generate(points)
    if isinstance(voxel_output, dict):
        voxels, coordinates, num_points = \
            voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
    else:
        voxels, coordinates, num_points = voxel_output

    if not data_dict['use_lead_xyz']:  # True
        voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

    data_dict['voxels'] = voxels
    data_dict['voxel_coords'] = coordinates
    data_dict['voxel_num_points'] = num_points
    return data_dict
def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
    if data_dict is None:
        try:
            from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
        except:
            from spconv.utils import VoxelGenerator

        voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=config.MAX_POINTS_PER_VOXEL,
            max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
        )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = config.VOXEL_SIZE
        return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)

    points = data_dict['points']
    voxel_output = voxel_generator.generate(points)
    if isinstance(voxel_output, dict):
        voxels, coordinates, num_points = \
            voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
    else:
        voxels, coordinates, num_points = voxel_output

    if not data_dict['use_lead_xyz']:
        voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

    data_dict['voxels'] = voxels
    data_dict['voxel_coords'] = coordinates
    data_dict['voxel_num_points'] = num_points

    # Mean-pool the points in each voxel to obtain a per-voxel feature vector.
    voxel_features, voxel_num_points = data_dict['voxels'], data_dict['voxel_num_points']
    points_mean = np.sum(voxel_features[:, :, :], axis=1, keepdims=False)
    num_points = num_points.astype(voxels.dtype).reshape((-1, 1))
    normalizer = np.clip(num_points, a_min=1.0, a_max=6.0)
    points_mean = points_mean / normalizer
    data_dict['voxel_features'] = points_mean
    return data_dict
def transform_points_to_rangevoxels(self, data_dict=None, config=None, voxel_generator=None):
    if data_dict is None:
        from sphconv import VoxelGenerator
        voxel_generator = VoxelGenerator(
            config.v_res, config.h_res, config.d_res,
            config.v_range, config.h_range, config.d_range,
            config.log
        )
        self.grid_size = np.array([config.d_res, config.h_res, config.v_res]).astype(np.int64)
        return partial(self.transform_points_to_rangevoxels, voxel_generator=voxel_generator)

    points = data_dict['points']
    rangeV = voxel_generator.generate(points)
    data_dict['rangeV'] = rangeV
    return data_dict
def build(voxel_config):
    """Builds a VoxelGenerator from a voxel generator config.

    Args:
        voxel_config: A voxel_generator_pb2.VoxelGenerator object.

    Returns:
        A VoxelGenerator configured according to voxel_config.

    Raises:
        ValueError: If voxel_config is not of the expected proto type.
    """
    if not isinstance(voxel_config, voxel_generator_pb2.VoxelGenerator):
        raise ValueError('voxel_config not of type '
                         'voxel_generator_pb2.VoxelGenerator.')
    voxel_generator = VoxelGenerator(
        voxel_size=list(voxel_config.voxel_size),
        point_cloud_range=list(voxel_config.point_cloud_range),
        max_num_points=voxel_config.max_number_of_points_per_voxel,
        max_voxels=20000)
    return voxel_generator
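# Hedged usage sketch for build(): construct the VoxelGenerator message from a
# text-format config and pass it in. The field names follow the attributes read in
# build() above; the `second.protos` import path is an assumption about where
# voxel_generator_pb2 lives and may differ in your checkout.
#
# from google.protobuf import text_format
# from second.protos import voxel_generator_pb2  # assumed module path
#
# cfg_text = """
# voxel_size: 0.05
# voxel_size: 0.05
# voxel_size: 0.1
# point_cloud_range: 0.0
# point_cloud_range: -40.0
# point_cloud_range: -3.0
# point_cloud_range: 70.4
# point_cloud_range: 40.0
# point_cloud_range: 1.0
# max_number_of_points_per_voxel: 5
# """
# voxel_generator = build(text_format.Parse(cfg_text, voxel_generator_pb2.VoxelGenerator()))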
def noise_robustness(batch_size, ret):
    """
    Add noise points around every ground truth box for SECOND and Point-RCNN.
    The idea is proposed in TANet (AAAI 2020), https://arxiv.org/pdf/1912.05163.pdf.

    How to use: For now, this function should be added to line 179 of pcdet/datasets/dataset.py
    TODO: Integrate with pcdet/datasets/dataset.py

    Change no_of_pts to the number of uniformly generated noise points desired around each GT bounding box.

    :param batch_size:
    :param ret:
    :return:
    """
    if True:
        np.random.seed(0)  # Numpy module.
        voxels_flag = True
        no_of_pts = 100
        voxel_list = []
        coords_list = []
        num_points_list = []
        for k in range(batch_size):
            # Scatter noise points in a band around each GT box of sample k.
            for i_box in range(len(ret['gt_boxes'][k])):
                bbox = ret['gt_boxes'][k][i_box]
                cx = bbox[0]
                cy = bbox[1]
                cz = bbox[2]
                l = bbox[3]
                w = bbox[4]
                h = bbox[5]
                z1 = np.random.uniform(cx + l // 2, cx + 3 * l, (no_of_pts // 2, 1))
                z2 = np.random.uniform(cx - l // 2, cx - 3 * l, (no_of_pts // 2, 1))
                z = np.concatenate([z1, z2], 0)
                y1 = np.random.uniform(cy + w // 2, cy + 3 * w, (no_of_pts // 2, 1))
                y2 = np.random.uniform(cy - w // 2, cy - 3 * w, (no_of_pts // 2, 1))
                y = np.concatenate([y1, y2], 0)
                x1 = np.random.uniform(cz + h // 2, cz + 3 * h, (no_of_pts // 2, 1))
                x2 = np.random.uniform(cz - h // 2, cz - 3 * h, (no_of_pts // 2, 1))
                x = np.concatenate([x1, x2], 0)
                r = np.ones([no_of_pts, 1])
                b = np.zeros([no_of_pts, 1]) + k
                noise = np.concatenate([b, z, y, x, r], 1)
                ret['points'] = np.concatenate([ret['points'], noise], 0)

            if voxels_flag:
                # Re-voxelize sample k after the noise points have been appended.
                voxel_generator = VoxelGenerator(
                    voxel_size=[0.05, 0.05, 0.1],
                    point_cloud_range=[0, -40, -3, 70.4, 40, 1],
                    max_num_points=5,
                    max_voxels=40000,
                )
                batch_mask = ret['points'][:, 0] == k
                points_extract = ret['points'][batch_mask, :]
                voxels, coordinates, num_points = voxel_generator.generate(points_extract[:, 1:])
                voxel_list.append(voxels)
                coords_list.append(coordinates)
                num_points_list.append(num_points)

        if voxels_flag:
            ret['voxels'] = np.concatenate(voxel_list, axis=0)
            ret['voxel_num_points'] = np.concatenate(num_points_list, axis=0)
            coors = []
            for i, coor in enumerate(coords_list):
                coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
                coors.append(coor_pad)
            ret['voxel_coords'] = np.concatenate(coors, axis=0)

    return ret
def __init__(self, cfg):
    self.split = "train"  ### Default is training
    self.label_objs = []
    self.data_paths = []
    self.label_paths = []
    self.cfg = cfg
    self.batchsize_per_replica = cfg["BATCHSIZE_PER_REPLICA"]
    self.label_sources = []  # cfg["LABEL_SOURCES"]
    self.dataset_names = cfg["DATASET_NAMES"]
    self.label_type = cfg["LABEL_TYPE"]
    self.AUGMENT_COORDS_TO_FEATS = False  # optional
    self._labels_init = False
    self._get_data_files("train")
    self.data_objs = np.load(self.data_paths[0])  ### Only load the first one for now

    #### Add the voxelizer here
    if ("Lidar" in cfg) and cfg["VOX"]:
        self.VOXEL_SIZE = [0.1, 0.1, 0.2]
        self.point_cloud_range = POINT_RANGE  # np.array([0., -75., -3., 75.0, 75., 3.], dtype=np.float32)
        self.MAX_POINTS_PER_VOXEL = 5
        self.MAX_NUMBER_OF_VOXELS = 16000
        self.voxel_generator = VoxelGenerator(
            voxel_size=self.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=self.MAX_POINTS_PER_VOXEL,
            max_voxels=self.MAX_NUMBER_OF_VOXELS
        )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(self.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = self.VOXEL_SIZE
    elif cfg["VOX"]:
        # Note: self.split defaults to "train" (lowercase), so this comparison is False
        # unless split is upper-cased elsewhere.
        augment_data = (self.split == "TRAIN")

        #### Vox parameters here
        self.VOXEL_SIZE = 0.05  # 0.02 # 5cm
        self.CLIP_BOUND = None  # (-1000, -1000, -1000, 1000, 1000, 1000)
        self.data_aug_color_trans_ratio = 0.1
        self.data_aug_color_jitter_std = 0.05
        self.ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6))
        if augment_data:
            self.prevoxel_transform_train = []
            self.prevoxel_transform_train.append(transforms.ElasticDistortion(self.ELASTIC_DISTORT_PARAMS))
            self.prevoxel_transform = transforms.Compose(self.prevoxel_transform_train)

            self.input_transforms = []
            self.input_transforms += [
                transforms.RandomDropout(0.2),
                transforms.RandomHorizontalFlip('z', False),
                # transforms.ChromaticAutoContrast(),
                transforms.ChromaticTranslation(self.data_aug_color_trans_ratio),
                transforms.ChromaticJitter(self.data_aug_color_jitter_std),
                # t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max),
            ]
            self.input_transforms = transforms.Compose(self.input_transforms)

        # Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate
        # augmentation has to be done before voxelization
        self.SCALE_AUGMENTATION_BOUND = (0.9, 1.1)
        self.ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi, np.pi))
        self.TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (0, 0))
        self.voxelizer = Voxelizer(
            voxel_size=self.VOXEL_SIZE,
            clip_bound=self.CLIP_BOUND,
            use_augmentation=augment_data,
            scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND,
            rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND,
            translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND,
            ignore_label=True
        )
def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None, voxel_generator_2=None):
    if data_dict is None:
        try:
            from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
        except:
            from spconv.utils import VoxelGenerator

        voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=config.MAX_POINTS_PER_VOXEL,
            max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
        )
        # voxel_generator_2 = VoxelGenerator(
        #     voxel_size=config.VOXEL_SIZE,
        #     point_cloud_range=self.point_cloud_range,
        #     max_num_points=config.MAX_POINTS_PER_VOXEL,
        #     max_voxels=60000
        # )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = config.VOXEL_SIZE
        return partial(self.transform_points_to_voxels,
                       voxel_generator=voxel_generator,
                       voxel_generator_2=voxel_generator_2)

    points = data_dict['points']
    points = data_dict['points_sp']
    indices = data_dict['indices']

    # Visibility computation: ray-cast the points against the sensor origins to build an
    # occupancy / free / unknown log-odds volume.
    ori_points = points[:, [0, 1, 2, 4]]
    voxel_size = self.voxel_size
    pc_range = self.point_cloud_range
    origins = data_dict['origins']
    num_points = points.shape[0]
    num_original = num_points

    time_stamps = np.array([0], dtype=np.float32)
    time_stamps = points[indices[:-1], -1]  # counting on the fact we do not miss points from any intermediate time_stamps
    time_stamps = (time_stamps[:-1] + time_stamps[1:]) / 2
    time_stamps = [-1000.0] + time_stamps.tolist() + [1000.0]  # add boundaries
    time_stamps = np.array(time_stamps)
    num_original = indices[-1]

    if num_points > num_original:
        # Test-time sample: points beyond num_original were sampled/augmented separately.
        original_points, sampled_points = ori_points[:num_original, :], ori_points[num_original:, :]
        visibility, original_mask, sampled_mask = mapping.compute_logodds_and_masks(
            original_points, sampled_points, origins, time_stamps, pc_range, min(voxel_size))
        points = np.concatenate((original_points[original_mask], sampled_points[sampled_mask]))
    else:
        visibility = mapping.compute_logodds(ori_points, origins, time_stamps, pc_range, 0.2)

    np.set_printoptions(threshold=sys.maxsize)
    visi_map = np.zeros([512, 512, 3])
    visibility = np.int64(visibility)
    visibility = np.reshape(visibility, (40, 512, 512))[0:40, :, :]
    visibility = np.transpose(visibility, (2, 1, 0))

    # Color-code a BEV visibility map: free (red), occupied (green), unknown (blue).
    mask_occ = (visibility >= 1).nonzero()
    mask_free = (visibility == 0).nonzero()
    mask_unknown = (visibility == -1).nonzero()
    visi_map[np.int64(mask_free[0]), np.int64(mask_free[1]), :] = np.array([255, 0, 0]) / 255
    visi_map[np.int64(mask_occ[0]), np.int64(mask_occ[1]), :] = np.array([0, 255, 0]) / 255
    visi_map[mask_unknown[0], mask_unknown[1], :] = np.array([0, 0, 255]) / 255

    # visibility = np.pad(visibility, ((0, 2), (0, 0)), 'edge')
    data_dict['vis'] = visibility

    dense_points = data_dict['dense_point']
    points = data_dict['points']

    voxel_output = voxel_generator.generate(points)
    # voxel_dense = voxel_generator.generate(dense_points)
    if isinstance(voxel_output, dict):
        voxels, coordinates, num_points = \
            voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
    else:
        voxels, coordinates, num_points = voxel_output

    if not data_dict['use_lead_xyz']:
        voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)

    # data_dict['dense_pillar'] = voxel_dense['voxels']
    # data_dict['dense_pillar_coords'] = voxel_dense['coordinates']
    data_dict['voxels'] = voxels
    data_dict['voxel_coords'] = coordinates
    data_dict['voxel_num_points'] = num_points
    return data_dict