def test_quantization_size(self):
    coords = torch.randn((1000, 3), dtype=torch.float)
    feats = torch.randn((1000, 10), dtype=torch.float)
    res = sparse_quantize(coords, feats, quantization_size=0.1)
    print(res[0].shape, res[1].shape)
    res = sparse_quantize(coords.numpy(), feats.numpy(), quantization_size=0.1)
    print(res[0].shape, res[1].shape)
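# Hypothetical standalone sketch (not part of the tests above), assuming
# MinkowskiEngine's sparse_quantize: with features supplied it is expected to
# return (unique_coords, unique_feats), and a coarser quantization_size merges
# more points into each voxel, so both returned arrays shrink.
def _quantization_size_sketch():
    coords = torch.randn((1000, 3), dtype=torch.float)
    feats = torch.randn((1000, 10), dtype=torch.float)
    for voxel_size in (0.05, 0.1, 0.5):
        qcoords, qfeats = sparse_quantize(coords, feats, quantization_size=voxel_size)
        # Fewer unique voxels remain as the voxel size grows.
        print(voxel_size, qcoords.shape[0], qfeats.shape[0])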
def test_device(self):
    N = 16575
    coords = np.random.rand(N, 3) * 100
    # Make duplicates
    coords[:3] = 0

    unique_map = sparse_quantize(coords.astype(np.int32), return_maps_only=True, device="cpu")
    print(len(unique_map))
    unique_map = sparse_quantize(coords.astype(np.int32), return_maps_only=True, device="cuda")
    print(len(unique_map))
def test_collision(self):
    coords = np.array([[0, 0], [0, 0], [0, 0], [0, 1]], dtype=np.int32)
    labels = np.array([0, 1, 2, 3], dtype=np.int32)
    mapping, colabels = sparse_quantize(coords, labels=labels, ignore_label=255)
    print(mapping)
    print(colabels)

    coords = np.array([[0, 0], [0, 1]], dtype=np.int32)
    discrete_coords = sparse_quantize(coords)
    print(discrete_coords)
    discrete_coords = sparse_quantize(torch.from_numpy(coords))
    print(discrete_coords)
def test_label(self):
    N = 16575
    ignore_label = 255

    coords = (np.random.rand(N, 3) * 100).astype(np.int32)
    feats = np.random.rand(N, 4)
    labels = np.floor(np.random.rand(N) * 3)
    labels = labels.astype(np.int32)
    # Make duplicates
    coords[:3] = 0
    labels[:3] = 2

    mapping, colabels = MEB.quantize_label_np(coords, labels, ignore_label)
    print('Unique labels and counts:', np.unique(colabels, return_counts=True))
    print('N unique:', len(mapping), 'N:', N)

    mapping, colabels = MEB.quantize_label_th(torch.from_numpy(coords), torch.from_numpy(labels), ignore_label)
    print('Unique labels and counts:', np.unique(colabels, return_counts=True))
    print('N unique:', len(mapping), 'N:', N)

    qcoords, qfeats, qlabels = sparse_quantize(coords, feats, labels, ignore_label)
    self.assertTrue(len(mapping) == len(qcoords))
def slice_no_duplicate(self):
    coords, colors, pcd = load_file("1.ply")
    voxel_size = 0.02
    # Extract unique coords
    coords, colors = sparse_quantize(coords / voxel_size, colors)
    bcoords = batched_coordinates([coords], dtype=torch.float32)
    colors = torch.from_numpy(colors).float()
    tfield = TensorField(colors, bcoords)

    network = nn.Sequential(
        MinkowskiLinear(3, 16),
        MinkowskiBatchNorm(16),
        MinkowskiReLU(),
        MinkowskiLinear(16, 32),
        MinkowskiBatchNorm(32),
        MinkowskiReLU(),
        MinkowskiToSparseTensor(),
        MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
        MinkowskiConvolutionTranspose(64, 32, kernel_size=3, stride=2, dimension=3),
    )

    otensor = network(tfield)
    ofield = otensor.slice(tfield)
    self.assertEqual(len(tfield), len(ofield))
    self.assertEqual(ofield.F.size(1), otensor.F.size(1))

    ofield = otensor.cat_slice(tfield)
    self.assertEqual(len(tfield), len(ofield))
    self.assertEqual(ofield.F.size(1), otensor.F.size(1) + tfield.F.size(1))
def test_collision(self):
    coords = np.array([[0, 0], [0, 0], [0, 0], [0, 1]], dtype=np.int32)
    labels = np.array([0, 1, 2, 3], dtype=np.int32)
    unique_coords, colabels = sparse_quantize(coords, labels=labels, ignore_label=255)
    self.assertTrue(len(unique_coords) == 2)
    self.assertTrue(torch.IntTensor([0, 0]) in unique_coords)
    self.assertTrue(torch.IntTensor([0, 1]) in unique_coords)
    self.assertTrue(len(colabels) == 2)

    coords = np.array([[0, 0], [0, 1]], dtype=np.int32)
    discrete_coords = sparse_quantize(coords)
    self.assertTrue((discrete_coords == unique_coords).all())
    discrete_coords = sparse_quantize(torch.from_numpy(coords))
    self.assertTrue((discrete_coords == unique_coords).all())
def quantize_data(self, coords, feats, labels):
    # Create SparseTensor inputs: voxelize, shift to the origin, then deduplicate.
    coords = torch.floor(coords / self.config['voxel_size']).cpu()
    coords = coords - coords.min(0).values
    idxs = sparse_quantize(coords.numpy(), return_index=True, quantization_size=1)
    return coords[idxs], feats[idxs], labels[idxs]
def read(self, data):
    label = json.load(open(data[2]))
    with open(data[1], 'rb') as f:
        pcd = torch.Tensor(np.load(data[1]))  # allow_pickle=False

    r = torch.ones(pcd.shape[0]).view(-1, 1)
    label_map = torch.zeros(pcd.shape[0])
    instance_labels = torch.zeros(pcd.shape[0])
    vote_label = torch.zeros_like(pcd)
    center_label = []

    # Assign semantic / instance labels and vote offsets per annotated object.
    for i, l in enumerate(label):
        l_num = label_set.index(l['obj_type'])
        index, center = self.transform_index(l['psr'], pcd)
        label_map[index] = l_num
        instance_labels[index] = i + 1
        center_label.append(l_num)
        vote_label[index] = pcd[index] - center

    # Voxelize with a 0.05 voxel size and deduplicate.
    coords = (pcd - pcd.min(0)[0]) / 0.05
    coords = coords.int()
    ind = sparse_quantize(coords, feats=r.reshape(-1, 1), labels=label_map.int(),
                          ignore_label=0, return_index=True)

    instance_labels = instance_labels[ind[0]].long()
    vote_label = vote_label[ind[0]] / 0.05

    sp_data = {}
    sp_data["pos"] = pcd[ind[0]]
    sp_data["coords"] = coords[ind[0]]
    sp_data["rgb"] = r
    sp_data["y"] = ind[1].type(torch.LongTensor)
    sp_data["x"] = r[ind[0]]
    sp_data["instance_labels"] = instance_labels
    sp_data["center_label"] = torch.Tensor(center_label)
    sp_data["num_instances"] = torch.tensor(len(label))
    sp_data["instance_mask"] = instance_labels > 0
    sp_data["vote_label"] = vote_label

    sp_data = Data(**sp_data)
    return sp_data
def test_device2(self):
    print(f"{self.__class__.__name__}: test_device2 SparseTensor")
    if not is_cuda_available():
        return

    coordinates = np.random.rand(8192, 3) * 200
    quant_coordinates, quant_features = sparse_quantize(coordinates, coordinates)
    bcoords, bfeats = sparse_collate([quant_coordinates], [quant_features])
    bcoords, bfeats = bcoords.cuda(), bfeats.cuda()
    print(bcoords, bfeats)
    SparseTensor(bfeats, bcoords)
def test_collision(self):
    coords = np.array([[0, 0], [0, 0], [0, 0], [0, 1]], dtype=np.int32)
    labels = np.array([0, 1, 2, 3], dtype=np.int32)
    unique_coords, colabels = sparse_quantize(coords, labels=labels, ignore_label=255)
    print(unique_coords)
    print(colabels)
    self.assertTrue(len(unique_coords) == 2)
    self.assertTrue(np.array([0, 0]) in unique_coords)
    self.assertTrue(np.array([0, 1]) in unique_coords)
    self.assertTrue(len(colabels) == 2)
    self.assertTrue(255 in colabels)
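# Hypothetical sketch of the collision semantics exercised above, assuming
# MinkowskiEngine's sparse_quantize with labels: points that land in the same
# voxel but carry conflicting labels are expected to receive ignore_label, so
# losses can skip ambiguous voxels, while voxels whose points agree keep their label.
def _collision_sketch():
    coords = np.array([[0, 0], [0, 0], [0, 1], [0, 1]], dtype=np.int32)
    labels = np.array([0, 1, 2, 2], dtype=np.int32)
    unique_coords, colabels = sparse_quantize(coords, labels=labels, ignore_label=255)
    # Expected: the [0, 0] voxel maps to 255 (labels 0 and 1 conflict),
    # while the [0, 1] voxel keeps label 2.
    print(unique_coords, colabels)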
def test_label(self):
    N = 16575
    coords = (np.random.rand(N, 3) * 100).astype(np.int32)
    feats = np.random.rand(N, 4)
    labels = np.floor(np.random.rand(N) * 3)
    labels = labels.astype(np.int32)
    # Make duplicates
    coords[:3] = 0
    labels[:3] = 2

    qcoords, qfeats, qlabels, mapping, inverse_mapping = sparse_quantize(
        coords, feats, labels, return_index=True, return_inverse=True)
    self.assertTrue(len(mapping) == len(qcoords))
def test_mapping(self):
    N = 16575
    coords = (np.random.rand(N, 3) * 100).astype(np.int32)

    mapping, inverse_mapping = MEB.quantize_np(coords)
    print("N unique:", len(mapping), "N:", N)
    self.assertTrue((coords == coords[mapping][inverse_mapping]).all())
    self.assertTrue((coords == coords[mapping[inverse_mapping]]).all())

    coords = torch.from_numpy(coords)
    mapping, inverse_mapping = MEB.quantize_th(coords)
    print("N unique:", len(mapping), "N:", N)
    self.assertTrue((coords == coords[mapping[inverse_mapping]]).all())

    unique_coords, index, reverse_index = sparse_quantize(
        coords, return_index=True, return_inverse=True)
    self.assertTrue((coords == coords[index[reverse_index]]).all())
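# Hypothetical sketch of the index / inverse-index contract tested above,
# assuming sparse_quantize(coords, return_index=True, return_inverse=True)
# returns (unique_coords, index, inverse) with coords[index] == unique_coords
# and unique_coords[inverse] reproducing the original, duplicated coordinates.
def _mapping_sketch():
    coords = torch.IntTensor([[0, 0], [0, 0], [1, 2], [0, 0], [1, 2]])
    unique_coords, index, inverse = sparse_quantize(
        coords, return_index=True, return_inverse=True)
    assert (coords[index] == unique_coords).all()
    assert (unique_coords[inverse] == coords).all()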
def test(self):
    N = 16575
    ignore_label = 255

    coords = np.random.rand(N, 3) * 100
    feats = np.random.rand(N, 4)
    labels = np.floor(np.random.rand(N) * 3)
    labels = labels.astype(np.int32)
    # Make duplicates
    coords[:3] = 0
    labels[:3] = 2

    quantized_coords, quantized_feats, quantized_labels = sparse_quantize(
        coords.astype(np.int32), feats, labels, ignore_label)
    print(quantized_labels)
def down_sample(self):
    '''Down-sample the point cloud using FCGF.'''
    feats = []
    feats.append(np.ones((self.pointcloud.shape[0], 1)))
    feats = np.hstack(feats)

    coords = np.floor(self.pointcloud / 0.0075)
    inds = ME_utils.sparse_quantize(coords, return_index=True)
    coords = coords[inds]
    coords = np.hstack([coords, np.zeros((len(coords), 1))])
    pointcloud_down = self.pointcloud[inds]
    feats = feats[inds]

    point_down_path = self.save_root_path.replace('.xyz', '.down')
    np.savetxt(point_down_path, pointcloud_down, fmt='%0.6f')
    pointcloud_down = np.loadtxt(point_down_path)
    np.savetxt(self.save_root_path.replace('.xyz', '.feats'), feats)
    np.savetxt(self.save_root_path.replace('.xyz', '.coords'), coords)
    return pointcloud_down
def test(self):
    N = 16575
    ignore_label = 255

    coords = np.random.rand(N, 3) * 100
    feats = np.random.rand(N, 4)
    labels = np.floor(np.random.rand(N) * 3)
    labels = labels.astype(np.int32)
    # Make duplicates
    coords[:3] = 0
    labels[:3] = 2

    key = ravel_hash_vec(coords)  # floor happens by astype(np.uint64)
    inds, labels_v = MEB.SparseVoxelization(key, labels.astype(np.int32), ignore_label, True)
    coords_v, feats_v = coords[inds], feats[inds]
    print(coords_v, feats_v)

    outputs = sparse_quantize(coords, feats, labels, ignore_label)
    print(outputs)
def _sparse_quantize(self, gt_feats, gt_pcd, pcd, pcd_feats):
    pcd, pcd_feats = sparse_quantize(
        pcd, pcd_feats, quantization_size=self._config.quantization_size)
    gt_pcd, gt_feats = sparse_quantize(
        gt_pcd, gt_feats, quantization_size=self._config.quantization_size)
    return gt_feats, gt_pcd, pcd, pcd_feats
parser.add_argument('--config', '-c', default='configs/scannet-is-high_dim-eval.yaml')
args = parser.parse_args()

# Load config
config_path = args.config
config = yaml.load(open(config_path), Loader=yaml.FullLoader)

# Load data
raw_data = torch.load('data/example_scene.pt')
coords, feats = raw_data[:, :3], raw_data[:, 3:6]
feats = feats - 0.5
coords = torch.floor(coords / config['voxel_size']).cpu()
idxs = sparse_quantize(coords.numpy(), return_index=True, quantization_size=1)
# coords, feats = coords[idxs], feats[idxs]
coords, feats = sparse_collate([coords[idxs]], [feats[idxs]])
x = SparseTensor(feats, coords.int()).to(config['device'])

# Load semantic segmentation model
semantic_model = MODEL['semantic-segmentation-model'](config, None)
state_dict = torch.load(config['semantic_model']['path'])
semantic_model.load_state_dict(state_dict)
semantic_model.to(config['device'])
semantic_model.eval()

# Forward pass through the semantic model
with torch.no_grad():
    semantic_labels = semantic_model(x)