Example #1
File: data.py Project: xuhuahaoren/ssen
	def collate_fn(self, batch):
		# batch: list of (coords, features, labels, index) tuples from the dataset
		coords, features, labels, indices = list(zip(*batch))
		coords, features, labels = sparse_collate(coords, features, labels)

		if self.train:
			return SparseTensor(features, coords=coords), labels

		# at evaluation time, also return the scene directory names
		dir_names = [self.idx2dir[idx] for idx in indices]
		return SparseTensor(features, coords=coords), labels, dir_names
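For context, a minimal sketch of how a collate_fn like this is typically attached to a PyTorch DataLoader; the dataset instance and batch size below are illustrative assumptions, not code from the project:

    from torch.utils.data import DataLoader

    # `dataset` is assumed to be an instance of the class above, in train mode
    loader = DataLoader(dataset, batch_size=4, collate_fn=dataset.collate_fn)
    for sparse_input, labels in loader:
        ...  # sparse_input is a SparseTensor, labels the collated label tensor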
Example #2
    def test_device2(self):
        print(f"{self.__class__.__name__}: test_device2 SparseTensor")
        if not is_cuda_available():
            return

        # random point cloud in a 200^3 volume; reuse the coordinates as features
        coordinates = np.random.rand(8192, 3) * 200
        quant_coordinates, quant_features = sparse_quantize(coordinates, coordinates)
        bcoords, bfeats = sparse_collate([quant_coordinates], [quant_features])
        # move the collated batch to the GPU before constructing the SparseTensor
        bcoords, bfeats = bcoords.cuda(), bfeats.cuda()
        print(bcoords, bfeats)
        SparseTensor(bfeats, bcoords)
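As a reminder of what sparse_quantize does here: points that fall into the same voxel collapse to a single row. A small standalone sketch, assuming the MinkowskiEngine 0.4-style utils these examples appear to use:

    import numpy as np
    from MinkowskiEngine.utils import sparse_quantize

    coords = np.array([[0.2, 0.3, 0.1],   # these two rows share the
                       [0.4, 0.6, 0.9],   # unit voxel [0, 0, 0]
                       [5.0, 5.0, 5.0]])
    q_coords, q_feats = sparse_quantize(coords, coords, quantization_size=1.0)
    # q_coords now has 2 rows: [0, 0, 0] and [5, 5, 5]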
Example #3
File: data.py Project: xuhuahaoren/ssen
	def visualize_data(
			self, model: Model, writer: SummaryWriter,
			dataset: Dataset, indices: List, tag, step
	):
		# visualize the selected examples
		batch = [dataset[i] for i in indices]
		coords, feats, label, _ = list(zip(*batch))
		coords, feats = sparse_collate(coords, feats)
		x = SparseTensor(feats, coords)

		x = x.to(model.device)
		with torch.no_grad():
			y = model(x)
		pred = y['pred']
		pred_choices = pred.max(dim=1).indices

		for i in range(len(indices)):
			# select the rows of the i-th scene in the batch; the batch index
			# sits in the last coordinate column, and since y is a dict the
			# coordinates are read from the input SparseTensor x
			data_indices = (x.C[:, 3] == i).nonzero().squeeze(1)
			coord = coords[data_indices, :3].type(torch.FloatTensor)
			coord = coord * self.config['voxel_size']
			coord = torch.stack([coord, coord])  # Tensor of 2 x N x 3
			pred_choice = pred_choices[data_indices]

			# add color for prediction
			# color each point by its predicted / ground-truth label
			pred_color = torch.stack(
				[self.cmap[point] for point in pred_choice],
				dim=0
			)  # Tensor of N x 3
			gt_color = torch.stack(
				[self.cmap[point] for point in label[i]],
				dim=0
			)  # Tensor of N x 3
			color = torch.stack([pred_color, gt_color], dim=0)  # Tensor of 2 x N x 3
			color = (color * 255).type(torch.IntTensor)

			max_sample = self.config['max_vis_sample']
			if coord.shape[1] > max_sample:
				perm = np.random.RandomState(0).permutation(coord.shape[1])
				coord = coord[:, perm[:max_sample], :]
				color = color[:, perm[:max_sample], :]

			writer.add_mesh(
				tag=tag + '/vis_%d' % i, vertices=coord,
				colors=color, global_step=step
			)
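Note the fixed-seed permutation in the subsampling step: it makes the subsample deterministic, so the same points are rendered at every logging step and the meshes stay comparable over training. A standalone illustration (the shapes are arbitrary):

    import numpy as np
    import torch

    coord = torch.rand(2, 10000, 3)
    max_sample = 2048
    perm = np.random.RandomState(0).permutation(coord.shape[1])
    subset = coord[:, perm[:max_sample], :]  # identical 2048 rows on every call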
Example #4
File: data.py Project: xuhuahaoren/ssen
	def visualize(self, options, model: Model, writer: SummaryWriter, step):
		training = model.training
		model.eval()

		vis_config = self.config['vis']

		if vis_config.get('num_scene_samples'):
			# sample k data points from n data points with equal interval
			n = len(self)
			k = vis_config.get('num_scene_samples')
			vis_indices = torch.linspace(0, n - 1, k) \
				.type(torch.IntTensor).tolist()
		else:
			vis_indices = [self.dir2idx[i] for i in vis_config.get('scene_names')]

		if self.config['overfit_one_ex']:
			vis_scene = self.config['overfit_one_ex']
			vis_indices = [self.dir2idx[vis_scene]]
			vis_indices = list(set(vis_indices))

		for i in vis_indices:
			coords, feats, labels, _ = self[i]
			coords, feats = sparse_collate([coords], [feats])
			x = SparseTensor(feats, coords)

			x = x.to(model.device)
			with torch.no_grad():
				y_hat = model(x)

			embs = y_hat  # per-point embeddings
			insts = labels[:, 1]  # instance labels (second label column)

			for option in options:
				# visualize tsne
				if option == 'tsne':
					tsne_img = visualization.visualize_tsne(
						embs.cpu(), insts.cpu(),
						config=self.config['vis']['tsne']
					)
					writer.add_image('tsne/{}'.format(self.idx2dir[i]), tsne_img, step)

				elif option == 'embs':
					vis_config = self.config['vis']['embs']

					# visualize embs with background
					emb_imgs, axis_range = visualization.visualize_embs(
						embs.cpu(), insts.cpu(),
						remove_bg=False, max_sample=vis_config['max_sample'],
						num_view=vis_config['num_view']
					)
					for view_num, img in enumerate(emb_imgs):
						writer.add_image(
							'emb/with_bg/{}_{}'.format(self.idx2dir[i], view_num),
							img, step
						)

					# visualize embs without background
					not_bg_emb_imgs, _ = visualization.visualize_embs(
						embs.cpu(), insts.cpu(),
						remove_bg=True, max_sample=vis_config['max_sample'],
						num_view=vis_config['num_view'], axis_range=axis_range
					)
					for view_num, img in enumerate(not_bg_emb_imgs):
						writer.add_image(
							'emb/no_bg/{}_{}'.format(self.idx2dir[i], view_num),
							img, step
						)

		# restore the original train/eval mode once, after all scenes are done
		model.train(training)
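This save-and-restore of model.training around evaluation-time forward passes is a common pattern; a try/finally variant (an alternative sketch, not the project's code) guarantees the mode is restored even if visualization raises:

    training = model.training
    model.eval()
    try:
        with torch.no_grad():
            ...  # visualization forward passes
    finally:
        model.train(training)  # restored exactly once, on every exit path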
Example #5
File: data.py Project: xuhuahaoren/ssen
	def collate_fn(self, batch):
		coords, features, labels = list(zip(*batch))
		coords, features, labels = sparse_collate(coords, features, labels)
		return SparseTensor(features, coords=coords), labels
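For reference, sparse_collate concatenates the per-sample tensors and appends a batch-index column to the coordinates; under the old 0.4-style layout these examples use (see the batch-index check in Example #3), that index is the last column. A shape sketch with illustrative inputs:

    import torch
    from MinkowskiEngine.utils import sparse_collate  # assumed import

    c0, c1 = torch.randint(0, 10, (5, 3)), torch.randint(0, 10, (7, 3))
    f0, f1 = torch.rand(5, 4), torch.rand(7, 4)
    l0, l1 = torch.zeros(5).long(), torch.ones(7).long()
    bcoords, bfeats, blabels = sparse_collate([c0, c1], [f0, f1], [l0, l1])
    # bcoords: (12, 4), with the batch index (0 or 1) in the extra column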
Example #6
File: eval.py Project: xuhuahaoren/ssen
    args = parser.parse_args()

    # Load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    # Load data: the first three columns are xyz, the next three are colors in [0, 1]
    raw_data = torch.load('data/example_scene.pt')
    coords, feats = raw_data[:, :3], raw_data[:, 3:6]
    feats = feats - 0.5  # center the color features around zero
    coords = torch.floor(coords / config['voxel_size']).cpu()  # voxel-grid coords
    # deduplicate points that share a voxel; return_index yields the row
    # indices of the points to keep
    idxs = sparse_quantize(coords.numpy(),
                           return_index=True,
                           quantization_size=1)
    # coords, feats = coords[idxs], feats[idxs]
    coords, feats = sparse_collate([coords[idxs]], [feats[idxs]])
    x = SparseTensor(feats, coords.int()).to(config['device'])

    # Load semantic segmentation model
    semantic_model = MODEL['semantic-segmentation-model'](config, None)
    state_dict = torch.load(config['semantic_model']['path'])
    semantic_model.load_state_dict(state_dict)
    semantic_model.to(config['device'])
    semantic_model.eval()

    # Forward pass the semantic model
    with torch.no_grad():
        semantic_labels = semantic_model(x)
    semantic_labels = semantic_labels.max(dim=1).indices  # Tensor of N

    # remove labels predicted as wall and floor
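A purely hypothetical sketch of the filtering that the comment above describes; the wall and floor label ids are placeholders, not values from the source:

    WALL, FLOOR = 0, 1  # placeholder ids, project-specific in reality
    keep = (semantic_labels != WALL) & (semantic_labels != FLOOR)
    coords, feats = coords[keep], feats[keep]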