Exemplo n.º 1
0
    def test_pruning(self):
        """Prune a sparse tensor with a random boolean mask and gradcheck.

        Runs the CPU path first, then — only if CUDA is available —
        repeats the forward pass and the gradient check on the GPU.
        """
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        # gradcheck requires double precision and a grad-enabled leaf tensor
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # keep roughly half of the points
        use_feat = torch.rand(feats.size(0)) < 0.5
        pruning = MinkowskiPruning(D)
        output = pruning(input, use_feat)
        print(use_feat, output)

        # Check backward
        fn = MinkowskiPruningFunction()
        self.assertTrue(
            gradcheck(fn, (input.F, use_feat, input.coords_key,
                           output.coords_key, input.coords_man)))

        # GPU path: skip gracefully on CPU-only machines instead of crashing
        if not torch.cuda.is_available():
            return

        device = torch.device('cuda')
        with torch.cuda.device(0):
            input = input.to(device)
            output = pruning(input, use_feat)
            print(output)

        self.assertTrue(
            gradcheck(fn, (input.F, use_feat, input.coords_key,
                           output.coords_key, input.coords_man)))
Exemplo n.º 2
0
    def test_broadcast_gpu(self):
        """Broadcast a globally-pooled feature back onto a sparse tensor (GPU).

        Gradchecks both the ADDITION and MULTIPLICATION broadcast ops.
        """
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        coords, feats_glob, labels = data_loader(in_channels)
        # gradcheck requires double precision
        feats = feats.double()
        feats_glob = feats_glob.double()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiGlobalPooling(dimension=D)
        input_glob = pool(input)
        input_glob.F.requires_grad_()
        broadcast = MinkowskiBroadcastAddition(D)
        output = broadcast(input, input_glob)
        print(output)

        # Check backward
        fn = MinkowskiBroadcastFunction()

        # GPU test: skip gracefully on CPU-only machines instead of crashing
        if not torch.cuda.is_available():
            return

        device = torch.device('cuda')
        input = input.to(device)
        input_glob = input_glob.to(device)
        output = broadcast(input, input_glob)
        print(output)
        self.assertTrue(
            gradcheck(
                fn,
                (input.F, input_glob.F, OperationType.ADDITION,
                 input.coords_key, input_glob.coords_key, input.coords_man)))

        self.assertTrue(
            gradcheck(
                fn,
                (input.F, input_glob.F, OperationType.MULTIPLICATION,
                 input.coords_key, input_glob.coords_key, input.coords_man)))
Exemplo n.º 3
0
    def test_avgpooling_gpu(self):
        """Average-pool a sparse tensor on CPU then GPU and gradcheck the op."""
        if not torch.cuda.is_available():
            return

        nchannel, dim = 2, 2
        coords, feats, labels = data_loader(nchannel)
        # gradcheck requires double precision and grad-enabled inputs
        feats = feats.double()
        feats.requires_grad_()
        sinput = SparseTensor(feats, coords=coords)
        pool = MinkowskiAvgPooling(kernel_size=3, stride=2, dimension=dim)
        sout = pool(sinput)
        print(sout)

        cuda = torch.device('cuda')
        with torch.cuda.device(0):
            sinput = sinput.to(cuda)
            pool = pool.to(cuda)
            sout = pool(sinput)
            print(sout)

        # Check backward through the raw autograd function
        fn = MinkowskiAvgPoolingFunction()
        self.assertTrue(
            gradcheck(
                fn,
                (sinput.F, sinput.tensor_stride, pool.stride,
                 pool.kernel_size, pool.dilation, pool.region_type_,
                 pool.region_offset_, True, sinput.coords_key, None,
                 sinput.coords_man)))
Exemplo n.º 4
0
    def test_union(self):
        """Union two sparse tensors; features at shared coordinates are summed.

        Checks forward values and gradients on CPU, then repeats on the GPU
        when CUDA is available.
        """
        coords1 = torch.IntTensor([[0, 0], [0, 1]])
        coords2 = torch.IntTensor([[0, 1], [1, 1]])
        feats1 = torch.DoubleTensor([[1], [2]])
        feats2 = torch.DoubleTensor([[3], [4]])
        input1 = SparseTensor(coords=ME.utils.batched_coordinates([coords1]), feats=feats1)

        input2 = SparseTensor(
            feats=feats2,
            coords=ME.utils.batched_coordinates([coords2]),
            coords_manager=input1.coords_man,  # Must use same coords manager
            force_creation=True  # The tensor stride [1, 1] already exists.
        )

        input1.requires_grad_()
        input2.requires_grad_()
        union = MinkowskiUnion()
        output = union(input1, input2)
        print(output)

        # Coordinate (0, 1) overlaps, so the union has 3 points and 2 + 3 = 5
        self.assertTrue(len(output) == 3)
        self.assertTrue(5 in output.F)
        output.F.sum().backward()

        # Grad of sum feature is 1.
        self.assertTrue(torch.prod(input1.F.grad) == 1)
        self.assertTrue(torch.prod(input2.F.grad) == 1)

        # GPU test: skip gracefully on CPU-only machines instead of crashing
        if not torch.cuda.is_available():
            return

        device = torch.device('cuda')
        with torch.cuda.device(0):
            input1, input2 = input1.to(device), input2.to(device)
            output = union(input1, input2)

            output.F.sum().backward()
            print(output)
            self.assertTrue(len(output) == 3)
            self.assertTrue(5 in output.F)
Exemplo n.º 5
0
    def test_maxpooling(self):
        """Max-pool a batched sparse tensor; gradcheck on CPU and (if available) GPU."""
        nchannel, dim = 2, 2
        coords, feats, labels = data_loader(nchannel, batch_size=2)
        # gradcheck requires grad-enabled double-precision features
        feats.requires_grad_()
        feats = feats.double()
        sinput = SparseTensor(feats, coords=coords)
        pool = MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=dim)
        print(pool)
        sout = pool(sinput)
        print(sinput)
        print(sout)
        cmgr = sout.coords_man
        print(cmgr.get_coords(2))
        # Inspect the cached kernel region and its in/out kernel map
        region_type, _, _ = pool.kernel_generator.cache[(1, 1)]
        print(
            cmgr.get_kernel_map(
                1,
                2,
                stride=2,
                kernel_size=2,
                region_type=region_type,
                is_pool=True))
        # Check backward
        fn = MinkowskiMaxPoolingFunction()

        # Even numbered kernel_size error!
        self.assertTrue(
            gradcheck(
                fn,
                (sinput.F, sinput.tensor_stride, pool.stride,
                 pool.kernel_size, pool.dilation, pool.region_type_,
                 pool.region_offset_, sinput.coords_key, None,
                 sinput.coords_man)))

        if not torch.cuda.is_available():
            return

        sinput = sinput.to(torch.device('cuda'))
        sout = pool(sinput)
        print(sout)

        # Check backward on the GPU as well
        self.assertTrue(
            gradcheck(
                fn,
                (sinput.F, sinput.tensor_stride, pool.stride,
                 pool.kernel_size, pool.dilation, pool.region_type_,
                 pool.region_offset_, sinput.coords_key, None,
                 sinput.coords_man)))
Exemplo n.º 6
0
    def test_broadcast_gpu(self):
        """Compare CPU vs GPU broadcast (add / mul / concat) and gradcheck.

        Computes the three broadcast variants on CPU, recomputes them on the
        GPU, checks element-wise agreement, then gradchecks the raw function.
        """
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        coords, feats_glob, labels = data_loader(in_channels)
        # gradcheck requires double precision
        feats = feats.double()
        feats_glob = feats_glob.double()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiGlobalPooling()
        input_glob = pool(input)
        input_glob.F.requires_grad_()
        broadcast_add = MinkowskiBroadcastAddition()
        broadcast_mul = MinkowskiBroadcastMultiplication()
        broadcast_cat = MinkowskiBroadcastConcatenation()
        cpu_add = broadcast_add(input, input_glob)
        cpu_mul = broadcast_mul(input, input_glob)
        cpu_cat = broadcast_cat(input, input_glob)

        # Check backward
        fn = MinkowskiBroadcastFunction()

        # GPU test: skip gracefully on CPU-only machines instead of crashing
        if not torch.cuda.is_available():
            return

        device = torch.device('cuda')

        input = input.to(device)
        input_glob = input_glob.to(device)
        gpu_add = broadcast_add(input, input_glob)
        gpu_mul = broadcast_mul(input, input_glob)
        gpu_cat = broadcast_cat(input, input_glob)

        # CPU and GPU results must agree element-wise
        self.assertTrue(
            torch.prod(gpu_add.F.cpu() - cpu_add.F < 1e-5).item() == 1)
        self.assertTrue(
            torch.prod(gpu_mul.F.cpu() - cpu_mul.F < 1e-5).item() == 1)
        self.assertTrue(
            torch.prod(gpu_cat.F.cpu() - cpu_cat.F < 1e-5).item() == 1)

        self.assertTrue(
            gradcheck(
                fn,
                (input.F, input_glob.F, OperationType.ADDITION,
                 input.coords_key, input_glob.coords_key, input.coords_man)))

        self.assertTrue(
            gradcheck(
                fn,
                (input.F, input_glob.F, OperationType.MULTIPLICATION,
                 input.coords_key, input_glob.coords_key, input.coords_man)))
Exemplo n.º 7
0
	def visualize_data(
			self, model: Model, writer: SummaryWriter,
			dataset: Dataset, indices: List, tag, step
	):
		"""Write predicted vs. ground-truth colored point clouds to TensorBoard.

		For each dataset index in ``indices`` the sample is collated into a
		sparse batch, run through ``model`` without gradients, and written
		via ``writer.add_mesh`` as a stacked pair (prediction, ground truth)
		that shares the same coordinates.
		"""
		# visualize one data: collate the selected samples into a single batch
		batch = [dataset[i] for i in indices]
		coords, feats, label, _ = list(zip(*batch))
		coords, feats, = sparse_collate(coords, feats)
		x = SparseTensor(feats, coords)

		x = x.to(model.device)
		with torch.no_grad():
			y = model(x)
		pred = y['pred']
		# per-point class prediction (argmax over dim 1)
		pred_choices = pred.max(dim=1).indices

		for i in range(len(indices)):
			# get indices with specific indices
			# NOTE(review): assumes the batch index is column 3 of y.C and that
			# y supports both dict access ('pred') and .C — confirm upstream.
			data_indices = (y.C[:, 3] == i).nonzero().squeeze(1)
			coord = coords[data_indices, :3].type(torch.FloatTensor)
			# scale voxel coordinates back to metric space
			coord = coord * self.config['voxel_size']
			coord = torch.stack([coord, coord])  # Tensor of 2 x N x 3
			pred_choice = pred_choices[data_indices]

			# add color for prediction
			pred_color = torch.stack(
				[self.cmap[point] for point in pred_choice],
				dim=0
			)  # Tensor of N x 3 (1 for batch)
			gt_color = torch.stack(
				[self.cmap[point] for point in label[i]],
				dim=0
			)  # Tensor of N x 3 (1 for batch)
			color = torch.stack([pred_color, gt_color], dim=0)  # Tensor of 2 x N x 3
			# add_mesh expects integer RGB values in [0, 255]
			color = (color * 255).type(torch.IntTensor)

			# deterministic subsampling (fixed seed) to cap the mesh size
			max_sample = self.config['max_vis_sample']
			if coord.shape[1] > max_sample:
				perm = np.random.RandomState(0).permutation(coord.shape[1])
				coord = coord[:, perm[:max_sample], :]
				color = color[:, perm[:max_sample], :]

			writer.add_mesh(
				tag=tag + '/vis_%d' % i, vertices=coord,
				colors=color, global_step=step
			)
Exemplo n.º 8
0
    def test_unpooling_gpu(self):
        """Transpose-pool (unpool) a convolved sparse tensor; gradcheck CPU & GPU."""
        if not torch.cuda.is_available():
            return

        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        # gradcheck requires double precision
        feats = feats.double()
        input = SparseTensor(feats, coords=coords)
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    dimension=D)
        conv = conv.double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3,
                                           stride=2,
                                           dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)

        # Check backward
        fn = MinkowskiPoolingTransposeFunction()

        self.assertTrue(
            gradcheck(fn, (input.F, input.tensor_stride, unpool.stride,
                           unpool.kernel_size, unpool.dilation,
                           unpool.region_type_, unpool.region_offset_, False,
                           input.coords_key, None, input.coords_man)))

        device = torch.device('cuda')
        with torch.cuda.device(0):
            input = input.to(device)
            output = unpool(input)
            print(output)

        # Check backward on GPU with the SAME transpose-pooling function and
        # flags as the CPU check. (BUG FIX: the original reassigned
        # fn = MinkowskiAvgPoolingFunction() and flipped the flag to True —
        # a copy-paste from the avg-pooling test that checked the wrong op.)
        self.assertTrue(
            gradcheck(fn, (input.F, input.tensor_stride, unpool.stride,
                           unpool.kernel_size, unpool.dilation,
                           unpool.region_type_, unpool.region_offset_, False,
                           input.coords_key, None, input.coords_man)))
Exemplo n.º 9
0
    def test_global_maxpool(self):
        """Global max pooling over a sparse tensor; gradcheck + CPU/GPU parity."""
        in_channels = 2
        coords, feats, labels = data_loader(in_channels)
        # gradcheck requires double precision and grad-enabled features
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiGlobalMaxPooling()
        output = pool(input)
        print(output)

        # Check backward
        fn = MinkowskiGlobalMaxPoolingFunction()
        self.assertTrue(
            gradcheck(fn, (input.F, input.coords_key, None, input.coords_man)))

        if torch.cuda.is_available():
            input_cuda = input.to(torch.device(0))
            # BUG FIX: pool the CUDA tensor — the original called pool(input)
            # on the CPU tensor again, so the GPU path was never exercised
            # and the parity check compared the CPU result with itself.
            output_cuda = pool(input_cuda)
            self.assertTrue(torch.allclose(output_cuda.F.cpu(), output.F))
Exemplo n.º 10
0
    def test_gpu(self):
        """Run a sparse convolution on CPU then GPU, backprop, and gradcheck."""
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return
        nchannel_in, nchannel_out, dim = 2, 3, 2
        coords, feats, labels = data_loader(nchannel_in)
        # gradcheck requires double precision and grad-enabled features
        feats = feats.double()
        feats.requires_grad_()
        sinput = SparseTensor(feats, coords=coords)
        # Initialize context
        conv = MinkowskiConvolution(nchannel_in,
                                    nchannel_out,
                                    kernel_size=3,
                                    stride=2,
                                    has_bias=True,
                                    dimension=dim)
        print(conv)
        conv = conv.double()
        sout = conv(sinput)
        print(sout)

        cuda = torch.device('cuda')
        sinput, conv = sinput.to(cuda), conv.to(cuda)
        sout = conv(sinput)
        print(sout)
        print(sout.F, sout.coords)

        # Check backward
        fn = MinkowskiConvolutionFunction()

        # propagate a one-hot gradient through the first output row
        grad = sout.F.clone().zero_()
        grad[0] = 1
        sout.F.backward(grad)

        self.assertTrue(
            gradcheck(fn, (sinput.F, conv.kernel, sinput.tensor_stride,
                           conv.stride, conv.kernel_size, conv.dilation,
                           conv.region_type_, conv.region_offset_,
                           sinput.coords_key, None, sinput.coords_man)))
Exemplo n.º 11
0
	def visualize(self, options, model: Model, writer: SummaryWriter, step):
		"""Write t-SNE / embedding visualizations for selected scenes.

		Puts the model in eval mode for inference and restores its previous
		training mode once ALL scenes have been processed.
		"""
		training = model.training
		model.eval()

		vis_config = self.config['vis']

		if vis_config.get('num_scene_samples'):
			# sample k data points from n data points with equal interval
			n = len(self)
			k = vis_config.get('num_scene_samples')
			vis_indices = torch.linspace(0, n - 1, k) \
				.type(torch.IntTensor).tolist()
		else:
			vis_indices = [self.dir2idx[i] for i in vis_config.get('scene_names')]

		if self.config['overfit_one_ex']:
			# when overfitting a single example, visualize only that scene
			vis_scene = self.config['overfit_one_ex']
			vis_indices = [self.dir2idx[vis_scene]]
			vis_indices = list(set(vis_indices))

		for i in vis_indices:
			coords, feats, labels, _ = self[i]
			coords, feats, = sparse_collate([coords], [feats])
			x = SparseTensor(feats, coords)

			x = x.to(model.device)
			with torch.no_grad():
				y_hat = model(x)

			embs = y_hat
			insts = labels[:, 1]

			for option in options:
				# visualize tsne
				if option == 'tsne':
					tsne_img = visualization.visualize_tsne(
						embs.cpu(), insts.cpu(),
						config=self.config['vis']['tsne']
					)
					writer.add_image('tsne/{}'.format(self.idx2dir[i]), tsne_img, step)

				elif option == 'embs':
					vis_config = self.config['vis']['embs']

					# visualize embs with background
					emb_imgs, axis_range = visualization.visualize_embs(
						embs.cpu(), insts.cpu(),
						remove_bg=False, max_sample=vis_config['max_sample'],
						num_view=vis_config['num_view']
					)
					for view_num, img in enumerate(emb_imgs):
						writer.add_image(
							'emb/with_bg/{}_{}'.format(self.idx2dir[i], view_num),
							img, step
						)

					# visualize embs without background
					not_bg_emb_imgs, _ = visualization.visualize_embs(
						embs.cpu(), insts.cpu(),
						remove_bg=True, max_sample=vis_config['max_sample'],
						num_view=vis_config['num_view'], axis_range=axis_range
					)
					for view_num, img in enumerate(not_bg_emb_imgs):
						writer.add_image(
							'emb/no_bg/{}_{}'.format(self.idx2dir[i], view_num),
							img, step
						)

		# BUG FIX: this was inside the scene loop, so the model was flipped
		# back to training mode after the first scene and later iterations
		# ran inference with dropout/batchnorm in train mode. Restore once,
		# after all scenes are done.
		model.train(training)