Example #1
    def test_feature_grids_to_spc(self, sparse_feature_grids,
                                  expected_out_feature_grids, device):
        octrees, lengths, features = feature_grids_to_spc(sparse_feature_grids)
        # Conversion preserves the input device, but the SPC ops below
        # require CUDA tensors.
        assert octrees.device.type == device
        assert features.device.type == device
        octrees = octrees.cuda()
        features = features.cuda()
        max_level, pyramids, exsum = scan_octrees(octrees, lengths)
        point_hierarchies = generate_points(octrees, pyramids, exsum)
        out_feature_grids = to_dense(point_hierarchies, pyramids, features,
                                     max_level)

        assert torch.equal(out_feature_grids, expected_out_feature_grids)
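These snippets read like tests of Kaolin's structured point cloud (SPC) operators, shown without their surrounding test class. A preamble along these lines is assumed (module paths follow Kaolin's documented layout, but verify them against your installed version):

import math

import pytest
import torch

from kaolin.ops.spc import (feature_grids_to_spc, generate_points,
                            scan_octrees, to_dense)
from kaolin.rep import Spc
import kaolin.ops.spc as spc

# random_spc_octrees (Example #4) looks like a test utility; the import
# path below is an assumption.
from kaolin.utils.testing import random_spc_octrees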
Example #2
    def test_zeros(self, batch_size, feature_dim, height, width, depth, dtype,
                   device):
        feature_grids = torch.zeros(
            (batch_size, feature_dim, height, width, depth),
            dtype=dtype,
            device=device)
        octrees, lengths, features = feature_grids_to_spc(feature_grids)
        assert torch.equal(
            octrees, torch.zeros((batch_size,),
                                 dtype=torch.uint8,
                                 device=device))
        assert torch.equal(
            lengths, torch.ones((batch_size,), dtype=torch.int, device='cpu'))
        assert torch.equal(
            features, torch.empty((0, feature_dim), dtype=dtype,
                                  device=device))
Example #3
    def test_ones(self, batch_size, feature_dim, height, width, depth, dtype,
                  device):
        feature_grids = torch.ones(
            (batch_size, feature_dim, height, width, depth),
            dtype=dtype,
            device=device)
        octrees, lengths, features = feature_grids_to_spc(feature_grids)
        assert octrees.device.type == device
        assert features.device.type == device
        octrees = octrees.cuda()
        features = features.cuda()
        max_level, pyramids, exsum = scan_octrees(octrees, lengths)
        point_hierarchies = generate_points(octrees, pyramids, exsum)
        out_feature_grids = to_dense(point_hierarchies, pyramids, features,
                                     max_level)
        # to_dense pads the output to a (2 ** max_level) ** 3 cube, so the
        # region beyond the original extent must be zero.
        assert torch.all(out_feature_grids[:, :, :height, :width, :depth] == 1)
        assert torch.all(out_feature_grids[:, :, height:] == 0)
        assert torch.all(out_feature_grids[:, :, :, width:] == 0)
        assert torch.all(out_feature_grids[..., depth:] == 0)
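The feature_grids_to_spc → scan_octrees → generate_points → to_dense chain above recurs throughout these examples. A minimal round-trip helper, with a name and cropping of our own and assuming the mask argument is optional (as the single-argument calls above suggest), could look like:

def spc_round_trip(feature_grids, masks=None):
    # Dense -> SPC -> dense. to_dense pads to a (2 ** max_level) ** 3 cube,
    # so crop back to the input's spatial extent before comparing.
    height, width, depth = feature_grids.shape[2:]
    octrees, lengths, features = feature_grids_to_spc(feature_grids, masks)
    octrees, features = octrees.cuda(), features.cuda()
    max_level, pyramids, exsum = scan_octrees(octrees, lengths)
    point_hierarchies = generate_points(octrees, pyramids, exsum)
    dense = to_dense(point_hierarchies, pyramids, features, max_level)
    return dense[:, :, :height, :width, :depth]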
Example #4
    def test_to_dense(self, batch_size, max_level, feature_dim):
        octrees, lengths = random_spc_octrees(batch_size, max_level, 'cuda')

        max_level, pyramids, exsum = scan_octrees(octrees, lengths)
        point_hierarchies = generate_points(octrees, pyramids, exsum)
        in_num_nodes = torch.sum(pyramids[:, 0, -2])
        coalescent_features = torch.rand((in_num_nodes, feature_dim),
                                         device='cuda',
                                         requires_grad=True)
        expected_size = 2**max_level
        feat_idx = []
        bs_start_idx = 0
        for bs in range(batch_size):
            # pyramids[bs, 0, -2] and pyramids[bs, 1, -2] hold the point count
            # and offset of the finest level; pyramids[bs, 1, -1] is the total
            # number of points in this batch item's hierarchy.
            start_idx = pyramids[bs, 1, -2] + bs_start_idx
            num_points = pyramids[bs, 0, -2]
            points = point_hierarchies[start_idx:start_idx + num_points]
            # Prepend the batch index to each point's (x, y, z) coordinates.
            feat_idx.append(torch.nn.functional.pad(points, (1, 0), value=bs))
            bs_start_idx += pyramids[bs, 1, -1]
        feat_idx = torch.cat(feat_idx, dim=0).permute(1, 0).long()
        expected_feature_grids = torch.zeros(
            (batch_size, feature_dim, expected_size, expected_size,
             expected_size),
            device='cuda')
        expected_feature_grids[feat_idx[0], :, feat_idx[1], feat_idx[2],
                               feat_idx[3]] = coalescent_features

        # test forward
        feature_grids = to_dense(point_hierarchies, pyramids,
                                 coalescent_features, max_level)
        assert torch.equal(expected_feature_grids, feature_grids)

        # test backward
        grad_out = torch.rand_like(feature_grids)
        feature_grids.backward(grad_out)
        octrees, lengths, coalescent_expected_grad = feature_grids_to_spc(
            grad_out, torch.any(feature_grids != 0, dim=1))
        assert torch.equal(coalescent_features.grad, coalescent_expected_grad)
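The pyramid bookkeeping in the loop above is worth isolating. A small helper of our own, following exactly the indexing this test uses, pulls each batch item's finest-level point coordinates out of the packed point_hierarchies:

def finest_level_points(point_hierarchies, pyramids):
    # pyramids[bs, 0, l] is the point count at level l and pyramids[bs, 1, l]
    # that level's offset; column -2 is the finest level and
    # pyramids[bs, 1, -1] the total point count of batch item bs.
    points = []
    offset = 0
    for bs in range(pyramids.shape[0]):
        start = pyramids[bs, 1, -2] + offset
        num = pyramids[bs, 0, -2]
        points.append(point_hierarchies[start:start + num])
        offset += pyramids[bs, 1, -1]
    return points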
Example #5
    def octrees_lengths_features(self, feature_grids, sparsity_masks):
        return spc.feature_grids_to_spc(feature_grids, sparsity_masks)
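This fixture forwards the optional second argument: sparsity_masks is a boolean (batch, height, width, depth) mask that selects which voxels survive the conversion, independent of their feature values. Example #4 builds such a mask with torch.any(feature_grids != 0, dim=1).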
Example #6
    def test_conv_transpose3d(self, height, width, depth, in_channels,
                              out_channels, sparsity_masks, dense_weight, bias,
                              octrees, lengths, max_level, pyramids, exsum,
                              point_hierarchies, kernel_vectors, kernel_size,
                              kernel_offset, spc_weight, jump,
                              with_spc_to_dict):
        stride = 2**jump

        if stride > kernel_size:
            pytest.skip('stride higher than kernel_size is not tested')

        out_sparsity_masks = sparsity_masks
        in_level = max_level - jump
        in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
        coalescent_features = torch.rand((in_num_nodes, in_channels),
                                         device='cuda',
                                         requires_grad=True)

        dense_weight = dense_weight.detach()
        dense_weight.requires_grad = True
        spc_weight = spc_weight.detach()
        spc_weight.requires_grad = True
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            feature_grids = spc.to_dense(**input_spc.to_dict(),
                                         input=coalescent_features,
                                         level=in_level)
        else:
            feature_grids = spc.to_dense(point_hierarchies, pyramids,
                                         coalescent_features, in_level)
        # to_dense pads to a power-of-two cube; crop back to the strided
        # input extent.
        feature_grids = feature_grids[:, :,
                                      :math.ceil(height / stride),
                                      :math.ceil(width / stride),
                                      :math.ceil(depth / stride)]
        feature_grids = feature_grids.detach()
        feature_grids.requires_grad = True
        if with_spc_to_dict:
            sparsity_masks = spc.to_dense(**input_spc.to_dict(),
                                          input=torch.ones_like(
                                              coalescent_features),
                                          level=in_level).bool()
        else:
            sparsity_masks = spc.to_dense(point_hierarchies, pyramids,
                                          torch.ones_like(coalescent_features),
                                          in_level).bool()
        sparsity_masks = sparsity_masks[:, 0,
                                        :math.ceil(height / stride),
                                        :math.ceil(width / stride),
                                        :math.ceil(depth / stride)]

        # test forward
        if with_spc_to_dict:
            output_features, output_level = spc.conv_transpose3d(
                **input_spc.to_dict(),
                level=in_level,
                input=coalescent_features,
                weight=spc_weight,
                kernel_vectors=kernel_vectors,
                jump=jump,
                bias=bias)
            output = spc.to_dense(**input_spc.to_dict(),
                                  input=output_features,
                                  level=output_level)
        else:
            output_features, output_level = spc.conv_transpose3d(
                octrees,
                point_hierarchies,
                in_level,
                pyramids,
                exsum,
                coalescent_features,
                spc_weight,
                kernel_vectors,
                jump=jump,
                bias=bias)
            output = spc.to_dense(point_hierarchies, pyramids, output_features,
                                  output_level)

        output = output[:, :, :height, :width, :depth]

        expected_output = torch.nn.functional.conv_transpose3d(
            feature_grids,
            dense_weight.permute(1, 0, 2, 3, 4),
            stride=stride,
            bias=bias,
            output_padding=stride - 1)[:, :,
                                       kernel_offset:height + kernel_offset,
                                       kernel_offset:width + kernel_offset,
                                       kernel_offset:depth + kernel_offset]
        expected_output *= out_sparsity_masks.unsqueeze(1)
        assert output_level == max_level
        assert torch.allclose(output, expected_output, rtol=1e-3, atol=1e-3)
        # test backward
        grad_out = torch.rand_like(expected_output)
        expected_output.backward(grad_out)
        output.backward(grad_out)
        _, _, sparsified_grad = spc.feature_grids_to_spc(
            feature_grids.grad, sparsity_masks)
        assert torch.allclose(coalescent_features.grad,
                              sparsified_grad,
                              rtol=5e-2,
                              atol=5e-2)
        assert torch.allclose(spc_weight.grad,
                              dense_weight.grad.reshape(
                                  out_channels, in_channels,
                                  -1).permute(2, 1, 0),
                              rtol=5e-2,
                              atol=5e-2)
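Both convolution tests compare spc_weight gradients against dense_weight.grad.reshape(out_channels, in_channels, -1).permute(2, 1, 0), which pins the SPC weight layout down as (kernel_volume, in_channels, out_channels). A conversion helper of our own makes the relationship explicit:

def dense_to_spc_weight(dense_weight):
    # (out_channels, in_channels, k, k, k) -> (k ** 3, in_channels,
    # out_channels). Assumes kernel_vectors enumerates the k ** 3 offsets in
    # the same row-major order as this reshape.
    out_channels, in_channels = dense_weight.shape[:2]
    return (dense_weight.reshape(out_channels, in_channels, -1)
            .permute(2, 1, 0).contiguous())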
Example #7
    def test_conv3d(self, height, width, depth, in_channels, out_channels,
                    kernel_size, feature_grids, sparsity_masks, dense_weight,
                    bias, octrees, lengths, coalescent_features, max_level,
                    pyramids, exsum, point_hierarchies, kernel_vectors,
                    kernel_offset, spc_weight, jump, with_spc_to_dict):
        stride = 2**jump
        coalescent_features = coalescent_features.detach()
        coalescent_features.requires_grad = True
        spc_weight = spc_weight.detach()
        spc_weight.requires_grad = True

        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            output_features, output_level = spc.conv3d(
                **input_spc.to_dict(),
                level=input_spc.max_level,
                input=coalescent_features,
                weight=spc_weight,
                kernel_vectors=kernel_vectors,
                jump=jump,
                bias=bias)
            output = spc.to_dense(**input_spc.to_dict(),
                                  input=output_features,
                                  level=output_level)
            output_sparsity_masks = spc.to_dense(**input_spc.to_dict(),
                                                 input=torch.ones_like(
                                                     output_features,
                                                     requires_grad=False),
                                                 level=output_level)
        else:
            output_features, output_level = spc.conv3d(octrees,
                                                       point_hierarchies,
                                                       max_level,
                                                       pyramids,
                                                       exsum,
                                                       coalescent_features,
                                                       spc_weight,
                                                       kernel_vectors,
                                                       jump=jump,
                                                       bias=bias)
            output = spc.to_dense(point_hierarchies, pyramids, output_features,
                                  output_level)
            output_sparsity_masks = spc.to_dense(
                point_hierarchies, pyramids,
                torch.ones_like(output_features, requires_grad=False),
                output_level)

        feature_grids = feature_grids.detach()
        feature_grids.requires_grad = True
        dense_weight = dense_weight.detach()
        dense_weight.requires_grad = True

        padded_input = torch.nn.functional.pad(
            feature_grids, (kernel_offset, kernel_size - 1 - kernel_offset,
                            kernel_offset, kernel_size - 1 - kernel_offset,
                            kernel_offset, kernel_size - 1 - kernel_offset))
        expected_output = torch.nn.functional.conv3d(padded_input,
                                                     dense_weight,
                                                     stride=stride,
                                                     bias=bias)
        expected_height, expected_width, expected_depth = \
            expected_output.shape[2:]
        expected_output *= output_sparsity_masks[:, :, :expected_height,
                                                 :expected_width,
                                                 :expected_depth]
        assert torch.allclose(
            output[:, :, :expected_height, :expected_width, :expected_depth],
            expected_output,
            atol=1e-3,
            rtol=1e-3)
        # test backward
        grad_output = torch.rand_like(output)
        output.backward(grad_output)
        expected_output.backward(
            grad_output[:, :, :expected_height, :expected_width,
                        :expected_depth])

        _, _, sparsified_grad = spc.feature_grids_to_spc(
            feature_grids.grad, sparsity_masks)

        assert torch.allclose(coalescent_features.grad,
                              sparsified_grad,
                              rtol=1e-3,
                              atol=1e-3)
        assert torch.allclose(spc_weight.grad,
                              dense_weight.grad.reshape(
                                  out_channels, in_channels,
                                  -1).permute(2, 1, 0),
                              rtol=5e-2,
                              atol=5e-2)
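The dense reference in this test aligns torch.nn.functional.conv3d with the SPC convolution by padding kernel_offset voxels in front and kernel_size - 1 - kernel_offset behind on every spatial axis. As a standalone sketch (the helper name is ours):

def pad_for_kernel_offset(x, kernel_size, kernel_offset):
    # Same (front, back) pair for the W, H and D axes; torch.nn.functional.pad
    # consumes pairs innermost dimension first.
    pair = (kernel_offset, kernel_size - 1 - kernel_offset)
    return torch.nn.functional.pad(x, pair * 3)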