Example 1
    def test_padded(self, octree: torch.Tensor):

        # Configuration
        depth = self.octree_creator._depth
        channels = 6 if self.octree_creator._include_color else 3
        num_outputs = 5

        # Pad octree to a specific, fixed length
        print(f'Original size: {octree.shape}')
        padded_size = 1000000
        octree_padded = torch.nn.ConstantPad1d(
            (0, padded_size - octree.shape[0]), 0)(octree)

        # Create a batch from the octree and move it to the GPU
        # (octree_property below requires the batch on the GPU)
        octree_batch = ocnn.octree_batch([octree_padded]).cuda()

        # Extract features from the octree
        data = ocnn.octree_property(octree_batch, 'feature', depth).cuda()
        assert data.size(1) == channels

        # Test simple convolution
        conv1 = ocnn.OctreeConv(depth, channels, num_outputs)
        conv1.cuda()
        out1 = conv1(data, octree_batch)

        # Test fast convolution
        conv2 = ocnn.OctreeConvFast(depth, channels, num_outputs)
        conv2.cuda()
        out2 = conv2(data, octree_batch)
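For reference, the same padding-and-batching flow can be reproduced outside the test class with the sample octrees used in Examples 6 and 8 below (the depth value of 4 and the single-sample setup are assumptions, not part of the original test):

import torch
import ocnn

depth = 4  # matches the depth used with these samples in Example 6
octree = ocnn.octree_samples(['octree_1'])[0]

# Pad the raw octree buffer to a fixed length, as in the test above.
padded_size = 1000000
octree_padded = torch.nn.ConstantPad1d(
    (0, padded_size - octree.shape[0]), 0)(octree)

# Batch it and move it to the GPU before querying properties.
octree_batch = ocnn.octree_batch([octree_padded]).cuda()
data = ocnn.octree_property(octree_batch, 'feature', depth)
print(data.shape)  # expected layout: [1, channels, num_nodes, 1]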
Example 2
 def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1):
     super(OctreeConvRelu, self).__init__()
     self.conv = ocnn.OctreeConv(depth,
                                 channel_in,
                                 channel_out,
                                 kernel_size,
                                 stride)
     self.relu = torch.nn.ReLU(inplace=True)
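Only the constructor is captured in this listing; the matching forward presumably chains the two submodules. A minimal sketch (the signature is an assumption, modeled on how OctreeConv is called in Examples 1 and 6):

 def forward(self, data, octree):
     # OctreeConv needs both the feature tensor and the octree batch.
     out = self.conv(data, octree)
     out = self.relu(out)
     return out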
Example 3
 def __init__(self,
              depth,
              channel_in,
              channel_out,
              kernel_size=[3],
              stride=1):
     super(OctreeConvBn, self).__init__()
     self.conv = ocnn.OctreeConv(depth, channel_in, channel_out,
                                 kernel_size, stride)
     # bn_eps and bn_momentum are not constructor arguments here; they are
     # assumed to be module-level constants (see Example 4 for typical values).
     self.bn = torch.nn.BatchNorm2d(channel_out, bn_eps, bn_momentum)
Example 4
 def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1, bn_eps=0.00001, bn_momentum=0.01):
     super(OctreeConvBnRelu, self).__init__()
     self.conv = ocnn.OctreeConv(depth,
                                 channel_in,
                                 channel_out,
                                 kernel_size,
                                 stride)
     self.bn = torch.nn.BatchNorm2d(channel_out,
                                    bn_eps,
                                    bn_momentum)
     self.relu = torch.nn.ReLU(inplace=True)
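As in Example 2, only __init__ is shown. A sketch of the corresponding forward (an assumption): BatchNorm2d applies cleanly here because octree features are 4-D tensors of shape [batch, channels, num_nodes, 1], as seen in Example 6.

 def forward(self, data, octree):
     out = self.conv(data, octree)  # [N, channel_out, num_nodes, 1]
     out = self.bn(out)             # normalizes over the channel dimension
     out = self.relu(out)
     return out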
Example 5
 def __init__(self,
              depth,
              channel_in,
              channel_out,
              kernel_size=[3],
              stride=1,
              nempty=False):
     super().__init__()
     self.conv = ocnn.OctreeConv(depth, channel_in, channel_out,
                                 kernel_size, stride, nempty)
     # bn_eps and bn_momentum come from module scope, as in Example 3.
     self.bn = torch.nn.BatchNorm2d(channel_out, bn_eps, bn_momentum)
     self.relu = torch.nn.ReLU(inplace=True)
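The extra nempty flag that this variant forwards to OctreeConv restricts the convolution to non-empty octree nodes. Example 8 below verifies the behavior by masking with the 'child' property; in compact form the idiom is (a sketch assuming octree, data, depth, and a conv_nempty module built with nempty=True are in scope):

mask = ocnn.octree_property(octree, 'child', depth) >= 0  # non-empty nodes
data_nempty = data[:, :, mask]          # drop features of empty nodes
out = conv_nempty(data_nempty, octree)  # conv built with nempty=True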
Example 6
    def forward_and_backward(self, kernel_size, stride):
        depth = 4
        channel = 3
        height = 152
        num_outputs = 5
        octree = ocnn.octree_batch(
            ocnn.octree_samples(['octree_1', 'octree_2']))
        # Random features laid out as [batch, channel, num_nodes, 1].
        data = np.random.uniform(-1.0, 1.0,
                                 [1, channel, height, 1]).astype('float32')

        # forward
        conv1 = ocnn.OctreeConv(depth, channel, num_outputs, kernel_size,
                                stride)
        conv2 = ocnn.OctreeConvFast(depth, channel, num_outputs, kernel_size,
                                    stride)

        # use the same initialization
        with torch.no_grad():
            conv2.weights.data.copy_(conv1.weights.data)

        # forward
        octree = octree.to('cuda')
        conv1.to('cuda')
        data1 = torch.from_numpy(data).to('cuda').requires_grad_()
        out1 = conv1(data1, octree)
        conv2.to('cuda')
        data2 = torch.from_numpy(data).to('cuda').requires_grad_()
        out2 = conv2(data2, octree)

        # backward
        pseudo_grad = torch.rand(out1.shape,
                                 dtype=out1.dtype,
                                 device=out1.device)
        out1.backward(pseudo_grad)
        out2.backward(pseudo_grad)

        # test
        self.assertTrue(
            np.array_equal(out1.cpu().detach().numpy(),
                           out2.cpu().detach().numpy()))
        self.assertTrue(
            np.allclose(data1.grad.cpu().numpy(),
                        data2.grad.cpu().numpy(),
                        atol=1e-06))
        self.assertTrue(
            np.allclose(conv1.weights.grad.cpu().numpy(),
                        conv2.weights.grad.cpu().numpy(),
                        atol=1e-06))
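A test entry point would then sweep kernel sizes and strides through this helper; the combinations below are hypothetical:

    def test_forward_and_backward(self):
        # Hypothetical parameter sweep; the real test may use other values.
        for kernel_size in [[3], [2]]:
            for stride in [1, 2]:
                self.forward_and_backward(kernel_size, stride)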
Example 7
    def test_simple(self, octree: torch.Tensor):

        # Configuration
        depth = self.octree_creator._depth
        channels = 6 if self.octree_creator._include_color else 3
        num_outputs = 5

        # Create a batch from the octree and move it to the GPU
        # (octree_property below requires the batch on the GPU)
        octree_batch = ocnn.octree_batch([octree]).cuda()

        # Extract features from the octree
        data = ocnn.octree_property(octree_batch, 'feature', depth).cuda()
        assert data.size(1) == channels

        # Test simple convolution
        conv1 = ocnn.OctreeConv(depth, channels, num_outputs)
        conv1.cuda()
        out1 = conv1(data, octree_batch)

        # Test fast convolution
        conv2 = ocnn.OctreeConvFast(depth, channels, num_outputs)
        conv2.cuda()
        out2 = conv2(data, octree_batch)
Example 8
  def forward_and_backward(self, kernel_size, stride):
    depth = 4
    channel = 3
    height = 152
    num_outputs = 5
    octree = ocnn.octree_batch(ocnn.octree_samples(['octree_1', 'octree_2']))
    data = np.random.uniform(-1.0, 1.0, [1, channel, height, 1]).astype('float32')

    # forward
    conv1 = ocnn.OctreeConv(depth, channel, num_outputs, kernel_size, stride)
    conv2 = ocnn.OctreeConvFast(depth, channel, num_outputs, kernel_size, stride)
    conv3 = ocnn.OctreeConv(depth, channel, num_outputs, kernel_size, stride,
                            nempty=True)
    conv4 = ocnn.OctreeConv(depth, channel, num_outputs, kernel_size, stride)

    # use the same initialization
    with torch.no_grad():
      conv2.weights.data.copy_(conv1.weights.data)
      conv3.weights.data.copy_(conv1.weights.data)
      conv4.weights.data.copy_(conv1.weights.data)

    # forward - compare OctreeConv and OctreeConvFast
    octree = octree.cuda()
    conv1.cuda()
    data1 = torch.from_numpy(data).cuda().requires_grad_()
    out1 = conv1(data1, octree)

    conv2.cuda()
    data2 = torch.from_numpy(data).cuda().requires_grad_()
    out2 = conv2(data2, octree)

    # forward - compare OctreeConv with nempty = True and False
    conv3.cuda()
    # Non-empty nodes at `depth` have a child index >= 0.
    mask3 = ocnn.octree_property(octree, 'child', depth) >= 0
    data3 = torch.from_numpy(data).cuda().requires_grad_()
    tmp3 = data3[:, :, mask3]  # keep only features of non-empty nodes
    out3 = conv3(tmp3, octree)

    conv4.cuda()
    # A strided convolution produces output one octree level up.
    depth_out = depth if stride == 1 else depth - 1
    mask4 = ocnn.octree_property(octree, 'child', depth_out) >= 0
    data4 = torch.from_numpy(data).cuda().requires_grad_()
    tmp4 = data4 * mask3.unsqueeze(-1).float()  # zero features at empty nodes
    tmp4 = conv4(tmp4, octree)
    out4 = tmp4[:, :, mask4]  # compare only on non-empty output nodes

    # backward
    pseudo_grad1 = torch.rand(out1.shape, dtype=out1.dtype, device=out1.device)
    out1.backward(pseudo_grad1)
    out2.backward(pseudo_grad1)

    pseudo_grad2 = torch.rand(out3.shape, dtype=out3.dtype, device=out3.device)
    out3.backward(pseudo_grad2)
    out4.backward(pseudo_grad2)

    # test
    self.assertTrue(np.array_equal(out1.cpu().detach().numpy(),
                                   out2.cpu().detach().numpy()))
    self.assertTrue(np.allclose(data1.grad.cpu().numpy(),
                                data2.grad.cpu().numpy(),
                                atol=1e-06))
    self.assertTrue(np.allclose(conv1.weights.grad.cpu().numpy(),
                                conv2.weights.grad.cpu().numpy(),
                                atol=1e-06))

    self.assertTrue(np.allclose(out3.cpu().detach().numpy(),
                                out4.cpu().detach().numpy(),
                                atol=1e-06))
    self.assertTrue(np.allclose(data3.grad.cpu().numpy(),
                                data4.grad.cpu().numpy(),
                                atol=1e-06))
    self.assertTrue(np.allclose(conv3.weights.grad.cpu().numpy(),
                                conv4.weights.grad.cpu().numpy(),
                                atol=1e-06))