def test_padded(self, octree: torch.Tensor):

        # Configuration
        depth = self.octree_creator._depth
        channels = 6 if self.octree_creator._include_color else 3
        num_outputs = 5

        # Pad octree to a specific, fixed length
        print(f'Original size: {octree.shape}')
        padded_size = 1000000
        octree_padded = torch.nn.ConstantPad1d(
            (0, padded_size - octree.shape[0]), 0)(octree)

        # Create batch from the octree and move it to VRAM (it has to be in VRAM for the next step)
        octree_batch = ocnn.octree_batch([octree_padded]).cuda()

        # Extract features from the octree
        data = ocnn.octree_property(octree_batch, 'feature', depth).cuda()
        assert data.size(1) == channels

        # Test simple convolution
        conv1 = ocnn.OctreeConv(depth, channels, num_outputs)
        conv1.cuda()
        out1 = conv1(data, octree_batch)

        # Test fast convolution
        conv2 = ocnn.OctreeConvFast(depth, channels, num_outputs)
        conv2.cuda()
        out2 = conv2(data, octree_batch)
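
        # A natural follow-up check, as in the deconvolution examples further down,
        # would be to first copy conv1's weights into conv2 and then verify that the
        # two implementations agree, e.g.:
        #   assert torch.allclose(out1, out2, atol=1e-6)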
Example #2
def preprocess_stacked_octree_batch(observation: th.Tensor,
                                    device) -> Dict[str, th.Tensor]:
    # Note: Primordial magic is happening here,
    #       but there's no reason to tremble in fear.
    #       For your own good don't question it too much,
    #       it's just an optimised stacked octree batch...

    octrees = []
    for octree in observation.reshape(-1, observation.shape[-1]):
        # Get original octree size
        octree_size = np.frombuffer(buffer=octree[-4:],
                                    dtype='uint32',
                                    count=1)
        # Convert to tensor and append to list
        octrees.append(th.from_numpy(octree[:octree_size[0]]))
    # Make a batch out of the octrees (consisting of n stacked frames)
    octree_batch = ocnn.octree_batch(octrees)

    # Get number of auxiliary observations encoded as float32 and parse them
    n_aux_obs_f32 = int(
        np.frombuffer(buffer=observation[0, 0, -8:-4], dtype='uint32',
                      count=1))
    aux_obs = th.from_numpy(
        np.frombuffer(
            buffer=observation[:, :, -(4 * n_aux_obs_f32 + 8):-8].reshape(-1),
            dtype='float32',
            count=n_aux_obs_f32 * observation.shape[0] *
            observation.shape[1]).reshape(observation.shape[:2] +
                                          (n_aux_obs_f32, )))

    return {'octree': octree_batch.to(device), 'aux_obs': aux_obs.to(device)}
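
For reference, the row layout that this decoder assumes can be mirrored by a small packing helper. The sketch below is hypothetical (the function name and arguments are not from the original code); it simply inverts the slicing above: the serialized octree sits at the front of each row, the auxiliary float32 observations sit just before the two trailing counters, and the last 8 bytes hold the auxiliary-value count and the original octree size, both as uint32.

import numpy as np

def encode_stacked_octree_row(octree_bytes: np.ndarray,
                              aux_obs: np.ndarray,
                              row_length: int) -> np.ndarray:
    # Hypothetical helper: pack one observation row in the layout expected
    # by preprocess_stacked_octree_batch().
    n_aux = int(aux_obs.size)
    row = np.zeros(row_length, dtype=np.uint8)
    # serialized (zero-padded) octree at the front of the row
    row[:octree_bytes.size] = octree_bytes
    # auxiliary float32 observations just before the two trailing counters
    row[-(4 * n_aux + 8):-8] = aux_obs.astype(np.float32).view(np.uint8)
    # number of auxiliary float32 values, encoded as uint32
    row[-8:-4] = np.frombuffer(np.uint32(n_aux).tobytes(), dtype=np.uint8)
    # original (unpadded) octree size, encoded as uint32
    row[-4:] = np.frombuffer(np.uint32(octree_bytes.size).tobytes(), dtype=np.uint8)
    return row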
Example #3
    def test_forward_and_backward_max_pool(self):
        depth, channel, height = 5, 2, 16
        octree = ocnn.octree_batch(
            ocnn.octree_samples(['octree_1', 'octree_1']))
        data = np.array([[1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8],
                         [8.1, 7.2, 6.3, 5.4, 4.5, 3.6, 2.7, 1.8]],
                        dtype=np.float32)
        data = np.concatenate([data, data], axis=1)
        data = np.reshape(data, (1, channel, height, 1))
        out_gt = np.array([[8.8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                           [8.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
                          dtype=np.float32)
        out_gt = np.concatenate([out_gt, out_gt], axis=1)
        out_gt = np.reshape(out_gt, (1, channel, height, 1))
        grad_gt = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.1],
                            [8.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
                           dtype=np.float32)
        grad_gt = np.concatenate([grad_gt, grad_gt], axis=1)
        grad_gt = np.reshape(grad_gt, (1, channel, height, 1))
        mask_gt = np.array([[[[7], [15]], [[0], [8]]]], dtype=np.int32)

        # forward
        octree = octree.to('cuda')
        data_in = torch.from_numpy(data).to('cuda').requires_grad_()
        outputs, mask_out = ocnn.OctreeMaxPool(depth)(data_in, octree)

        # backward
        pseudo_grad = torch.from_numpy(data).to('cuda')
        outputs.backward(pseudo_grad)

        # test
        self.assertTrue(
            np.array_equal(mask_out.cpu().detach().numpy(), mask_gt))
        self.assertTrue(np.array_equal(outputs.cpu().detach().numpy(), out_gt))
        self.assertTrue(np.array_equal(data_in.grad.cpu().numpy(), grad_gt))
Example #4
 def test_xyz_key(self):
   samples = ocnn.octree_samples(['octree_1', 'octree_1'])
   octree = ocnn.octree_batch(samples).cuda()
   xyz = ocnn.octree_property(octree, 'xyz', 5)
   key = ocnn.octree_xyz2key(xyz, 5)
   xyz_out = ocnn.octree_key2xyz(key, 5)
   self.assertTrue((xyz == xyz_out).cpu().numpy().all())
Example #5
 def test_search_key(self):
   samples = ocnn.octree_samples(['octree_1', 'octree_1'])
   octree = ocnn.octree_batch(samples).cuda()
   key = torch.cuda.LongTensor([28673, 281474976739335, 10])
   idx_gt = torch.cuda.IntTensor([1, 15, -1])
   idx = ocnn.octree_search_key(key, octree, 5, False)
   self.assertTrue((idx == idx_gt).cpu().numpy().all())
Example #6
 def test_forward1(self):
     depth, channel, nnum = 4, 3, 16
     octree = ocnn.octree_batch(
         ocnn.octree_samples(['octree_1', 'octree_1'])).cuda()
     data = torch.ones([1, channel, nnum, 1], dtype=torch.float32).cuda()
     linear = ocnn.octree_trilinear(data, octree, depth, depth + 1)
     gt_result = np.ones([1, channel, 16, 1], dtype=np.float32)
     self.assertTrue((linear.cpu().numpy() == gt_result).all())
Example #7
File: transforms.py  Project: skyejy/O-CNN
def collate_octrees(batch):
    '''Merge a batch of octrees into one super octree.'''
    assert type(batch) == list
    octrees = [b[0] for b in batch]
    octree = ocnn.octree_batch(octrees)

    labels = torch.tensor([b[1] for b in batch])
    return octree, labels
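
In practice a collate function like this is passed to a PyTorch DataLoader so that each mini-batch is merged into a single super octree. A minimal usage sketch, where octree_dataset is an assumed dataset yielding (octree, label) pairs:

from torch.utils.data import DataLoader

loader = DataLoader(octree_dataset, batch_size=32, shuffle=True,
                    collate_fn=collate_octrees)
for octree, labels in loader:
    pass  # feed the super octree and the labels to the network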
Example #8
 def setUp(self):  # def initialize(self):
     self.depth = 1
     self.channel = 3
     self.octree = ocnn.octree_batch(ocnn.octree_samples(['octree_1']))
     self.data_in = np.random.uniform(
         -1.0, 1.0, [1, self.channel, 8, 1]).astype('float32')
     self.idx_maps = [
         list(range(0, 27)), [13], [13, 14, 16, 17, 22, 23, 25, 26],
         [4, 13, 22], [10, 13, 16], [12, 13, 14],
         [1, 4, 7, 10, 13, 16, 19, 22, 25],
         [3, 4, 5, 12, 13, 14, 21, 22, 23],
         [9, 10, 11, 12, 13, 14, 15, 16, 17]
     ]
Example #9
    def __call__(self, batch):
        '''Merge a batch of octrees into one super octree.'''
        assert type(batch) == list
        octrees = [b[0] for b in batch]
        octree = ocnn.octree_batch(octrees)
        labels = torch.tensor([b[1] for b in batch])

        outputs = [octree, labels]
        if self.return_pts:
            points = [b[2] for b in batch]
            outputs.append(points)
        return outputs
Example #10
    def octree_property(self, on_cuda=True):
        batch_size = 2
        octree = ocnn.octree_batch(
            ocnn.octree_samples(['octree_1'] * batch_size))
        if on_cuda:
            octree = octree.cuda()

        # test index
        out = ocnn.octree_property(octree, 'index', 5)
        out_gt = np.array([0] * 8 + [1] * 8)
        self.assertTrue(np.array_equal(out.cpu().numpy(), out_gt))

        # test feature
        out = ocnn.octree_property(octree, 'feature', 5)
        out_gt = np.zeros([3, 16], dtype=np.float32)
        out_gt[:, 0] = 3.0**0.5 / 3.0
        out_gt[:, 8] = 3.0**0.5 / 3.0
        out_gt = np.expand_dims(out_gt, axis=[0, 3])
        self.assertTrue(np.allclose(out.cpu().numpy(), out_gt))

        # test child
        out = ocnn.octree_property(octree, 'child', 5)
        out_gt = np.ones(16) * (-1)
        out_gt[0] = 0
        out_gt[8] = 1
        self.assertTrue(np.array_equal(out.cpu().numpy(), out_gt))
        # test child from depth=0
        out = torch.cat(
            [ocnn.octree_property(octree, 'child', d) for d in range(1, 6)])
        outs = ocnn.octree_property(octree, 'child')
        self.assertTrue(
            np.array_equal(outs[batch_size:].cpu().numpy(),
                           out.cpu().numpy()))

        # test node number
        nnums = np.array([2, 16, 128, 16, 16, 16])
        nnum_cums = np.array([0, 2, 18, 146, 162, 178, 194])
        node_num = ocnn.octree_property(octree, 'node_num', 5)
        node_nums = ocnn.octree_property(octree, 'node_num')
        node_num_cum = ocnn.octree_property(octree, 'node_num_cum', 5)
        node_nums_cum = ocnn.octree_property(octree, 'node_num_cum')
        self.assertTrue(node_num.item() == nnums[5])
        self.assertTrue(node_num_cum.item() == nnum_cums[5])
        self.assertTrue(np.array_equal(node_nums.cpu().numpy(), nnums))
        self.assertTrue(np.array_equal(node_nums_cum.cpu().numpy(), nnum_cums))

        # test batch_size, depth, full_depth
        self.assertTrue(
            ocnn.octree_property(octree, 'batch_size').item() == batch_size)
        self.assertTrue(ocnn.octree_property(octree, 'depth').item() == 5)
        self.assertTrue(ocnn.octree_property(octree, 'full_depth').item() == 2)
Example #11
  def test_decode_encode_key(self):
    samples = ocnn.octree_samples(['octree_1', 'octree_1'])
    octree = ocnn.octree_batch(samples).cuda()
    xyz = ocnn.octree_property(octree, 'xyz', 5)
    pts = ocnn.octree_decode_key(xyz)
    xyz_encode = ocnn.octree_encode_key(pts)

    gt = torch.cuda.ShortTensor([
        [16, 16, 16, 0], [16, 16, 17, 0], [16, 17, 16, 0], [16, 17, 17, 0],
        [17, 16, 16, 0], [17, 16, 17, 0], [17, 17, 16, 0], [17, 17, 17, 0],
        [16, 16, 16, 1], [16, 16, 17, 1], [16, 17, 16, 1], [16, 17, 17, 1],
        [17, 16, 16, 1], [17, 16, 17, 1], [17, 17, 16, 1], [17, 17, 17, 1]])
    self.assertTrue((gt == pts).cpu().numpy().all())
    self.assertTrue((xyz_encode == xyz).cpu().numpy().all())
Example #12
    def forward_and_backward(self, kernel_size, stride, idx=0):
        depth = 4
        channel = 3
        height = 152
        num_outputs = 2
        octree = ocnn.octree_batch(
            ocnn.octree_samples(['octree_1', 'octree_2']))
        data = np.random.uniform(-1.0, 1.0,
                                 [1, channel, height, 1]).astype('float32')

        # forward
        deconv1 = ocnn.OctreeDeconv(depth, channel, num_outputs, kernel_size,
                                    stride)
        deconv2 = ocnn.OctreeDeconvFast(depth, channel, num_outputs,
                                        kernel_size, stride)

        # use the same initialization
        with torch.no_grad():
            deconv2.weights.data = deconv1.weights.data

        # forward
        octree = octree.to('cuda')
        deconv1.to('cuda')
        data1 = torch.from_numpy(data).to('cuda').requires_grad_()
        out1 = deconv1(data1, octree)
        deconv2.to('cuda')
        data2 = torch.from_numpy(data).to('cuda').requires_grad_()
        out2 = deconv2(data2, octree)

        # backward
        pseudo_grad = torch.rand(out1.shape,
                                 dtype=out1.dtype,
                                 device=out1.device)
        out1.backward(pseudo_grad)
        out2.backward(pseudo_grad)

        # test
        self.assertTrue(
            np.allclose(out1.cpu().detach().numpy(),
                        out2.cpu().detach().numpy(),
                        atol=1e-6))
        self.assertTrue(
            np.allclose(data1.grad.cpu().numpy(),
                        data2.grad.cpu().numpy(),
                        atol=1e-06))
        self.assertTrue(
            np.allclose(deconv1.weights.grad.cpu().numpy(),
                        deconv2.weights.grad.cpu().numpy(),
                        atol=1e-06))
Example #13
    def test_octree2colP(self):
        depth = 4
        channel = 5
        stride = [1, 2]
        kernel_size = [[3, 3, 3], [2, 2, 2], [3, 1, 1], [3, 3, 1], [1, 1, 1]]
        samples = ocnn.octree_samples(
            ['octree_1', 'octree_2', 'octree_2', 'octree_1'])
        octree = ocnn.octree_batch(samples).cuda()
        node_num = ocnn.octree_property(octree, 'node_num', depth)
        data_in = torch.rand(1, channel, node_num.item(), 1).cuda()
        data_in = ocnn.octree_depad(data_in, octree, depth)
        data_in1 = data_in.clone().requires_grad_()
        data1 = ocnn.octree_pad(data_in1, octree, depth, 0)
        data_in2 = data_in.clone().requires_grad_()

        # octree2colP = octree2col + depad
        for i in range(len(stride)):
            for j in range(len(kernel_size)):
                out1 = ocnn.octree2col(data1, octree, depth, kernel_size[j],
                                       stride[i], False)
                if stride[i] == 1:
                    ks, height = out1.size(1), out1.size(2)
                    out1 = out1.view(1, -1, height, 1)
                    out1 = ocnn.octree_depad(out1, octree, depth)
                    out1 = out1.view(channel, ks, -1)
                out2 = ocnn.octree2col(data_in2, octree, depth, kernel_size[j],
                                       stride[i], True)

                pseudo_grad = torch.rand(out1.shape,
                                         dtype=out1.dtype,
                                         device=out1.device)
                out1.backward(pseudo_grad, retain_graph=True)
                out2.backward(pseudo_grad, retain_graph=True)

                # check
                self.assertTrue(
                    np.array_equal(out1.detach().cpu().numpy(),
                                   out2.detach().cpu().numpy()))
                self.assertTrue(
                    np.allclose(data_in1.grad.cpu().numpy(),
                                data_in2.grad.cpu().numpy()))
Example #14
def collate_octrees(batch):
    assert type(batch) == list

    outputs = {}
    for key in batch[0].keys():
        outputs[key] = [b[key] for b in batch]

        # Merge a batch of octrees into one super octree
        if 'octree' in key:
            outputs[key] = ocnn.octree_batch(outputs[key])

        # Convert the labels to a Tensor
        if 'label' in key:
            outputs['label'] = torch.tensor(outputs[key])

        # # Concat the inbox_mask
        # if 'inbox_mask' in key:
        #   pt_num = [mk.numel() for mk in outputs['inbox_mask']]
        #   outputs['pt_num'] = torch.tensor(pt_num)
        #   outputs['inbox_mask'] = torch.cat(outputs['inbox_mask'], dim=0)

    return outputs
Example #15
    def test_octree_property(self):
        octree = ocnn.octree_batch(ocnn.octree_samples(['octree_1'] *
                                                       2)).cuda()

        # test index
        out = ocnn.octree_property(octree, 'index', 5)
        out_gt = np.array([0] * 8 + [1] * 8)
        self.assertTrue(np.array_equal(out.cpu().numpy(), out_gt))

        # test feature
        out = ocnn.octree_property(octree, 'feature', 5)
        out_gt = np.zeros([3, 16], dtype=np.float32)
        out_gt[:, 0] = 3.0**0.5 / 3.0
        out_gt[:, 8] = 3.0**0.5 / 3.0
        out_gt = np.expand_dims(out_gt, axis=[0, 3])
        self.assertTrue(np.allclose(out.cpu().numpy(), out_gt))

        # test child
        out = ocnn.octree_property(octree, 'child', 5)
        out_gt = np.ones(16) * (-1)
        out_gt[0] = 0
        out_gt[8] = 1
        self.assertTrue(np.array_equal(out.cpu().numpy(), out_gt))
Example #16
    def test_simple(self, octree: torch.Tensor):

        # Configuration
        depth = self.octree_creator._depth
        channels = 6 if self.octree_creator._include_color else 3
        num_outputs = 5

        # Create batch from the octree and move it to VRAM (it has to be in VRAM for the next step)
        octree_batch = ocnn.octree_batch([octree]).cuda()

        # Extract features from the octree
        data = ocnn.octree_property(octree_batch, 'feature', depth).cuda()
        assert data.size(1) == channels

        # Test simple convolution
        conv1 = ocnn.OctreeConv(depth, channels, num_outputs)
        conv1.cuda()
        out1 = conv1(data, octree_batch)

        # Test fast convolution
        conv2 = ocnn.OctreeConvFast(depth, channels, num_outputs)
        conv2.cuda()
        out2 = conv2(data, octree_batch)
Example #17
    def forward_and_backward(self, kernel_size, stride, idx=0):
        depth = 4
        channel = 3
        height = 152
        num_outputs = 5
        octree = ocnn.octree_batch(
            ocnn.octree_samples(['octree_1', 'octree_2']))
        data = np.random.uniform(-1.0, 1.0,
                                 [1, channel, height, 1]).astype('float32')

        # forward
        conv1 = ocnn.OctreeDeconv(depth, channel, num_outputs, kernel_size,
                                  stride)
        conv2 = ocnn.OctreeDeconvFast(depth, channel, num_outputs, kernel_size,
                                      stride)
        conv3 = ocnn.OctreeDeconv(depth, channel, num_outputs, kernel_size,
                                  stride, True)
        conv4 = ocnn.OctreeDeconv(depth, channel, num_outputs, kernel_size,
                                  stride)

        # use the same initialization
        with torch.no_grad():
            conv2.weights.data.copy_(conv1.weights.data)
            conv3.weights.data.copy_(conv1.weights.data)
            conv4.weights.data.copy_(conv1.weights.data)

        # forward - compare OctreeConv and OctreeConvFast
        octree = octree.cuda()
        conv1.cuda()
        data1 = torch.from_numpy(data).cuda().requires_grad_()
        out1 = conv1(data1, octree)

        conv2.cuda()
        data2 = torch.from_numpy(data).cuda().requires_grad_()
        out2 = conv2(data2, octree)

        # forward - compare OctreeConv with nempty = True and False
        conv3.cuda()
        mask3 = ocnn.octree_property(octree, 'child', depth) >= 0
        data3 = torch.from_numpy(data).cuda().requires_grad_()
        tmp3 = data3[:, :, mask3]
        out3 = conv3(tmp3, octree)

        conv4.cuda()
        depth_out = depth if stride == 1 else depth + 1
        mask4 = ocnn.octree_property(octree, 'child', depth_out) >= 0
        data4 = torch.from_numpy(data).cuda().requires_grad_()
        tmp4 = data4 * mask3.unsqueeze(-1).float()
        tmp4 = conv4(tmp4, octree)
        out4 = tmp4[:, :, mask4]

        # backward
        pseudo_grad = torch.rand(out1.shape,
                                 dtype=out1.dtype,
                                 device=out1.device)
        out1.backward(pseudo_grad)
        out2.backward(pseudo_grad)

        pseudo_grad2 = torch.rand(out3.shape,
                                  dtype=out3.dtype,
                                  device=out3.device)
        out3.backward(pseudo_grad2)
        out4.backward(pseudo_grad2)

        # test
        self.assertTrue(
            np.allclose(out1.cpu().detach().numpy(),
                        out2.cpu().detach().numpy(),
                        atol=1e-6))
        self.assertTrue(
            np.allclose(data1.grad.cpu().numpy(),
                        data2.grad.cpu().numpy(),
                        atol=1e-06))
        self.assertTrue(
            np.allclose(conv1.weights.grad.cpu().numpy(),
                        conv2.weights.grad.cpu().numpy(),
                        atol=1e-06))

        self.assertTrue(
            np.allclose(out3.cpu().detach().numpy(),
                        out4.cpu().detach().numpy(),
                        atol=1e-06))
        self.assertTrue(
            np.allclose(data3.grad.cpu().numpy(),
                        data4.grad.cpu().numpy(),
                        atol=1e-06))
        self.assertTrue(
            np.allclose(conv3.weights.grad.cpu().numpy(),
                        conv4.weights.grad.cpu().numpy(),
                        atol=1e-06))
Example #18
 def get_octree(self, filelist):
     batch = ocnn.octree_samples(filelist)
     return ocnn.octree_batch(batch).cuda()
Example #19
import ocnn
import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('logs/resnet')
octree = ocnn.octree_batch(ocnn.octree_samples(['octree_1', 'octree_2']))
model = ocnn.ResNet(depth=5, channel_in=3, nout=4, resblk_num=2)
print(model)

octree = octree.cuda()
model = model.cuda()
writer.add_graph(model, octree)
writer.flush()
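
After running the script, the exported graph can be inspected with the standard TensorBoard command line, e.g. tensorboard --logdir logs/resnet.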