    def test_padded_to_list(self):
        device = torch.device("cuda:0")
        N = 5
        K = 20
        ndim = 2

        for ndim in (2, 3, 4):

            dims = [K] * ndim
            x = torch.rand([N] + dims, device=device)

            x_list = struct_utils.padded_to_list(x)
            for i in range(N):
                self.assertClose(x_list[i], x[i])

            split_size = torch.randint(1, K, size=(N, ndim)).unbind(0)
            x_list = struct_utils.padded_to_list(x, split_size)
            for i in range(N):
                slices = [i]
                for dim in range(ndim):
                    slices.append(slice(0, split_size[i][dim], 1))
                self.assertClose(x_list[i], x[tuple(slices)])

            # split size is a list of ints
            split_size = [int(z) for z in torch.randint(1, K, size=(N,)).unbind(0)]
            x_list = struct_utils.padded_to_list(x, split_size)
            for i in range(N):
                self.assertClose(x_list[i], x[i][: split_size[i]])
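
For reference, here is a minimal standalone sketch of the conversion these assertions
exercise (assuming a working pytorch3d install; the values and split sizes below are
made up purely for illustration):

import torch
from pytorch3d.structures import utils as struct_utils

# Two sequences of length 2 and 3, padded to a common length of 4.
x = torch.tensor([
    [1.0, 2.0, 0.0, 0.0],
    [3.0, 4.0, 5.0, 0.0],
]).unsqueeze(-1)                                # shape (2, 4, 1): (N, M, ...)
x_list = struct_utils.padded_to_list(x, split_size=[2, 3])
print(x_list[0].shape, x_list[1].shape)         # torch.Size([2, 1]) torch.Size([3, 1])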
Example #2
    def test_padded_to_list(self):
        device = torch.device('cuda:0')
        N = 5
        K = 20
        ndim = 2
        dims = [K] * ndim
        x = torch.rand([N] + dims, device=device)

        x_list = struct_utils.padded_to_list(x)
        for i in range(N):
            self.assertClose(x_list[i], x[i])

        split_size = torch.randint(1, K, size=(N, )).tolist()
        x_list = struct_utils.padded_to_list(x, split_size)
        for i in range(N):
            self.assertClose(x_list[i], x[i, :split_size[i]])

        split_size = torch.randint(1, K, size=(2 * N, )).view(N, 2).unbind(0)
        x_list = struct_utils.padded_to_list(x, split_size)
        for i in range(N):
            self.assertClose(x_list[i],
                             x[i, :split_size[i][0], :split_size[i][1]])

        with self.assertRaisesRegex(ValueError, 'Supports only'):
            x = torch.rand((N, K, K, K, K), device=device)
            split_size = torch.randint(1, K, size=(N, )).tolist()
            struct_utils.padded_to_list(x, split_size)
Example #3
from typing import List, Union

import torch
from pytorch3d.structures.utils import padded_to_list


def _padded_to_list_wrapper(
    x: torch.Tensor, split_size: Union[list, tuple, None] = None
) -> List[torch.Tensor]:
    r"""
    This is a wrapper function for pytorch3d.structures.utils.padded_to_list
    which only accepts 3-dimensional inputs.

    For this use case, the input x is of shape (N, F, ...) where F
    is the number of faces which is different for each tensor in the batch.

    This function transforms a padded tensor of shape (N, M, ...) into a
    list of N tensors of shape (Mi, ...), where Mi is given by
    split_size[i], or of shape (M, ...) if split_size is None.

    Args:
      x: padded Tensor
      split_size: list of ints defining the number of items for each tensor
        in the output list.

    Returns:
      x_list: a list of tensors
    """
    N, M = x.shape[:2]
    reshape_dims = x.shape[2:]
    D = torch.prod(torch.tensor(reshape_dims)).item()
    x_reshaped = x.reshape(N, M, D)
    x_list = padded_to_list(x_reshaped, split_size=split_size)
    x_list = [xl.reshape((xl.shape[0],) + reshape_dims) for xl in x_list]
    return x_list
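
As a rough usage sketch of the wrapper above (illustrative only: the shapes and split
sizes are made up, and the helper is assumed to be importable from the module that
defines it):

import torch

# Padded input for N=2 meshes with at most M=4 faces each and a (3, 2) block per face.
x = torch.rand(2, 4, 3, 2)
x_list = _padded_to_list_wrapper(x, split_size=[3, 4])
print(x_list[0].shape, x_list[1].shape)   # torch.Size([3, 3, 2]) torch.Size([4, 3, 2])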
Example #4
    def test_estimate_normals(self):
        for with_normals in (True, False):
            for run_padded in (True, False):
                for run_packed in (True, False):

                    clouds = TestPointclouds.init_cloud(
                        3,
                        100,
                        with_normals=with_normals,
                        with_features=False,
                        min_points=60,
                    )
                    nums = clouds.num_points_per_cloud()
                    if run_padded:
                        clouds.points_padded()
                    if run_packed:
                        clouds.points_packed()

                    normals_est_padded = clouds.estimate_normals(assign_to_self=True)
                    normals_est_list = struct_utils.padded_to_list(
                        normals_est_padded, nums.tolist()
                    )
                    self.assertClose(clouds.normals_padded(), normals_est_padded)
                    for i in range(len(clouds)):
                        self.assertClose(clouds.normals_list()[i], normals_est_list[i])
                    self.assertClose(
                        clouds.normals_packed(), torch.cat(normals_est_list, dim=0)
                    )
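
A standalone version of the same calls, outside the test harness (a sketch assuming
pytorch3d; the point counts are arbitrary and only arguments that already appear in
the test above are used):

import torch
from pytorch3d.structures import Pointclouds
from pytorch3d.structures import utils as struct_utils

points = [torch.rand(100, 3), torch.rand(80, 3)]
clouds = Pointclouds(points=points)
normals_padded = clouds.estimate_normals(assign_to_self=True)
normals_list = struct_utils.padded_to_list(
    normals_padded, clouds.num_points_per_cloud().tolist()
)
print(normals_padded.shape)               # torch.Size([2, 100, 3]), padded to the largest cloud
print([n.shape for n in normals_list])    # [torch.Size([100, 3]), torch.Size([80, 3])]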
Example #5
    def faces_uvs_list(self) -> List[torch.Tensor]:
        if self._faces_uvs_list is None:
            if self.isempty():
                self._faces_uvs_list = [
                    torch.empty((0, 3), dtype=torch.float32, device=self.device)
                ] * self._N
            else:
                self._faces_uvs_list = padded_to_list(
                    self._faces_uvs_padded, split_size=self._num_faces_per_mesh
                )
        return self._faces_uvs_list
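
The accessor above lazily converts the padded representation into a per-mesh list and
caches the result. A simplified stand-in for the same idiom, detached from the
pytorch3d class (all names here are made up):

from typing import List, Optional

import torch
from pytorch3d.structures.utils import padded_to_list


class PaddedStore:
    """Toy container: holds a padded tensor and caches its list form on first access."""

    def __init__(self, values_padded: torch.Tensor, num_per_item: List[int]) -> None:
        self._values_padded = values_padded
        self._num_per_item = num_per_item
        self._values_list: Optional[List[torch.Tensor]] = None

    def values_list(self) -> List[torch.Tensor]:
        if self._values_list is None:
            self._values_list = padded_to_list(
                self._values_padded, split_size=self._num_per_item
            )
        return self._values_list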
Example #6
    def test_padded_to_packed(self):
        device = torch.device('cuda:0')
        N = 5
        K = 20
        ndim = 2
        dims = [K] * ndim
        x = torch.rand([N] + dims, device=device)

        # Case 1: no split_size or pad_value provided
        # Check output is just the flattened input.
        x_packed = struct_utils.padded_to_packed(x)
        self.assertTrue(x_packed.shape == (x.shape[0] * x.shape[1],
                                           x.shape[2]))
        self.assertClose(x_packed, x.reshape(-1, K))

        # Case 2: pad_value is provided.
        # Check each section of the packed tensor matches the
        # corresponding unpadded elements of the padded tensor.
        # Check that only rows where all the values are padded
        # are removed in the conversion to packed.
        pad_value = -1
        x_list = []
        split_size = []
        for _ in range(N):
            dim = torch.randint(K, size=(1, )).item()
            # Add some random values in the input which are the same as the pad_value.
            # These should not be filtered out.
            x_list.append(
                torch.randint(low=pad_value,
                              high=10,
                              size=(dim, K),
                              device=device))
            split_size.append(dim)
        x_padded = struct_utils.list_to_padded(x_list, pad_value=pad_value)
        x_packed = struct_utils.padded_to_packed(x_padded, pad_value=pad_value)
        curr = 0
        for i in range(N):
            self.assertClose(x_packed[curr:curr + split_size[i], ...],
                             x_list[i])
            self.assertClose(torch.cat(x_list), x_packed)
            curr += split_size[i]

        # Case 3: split_size is provided.
        # Check each section of the packed tensor matches the corresponding
        # unpadded elements.
        x_packed = struct_utils.padded_to_packed(x_padded,
                                                 split_size=split_size)
        curr = 0
        for i in range(N):
            self.assertClose(x_packed[curr:curr + split_size[i], ...],
                             x_list[i])
            self.assertClose(torch.cat(x_list), x_packed)
            curr += split_size[i]

        # Case 4: split_size of the wrong shape is provided.
        # Raise an error.
        split_size = torch.randint(1, K, size=(2 * N, )).view(N, 2).unbind(0)
        with self.assertRaisesRegex(ValueError, '1-dimensional'):
            x_packed = struct_utils.padded_to_packed(x_padded,
                                                     split_size=split_size)

        split_size = torch.randint(1, K, size=(2 * N, )).view(N * 2).tolist()
        with self.assertRaisesRegex(ValueError,
                                    'same length as inputs first dimension'):
            x_packed = struct_utils.padded_to_packed(x_padded,
                                                     split_size=split_size)

        # Case 5: both pad_value and split_size are provided.
        # Raise an error.
        with self.assertRaisesRegex(ValueError, 'Only one of'):
            x_packed = struct_utils.padded_to_packed(x_padded,
                                                     split_size=split_size,
                                                     pad_value=-1)

        # Case 6: Input has more than 3 dims.
        # Raise an error.
        with self.assertRaisesRegex(ValueError, 'Supports only'):
            x = torch.rand((N, K, K, K, K), device=device)
            split_size = torch.randint(1, K, size=(N, )).tolist()
            struct_utils.padded_to_packed(x, split_size=split_size)
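
The pad_value path checked in Case 2 can be condensed into a few standalone lines
(a minimal sketch assuming pytorch3d; the tensor sizes are arbitrary):

import torch
from pytorch3d.structures import utils as struct_utils

x_list = [torch.ones(2, 3), torch.ones(3, 3)]
x_padded = struct_utils.list_to_padded(x_list, pad_value=-1)      # shape (2, 3, 3)
x_packed = struct_utils.padded_to_packed(x_padded, pad_value=-1)
# Only rows made up entirely of the pad value are dropped.
print(x_packed.shape)                                             # torch.Size([5, 3])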