Example #1
def test_smoke(device='cpu'):
    g = kal.models.Voxel3DIWGAN.Voxel3DIWGenerator()
    d = kal.models.Voxel3DIWGAN.Voxel3DIWDiscriminator()
    g = g.to(device)
    d = d.to(device)
    x = torch.randn(4, 200).to(device)
    x_ = g(x)
    x_ = d(x_)
    helpers._assert_shape_eq(x, x_.shape, dim=0)
Example #2
def realign(src: Union[torch.Tensor, PointCloud],
            tgt: Union[torch.Tensor, PointCloud],
            inplace: Optional[bool] = True):
    r""" Aligns a pointcloud `src` to be in the same (axis-aligned) bounding
    box as that of pointcloud `tgt`.

    Args:
        src (torch.Tensor or PointCloud) : Source pointcloud to be transformed
            (shape: :math:`\cdots \times N \times D`, where :math:`N` is the
            number of points in the pointcloud, and :math:`D` is the
            dimensionality of each point in the cloud).
        tgt (torch.Tensor or PointCloud) : Target pointcloud to which `src` is
            to be transformed (The `src` cloud is transformed to the
            axis-aligned bounding box that the target cloud maps to). This
            cloud must have the same number of dimensions :math:`D` as in the
            source cloud. (shape: :math:`\cdots \times \cdots \times D`).
        inplace (bool, optional): Bool to make the transform in-place.

    Returns:
        (torch.Tensor): Pointcloud `src` realigned to fit in the (axis-aligned)
            bounding box of the `tgt` cloud.

    Example:
        >>> tgt = torch.rand(1000, 3)
        >>> src = (tgt * 100) + 3
        >>> src_realigned = realign(src, tgt)

    """
    if isinstance(src, PointCloud):
        src = src.points
    if isinstance(tgt, PointCloud):
        tgt = tgt.points
    helpers._assert_tensor(src)
    helpers._assert_tensor(tgt)
    helpers._assert_dim_ge(src, 2)
    helpers._assert_dim_ge(tgt, 2)
    helpers._assert_shape_eq(src, tgt.shape, dim=-1)

    if not inplace:
        src = src.clone()

    # Compute the relative scaling factor and scale the src cloud.
    src_min, _ = src.min(-2)
    src_max, _ = src.max(-2)
    tgt_min, _ = tgt.min(-2)
    tgt_max, _ = tgt.max(-2)
    src_min = src_min.unsqueeze(-2)
    src_max = src_max.unsqueeze(-2)
    tgt_min = tgt_min.unsqueeze(-2)
    tgt_max = tgt_max.unsqueeze(-2)
    # Center the pointclouds.
    src = src - src.mean(-2).unsqueeze(-2)
    src = ((tgt_max - tgt_min) / (src_max - src_min + EPS)) * src
    # Undo the centering translation, and return the result.
    return src + tgt.mean(-2).unsqueeze(-2)
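
A minimal sanity-check sketch of the behaviour described above (assuming `realign` and `torch` are in scope; the tensors here are illustrative): after realignment, the per-axis extent of the source cloud approximately matches that of the target, up to the `EPS` offset in the scaling denominator.

import torch

src = torch.rand(1000, 3) * 100 + 3
tgt = torch.rand(1000, 3)
out = realign(src, tgt, inplace=False)

# Extents (max - min per axis) should agree approximately; the EPS term in
# the scaling denominator makes the match approximate rather than exact.
src_extent = out.max(-2)[0] - out.min(-2)[0]
tgt_extent = tgt.max(-2)[0] - tgt.min(-2)[0]
assert torch.allclose(src_extent, tgt_extent, atol=1e-4)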
Example #3
def rotate(cloud: Union[torch.Tensor, PointCloud],
           rotmat: torch.Tensor,
           inplace: Optional[bool] = True):
    """Rotates the the input pointcloud by a rotation matrix.

    Args:
        cloud (torch.Tensor, np.ndarray, or PointCloud): pointcloud (ndims = 2 or 3)
        rotmat (torch.Tensor or np.ndarray): rotation matrix (3 x 3, or one per cloud).
        inplace (bool, optional): Bool to make the transform in-place.

    Returns:
        cloud_rot (Tensor): rotated pointcloud of the same shape as input

    Shape:
        - cloud: :math:`(B \times N \times 3)` (or) :math:`(N \times 3)`, where :math:`B`
            is the batchsize, :math:`N` is the number of points per cloud,
            and :math:`3` is the dimensionality of each point.
        - rotmat: :math:`(3, 3)` or :math:`(B, 3, 3)`.

    Example:
        >>> points = torch.rand(1000, 3)
        >>> r_mat = torch.rand(3, 3)
        >>> points2 = rotate(points, r_mat)

    """
    if isinstance(cloud, np.ndarray):
        cloud = torch.from_numpy(cloud)
    if isinstance(cloud, PointCloud):
        cloud = cloud.points
    if isinstance(rotmat, np.ndarray):
        rotmat = torch.from_numpy(rotmat)

    helpers._assert_tensor(cloud)
    helpers._assert_tensor(rotmat)
    helpers._assert_dim_ge(cloud, 2)
    helpers._assert_dim_ge(rotmat, 2)
    # Rotation matrix must have last two dimensions of shape 3.
    helpers._assert_shape_eq(rotmat, (3, 3), dim=-1)
    helpers._assert_shape_eq(rotmat, (3, 3), dim=-2)

    if not inplace:
        cloud = cloud.clone()

    if rotmat.dim() == 2 and cloud.dim() == 2:
        cloud = torch.mm(rotmat, cloud.transpose(0, 1)).transpose(0, 1)
    else:
        if rotmat.dim() == 2:
            rotmat = rotmat.expand(cloud.shape[0], 3, 3)
        cloud = torch.bmm(rotmat, cloud.transpose(1, 2)).transpose(1, 2)

    return cloud
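
A short sketch of the two rotation-matrix layouts the docstring mentions (assuming the pointcloud `rotate` above is in scope): a single 3 x 3 matrix is shared across the whole batch, while a `(B, 3, 3)` tensor applies one rotation per cloud.

import torch

clouds = torch.rand(4, 1000, 3)               # B x N x 3
single_rot = torch.eye(3)                      # shared by every cloud
per_cloud_rot = torch.eye(3).repeat(4, 1, 1)   # one 3 x 3 matrix per cloud

out_shared = rotate(clouds, single_rot, inplace=False)
out_batched = rotate(clouds, per_cloud_rot, inplace=False)
assert out_shared.shape == clouds.shape
assert out_batched.shape == clouds.shape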
Example #4
def test_smoke():
    model = kal.models.PointNet2.PointNet2Classifier(
        in_features=4,
        num_classes=13,
        batchnorm=True,
    ).cuda()
    x = torch.randn(2, 128, 7).cuda()
    out = model(x)
    helpers._assert_shape_eq(out, (2, 13))

    model = kal.models.PointNet2.PointNet2Segmenter(
        in_features=4,
        num_classes=13,
        batchnorm=True,
    ).cuda()
    x = torch.randn(2, 128, 7).cuda()
    out = model(x)
    helpers._assert_shape_eq(out, (2, 128, 13))
Example #5
def test_downsample_voxelgrid(device='cpu'):
    voxel = torch.ones([32, 32, 32]).to(device)
    down = kal.transforms.DownsampleVoxelGrid([2, 2, 2], inplace=False)
    helpers._assert_shape_eq(down(voxel), (16, 16, 16))
    down = kal.transforms.DownsampleVoxelGrid([3, 3, 3], inplace=False)
    helpers._assert_shape_eq(down(voxel), (10, 10, 10))
    down = kal.transforms.DownsampleVoxelGrid([3, 2, 1], inplace=False)
    helpers._assert_shape_eq(down(voxel), (10, 16, 32))
Example #6
    def __init__(self,
                 points: Optional[torch.Tensor] = None,
                 normals: Optional[torch.Tensor] = None,
                 device: Optional[str] = 'cpu',
                 copy: Optional[bool] = False):
        r"""Initialize a PointCloud object, given a tensor of points, and
        optionally, a tensor representing pointcloud normals.

        Args:
            points (torch.Tensor): Points that make up the pointcloud (shape:
                :math:`... \times N \times D`), where :math:`N` denotes the
                number of points in the cloud, and :math:`D` denotes the
                dimensionality of each point.
            normals (torch.Tensor): Normals for each point in the cloud
                (shape: :math:`N \times D`, where `D` = 2 or `D` = 3).
                That is, normals can only be provided for 2D or 3D pointclouds.
            device (str, Optional): Device to store the pointcloud object on
                (default: 'cpu'). Must be a valid `torch.device` type.
            copy (bool, Optional): Whether or not to create a deep copy of the
                Tensor(s) used to initialize class members.

        """
        if points is None:
            self.points = None
        else:
            helpers._assert_tensor(points)
            helpers._assert_dim_ge(points, 2)
            self.points = points.clone() if copy else points
            self.points = self.points.to(device)
        if normals is None:
            self.normals = None
        else:
            helpers._assert_tensor(normals)
            if points.dim() == 2:
                helpers._assert_shape_eq(normals, (points.shape[-2], 3))
            self.normals = normals.clone() if copy else normals
            self.normals = self.normals.to(device)
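
A brief construction sketch (a hypothetical standalone usage; assumes `PointCloud` and `torch` are imported): wrap raw tensors, optionally deep-copying them so the originals stay untouched.

import torch

pts = torch.rand(1000, 3)
nrm = torch.rand(1000, 3)

# copy=True clones the input tensors before storing them on the object.
pc = PointCloud(points=pts, normals=nrm, device='cpu', copy=True)
assert pc.points.shape == (1000, 3)
assert pc.points.data_ptr() != pts.data_ptr()   # deep copy, distinct storage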
Example #7
def rotate(mesh: Type[Mesh], rotmat: torch.Tensor,
           inplace: Optional[bool] = True):
    r"""Rotate a mesh given a 3 x 3 rotation matrix.

    Args:
        mesh (Mesh): Mesh to be rotated.
        rotmat (torch.Tensor): Rotation matrix (shape: :math:`3 \times 3`).
        inplace (bool, optional): Bool to make this operation in-place.

    Returns:
        (Mesh): Rotated mesh.
    """
    if not isinstance(mesh, Mesh):
        raise TypeError('Input mesh must be of type Mesh. '
            'Got {0} instead.'.format(type(mesh)))
    if not inplace:
        mesh = mesh.clone()

    helpers._assert_tensor(rotmat)
    helpers._assert_shape_eq(rotmat, (3, 3))

    mesh.vertices = torch.matmul(rotmat, mesh.vertices.t()).t()

    return mesh
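
A usage sketch (assuming `TriangleMesh` subclasses `Mesh` and that the mesh `rotate` above is in scope; the .obj path mirrors the tests below): rotate a loaded mesh 90 degrees about the z-axis, in place by default.

import math
import torch

mesh = TriangleMesh.from_obj('tests/model.obj')

theta = math.pi / 2
rotmat = torch.tensor([
    [math.cos(theta), -math.sin(theta), 0.],
    [math.sin(theta),  math.cos(theta), 0.],
    [0., 0., 1.],
])
rotate(mesh, rotmat)   # default inplace=True mutates mesh.vertices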
Example #8
def test_smoke(device='cpu'):
    model = kal.models.PointNet.PointNetClassifier(
        in_channels=3,
        feat_size=1024,
        num_classes=13,
        batchnorm=True,
    )
    x = torch.randn(2, 128, 3)
    out = model(x)
    helpers._assert_shape_eq(out, (2, 13))

    model = kal.models.PointNet.PointNetClassifier(
        in_channels=3,
        feat_size=1024,
        num_classes=13,
        batchnorm=True,
        transposed_input=True,
    )
    x = torch.randn(2, 3, 128)
    out = model(x)
    helpers._assert_shape_eq(out, (2, 13))

    model = kal.models.PointNet.PointNetSegmenter(
        in_channels=3,
        feat_size=1024,
        num_classes=13,
        batchnorm=True,
    )
    x = torch.randn(2, 128, 3)
    out = model(x)
    helpers._assert_shape_eq(out, (2, 128, 13))

    model = kal.models.PointNet.PointNetSegmenter(
        in_channels=3,
        feat_size=1024,
        num_classes=13,
        batchnorm=True,
        transposed_input=True,
    )
    x = torch.randn(2, 3, 128)
    out = model(x)
    helpers._assert_shape_eq(out, (2, 128, 13))
Example #9
def sample_triangle_mesh(vertices: torch.Tensor, faces: torch.Tensor,
                         num_samples: int, eps: float = 1e-10):
    r""" Uniformly samples the surface of a mesh.

    Args:
        vertices (torch.Tensor): Vertices of the mesh (shape:
            :math:`N \times 3`, where :math:`N` is the number of vertices)
        faces (torch.LongTensor): Faces of the mesh (shape: :math:`F \times 3`,
            where :math:`F` is the number of faces).
        num_samples (int): Number of points to sample
        eps (float): A small number to prevent division by zero
                     for small surface areas.

    Returns:
        (torch.Tensor): Uniformly sampled points from the triangle mesh.

    Example:
        >>> points = sample_triangle_mesh(vertices, faces, 10)
        >>> points
        tensor([[ 0.0293,  0.2179,  0.2168],
                [ 0.2003, -0.3367,  0.2187],
                [ 0.2152, -0.0943,  0.1907],
                [-0.1852,  0.1686, -0.0522],
                [-0.2167,  0.3171,  0.0737],
                [ 0.2219, -0.0289,  0.1531],
                [ 0.2217, -0.0115,  0.1247],
                [-0.1400,  0.0364, -0.1618],
                [ 0.0658, -0.0310, -0.2198],
                [ 0.1926, -0.1867, -0.2153]])
    """

    helpers._assert_tensor(vertices)
    helpers._assert_tensor(faces)
    helpers._assert_dim_ge(vertices, 2)
    helpers._assert_dim_ge(faces, 2)
    # We want the last dimension of vertices to be of shape 3.
    helpers._assert_shape_eq(vertices, (-1, 3), dim=-1)

    dist_uni = torch.distributions.Uniform(torch.zeros((1,), device=vertices.device),
                                           1.)

    # calculate area of each face
    x1, x2, x3 = torch.split(torch.index_select(
        vertices, 0, faces[:, 0]) - torch.index_select(
        vertices, 0, faces[:, 1]), 1, dim=1)
    y1, y2, y3 = torch.split(torch.index_select(
        vertices, 0, faces[:, 1]) - torch.index_select(
        vertices, 0, faces[:, 2]), 1, dim=1)
    a = (x2 * y3 - x3 * y2) ** 2
    b = (x3 * y1 - x1 * y3) ** 2
    c = (x1 * y2 - x2 * y1) ** 2
    Areas = torch.sqrt(a + b + c) / 2
    # percentage of each face w.r.t. full surface area
    Areas = Areas / (torch.sum(Areas) + eps)

    # Define a discrete distribution w.r.t. the calculated face area ratios.
    cat_dist = torch.distributions.Categorical(Areas.view(-1))
    face_choices = cat_dist.sample([num_samples])

    # from each face sample a point
    select_faces = faces[face_choices]
    xs = torch.index_select(vertices, 0, select_faces[:, 0])
    ys = torch.index_select(vertices, 0, select_faces[:, 1])
    zs = torch.index_select(vertices, 0, select_faces[:, 2])
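    # Uniform sampling inside each face: drawing u as the square root of a
    # uniform variate (and v uniform) makes the barycentric combination below
    # uniform over the triangle's area.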
    u = torch.sqrt(dist_uni.sample([num_samples]))
    v = dist_uni.sample([num_samples])
    points = (1 - u) * xs + (u * (1 - v)) * ys + u * v * zs

    return points
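
A small self-contained check (assuming `sample_triangle_mesh` is in scope): sampling a single right triangle in the z = 0 plane should return the requested number of points, all with zero z-coordinate.

import torch

vertices = torch.tensor([[0., 0., 0.],
                         [1., 0., 0.],
                         [0., 1., 0.]])
faces = torch.tensor([[0, 1, 2]])

pts = sample_triangle_mesh(vertices, faces, 100)
assert pts.shape == (100, 3)
assert torch.allclose(pts[:, 2], torch.zeros(100))   # all samples lie on the face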
Example #10
def test_triangle_mesh_to_sdf(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    mesh.to(device)
    mesh2sdf = kal.transforms.TriangleMeshToSDF(100)
    helpers._assert_shape_eq(mesh2sdf(mesh), (100,), dim=-1)
Example #11
def test_triangle_mesh_to_voxelgrid(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj')
    mesh.to(device)
    mesh2voxel = kal.transforms.TriangleMeshToVoxelGrid(32)
    helpers._assert_shape_eq(mesh2voxel(mesh), (32, 32, 32))
Example #12
def test_triangle_mesh_to_pointcloud(device='cpu'):
    mesh = TriangleMesh.from_obj('tests/model.obj') 
    mesh.to(device)
    mesh2cloud = kal.transforms.TriangleMeshToPointCloud(10000)
    pts = mesh2cloud(mesh)
    helpers._assert_shape_eq(pts, (10000, 3))
Example #13
def test_upsample_voxelgrid(device='cpu'):
    voxel = torch.ones([32, 32, 32]).to(device)
    up = kal.transforms.UpsampleVoxelGrid(64)
    helpers._assert_shape_eq(up(voxel), (64, 64, 64))
    up = kal.transforms.UpsampleVoxelGrid(33)
    helpers._assert_shape_eq(up(voxel), (33, 33, 33))
Example #14
def test_smoke(device='cpu'):
    net = kal.models.VoxelSuperresSimple.EncoderDecoder()
    x = torch.randn(1, 1, 32, 32, 32)
    x_ = net(x)
    helpers._assert_shape_eq(x, x_.shape, dim=0)
Example #15
def test_smoke(device='cpu'):
    net = kal.models.VoxelSuperresODM.SuperresNetwork(30, 15)
    x = torch.randn(1, 6, 128, 128)
    x_ = net(x)
    helpers._assert_shape_eq(x, x_.shape, dim=0)