Example #1
def transform_mesh(mesh: InputTensor,
                   matrix: InputTensor,
                   vertices_are_points=True) -> t.Tensor:
    """Transforms a single 3D mesh.

  Args:
    mesh: The mesh's triangle vertices, float32[d1, ..., dn, num_tri, 3, 3]
    matrix: The transformation matrix, shape=[d1, ..., dn, 4, 4]
    vertices_are_points: Whether to interpret the vertices as points or vectors.

  Returns:
    The transformed mesh, same shape as input mesh

  """
    mesh = util.to_tensor(mesh, dtype=t.float32)
    matrix = util.to_tensor(matrix, dtype=t.float32)

    assert mesh.shape[-2:] == (3, 3)
    assert matrix.shape[-2:] == (4, 4)
    assert mesh.shape[:-3] == matrix.shape[:-2]

    original_shape = mesh.shape
    mesh = mesh.reshape([-1, mesh.shape[-3] * 3, 3])
    matrix = matrix.reshape([-1, 4, 4])
    w = 1 if vertices_are_points else 0
    mesh = transform_points_homogeneous(mesh, matrix, w=w)
    if vertices_are_points:
        mesh = mesh[..., :3] / mesh[..., 3:4]
    else:
        mesh = mesh[..., :3]

    return mesh.reshape(original_shape)
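
A minimal usage sketch for transform_mesh, assuming torch is imported as t, that transform_mesh and transform_points_homogeneous (Example #2) are in scope, and that util.to_tensor passes float32 tensors through unchanged:

import torch as t

# Two triangles forming a unit quad in the XY plane: float32[num_tri=2, 3, 3].
quad = t.tensor([[[0., 0., 0.], [1., 0., 0.], [1., 1., 0.]],
                 [[0., 0., 0.], [1., 1., 0.], [0., 1., 0.]]])

# 4x4 translation by (0, 0, 5), built by hand so no extra helper is assumed.
translation = t.eye(4)
translation[2, 3] = 5.0

moved = transform_mesh(quad, translation)  # points: the translation is applied
dirs = transform_mesh(quad, translation, vertices_are_points=False)  # vectors: translation ignored
print(moved.shape)  # torch.Size([2, 3, 3])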
Example #2
def transform_points_homogeneous(points: InputTensor, matrix: InputTensor,
                                 w: float) -> t.Tensor:
    """Transforms a batch of 3D points with a batch of matrices.

  Args:
    points: The points to transform, float32[d1, ..., dn, num_points, 3]
    matrix: The transformation matrix, float32[d1, ..., dn, 4, 4]
    w: The W value to use. Should be 1 for affine points, 0 for vectors

  Returns:
    The transformed points in homogeneous space,
    float32[d1, ..., dn, num_points, 4]
  """
    points = util.to_tensor(points, dtype=t.float32)
    matrix = util.to_tensor(matrix, dtype=t.float32)
    assert points.shape[-1] == 3
    assert matrix.shape[-2:] == (4, 4)
    assert points.shape[:-2] == matrix.shape[:-2]

    batch_dims = points.shape[:-2]
    # Fold all batch dimensions into a single one
    points = points.reshape([-1] + list(points.shape[-2:]))
    matrix = matrix.reshape([-1] + list(matrix.shape[-2:]))

    points = t.constant_pad_nd(points, [0, 1], value=w)
    result = t.einsum("bnm,bvm->bvn", matrix, points)
    result = result.reshape(batch_dims + result.shape[-2:])

    return result
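
A quick sanity check for transform_points_homogeneous, assuming torch is imported as t and that util.to_tensor passes float32 tensors through unchanged:

import torch as t

points = t.tensor([[[1., 0., 0.],
                    [0., 2., 0.]]])  # float32[1, 2, 3]: one batch of two points
matrix = t.eye(4).unsqueeze(0)       # float32[1, 4, 4]: identity transform

hom = transform_points_homogeneous(points, matrix, w=1.0)
print(hom.shape)  # torch.Size([1, 2, 4])
print(hom[0, 0])  # tensor([1., 0., 0., 1.]) -- W=1 was appended, identity applied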
Example #3
def scale(v: InputTensor) -> t.Tensor:
    """Computes a scale matrix.

  Args:
    v: The scale vector, float32[N].

  Returns:
    The scale matrix, float32[N+1, N+1]

  """
    v = util.to_tensor(v, dtype=t.float32)
    assert len(v.shape) == 1
    return t.diag(t.cat([v, v.new_ones([1])], dim=0))
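
A short sketch for scale, assuming torch is imported as t:

import torch as t

s = scale(t.tensor([2., 3., 4.]))  # non-uniform scale along x, y, z
print(s)
# tensor([[2., 0., 0., 0.],
#         [0., 3., 0., 0.],
#         [0., 0., 4., 0.],
#         [0., 0., 0., 1.]])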
Example #4
def look_at_rh(eye: InputTensor, center: InputTensor,
               up: InputTensor) -> t.Tensor:
    """Computes a right-handed 4x4 look-at camera matrix."""
    eye = util.to_tensor(eye, dtype=t.float32)
    center = util.to_tensor(center, dtype=t.float32)
    up = util.to_tensor(up, dtype=t.float32)
    assert eye.shape == (3, )
    assert center.shape == (3, )
    assert up.shape == (3, )

    f = F.normalize(center - eye, dim=-1)
    s = F.normalize(t.cross(f, up), dim=-1)
    u = t.cross(s, f)

    return eye.new_tensor([
        [s[0], s[1], s[2], -t.dot(s, eye)],
        [u[0], u[1], u[2], -t.dot(u, eye)],
        [-f[0], -f[1], -f[2], t.dot(f, eye)],
        [0, 0, 0, 1],
    ],
                          dtype=t.float32)
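
A usage sketch for look_at_rh, assuming torch is imported as t and that util.to_tensor passes float32 tensors through unchanged. The camera is placed on the +Z axis looking at the origin; as with gluLookAt, the view direction maps to -Z in eye space:

import torch as t

view = look_at_rh(eye=t.tensor([0., 0., 5.]),
                  center=t.tensor([0., 0., 0.]),
                  up=t.tensor([0., 1., 0.]))
origin = t.tensor([0., 0., 0., 1.])
print(view @ origin)  # tensor([ 0.,  0., -5.,  1.]) -- the origin lies 5 units in front of the camera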
Example #5
def ortho_lh(left: InputTensor, right: InputTensor, bottom: InputTensor,
             top: InputTensor, z_near: InputTensor,
             z_far: InputTensor) -> t.Tensor:
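    """Computes a left-handed 4x4 orthographic projection matrix."""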
    left = util.to_tensor(left, dtype=t.float32)
    right = util.to_tensor(right, dtype=t.float32)
    bottom = util.to_tensor(bottom, dtype=t.float32)
    top = util.to_tensor(top, dtype=t.float32)
    z_near = util.to_tensor(z_near, dtype=t.float32)
    z_far = util.to_tensor(z_far, dtype=t.float32)
    assert left.shape == ()
    assert right.shape == ()
    assert bottom.shape == ()
    assert top.shape == ()
    assert z_near.shape == ()
    assert z_far.shape == ()

    return left.new_tensor(
        [[2 / (right - left), 0, 0, -(right + left) / (right - left)],
         [0, 2 / (top - bottom), 0, -(top + bottom) / (top - bottom)],
         [0, 0, 2 / (z_far - z_near), -(z_far + z_near) /
          (z_far - z_near)], [0, 0, 0, 1]],
        dtype=t.float32)
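
A sketch for ortho_lh, assuming torch is imported as t and that util.to_tensor accepts plain Python numbers. The matrix maps the box [left, right] x [bottom, top] x [z_near, z_far] onto the [-1, 1] NDC cube:

import torch as t

proj = ortho_lh(left=-2.0, right=2.0, bottom=-1.0, top=1.0, z_near=0.1, z_far=10.0)

near_corner = t.tensor([-2.0, -1.0, 0.1, 1.0])
far_corner = t.tensor([2.0, 1.0, 10.0, 1.0])
print(proj @ near_corner)  # ~tensor([-1., -1., -1.,  1.]), up to float rounding
print(proj @ far_corner)   # ~tensor([ 1.,  1.,  1.,  1.])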
Example #6
def perspective_rh(fov_y: InputTensor, aspect: InputTensor,
                   z_near: InputTensor, z_far: InputTensor) -> t.Tensor:
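    """Computes a right-handed 4x4 perspective projection matrix."""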
    fov_y = util.to_tensor(fov_y, dtype=t.float32)
    aspect = util.to_tensor(aspect, dtype=t.float32)
    z_near = util.to_tensor(z_near, dtype=t.float32)
    z_far = util.to_tensor(z_far, dtype=t.float32)
    assert fov_y.shape == ()
    assert aspect.shape == ()
    assert z_near.shape == ()
    assert z_far.shape == ()

    tan_half_fov_y = t.tan(fov_y / 2)
    return fov_y.new_tensor([
        [1.0 / (aspect * tan_half_fov_y), 0, 0, 0],
        [0, 1.0 / tan_half_fov_y, 0, 0],
        [
            0, 0, -(z_far + z_near) / (z_far - z_near), -(2 * z_far * z_near) /
            (z_far - z_near)
        ],
        [0, 0, -1, 0],
    ],
                            dtype=t.float32)
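
A sketch for perspective_rh, assuming torch is imported as t and that util.to_tensor accepts plain Python numbers. The matrix follows the OpenGL convention: a point on the near plane (z = -z_near in eye space) ends up at NDC z = -1 after the perspective divide:

import math
import torch as t

proj = perspective_rh(fov_y=math.radians(60.0), aspect=16.0 / 9.0,
                      z_near=0.1, z_far=100.0)

p_near = t.tensor([0., 0., -0.1, 1.])  # on the near plane, in front of the camera
clip = proj @ p_near
print(clip[:3] / clip[3])  # ~tensor([ 0.,  0., -1.])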