Example #1
import math

import numpy as np
import torch

from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras

# All examples below assume these imports. `perspective_project_naive` is a
# helper defined alongside the tests; a sketch of it is given after this example.


def test_perspective(self):
    far = 10.0
    near = 1.0
    cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=60.0)
    P = cameras.get_projection_transform()
    # Vertices are at the far clipping plane, so z gets mapped to 1.0.
    vertices = torch.tensor([1, 2, far], dtype=torch.float32)
    projected_verts = torch.tensor(
        [np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
    )
    vertices = vertices[None, None, :]
    v1 = P.transform_points(vertices)
    v2 = perspective_project_naive(vertices, fov=60.0)
    self.assertClose(v1[..., :2], v2[..., :2])
    self.assertClose(far * v1[..., 2], v2[..., 2])
    self.assertClose(v1.squeeze(), projected_verts)

    # Vertices are at the near clipping plane, so z gets mapped to 0.0.
    vertices[..., 2] = near
    projected_verts = torch.tensor(
        [np.sqrt(3) / near, 2 * np.sqrt(3) / near, 0.0], dtype=torch.float32
    )
    v1 = P.transform_points(vertices)
    v2 = perspective_project_naive(vertices, fov=60.0)
    self.assertClose(v1[..., :2], v2[..., :2])
    self.assertClose(v1.squeeze(), projected_verts)
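
The helper `perspective_project_naive` is not shown on this page. Below is a minimal sketch of what it plausibly does, reconstructed from the assertions above: with fov = 60.0, the expected x of sqrt(3) / far implies x and y are scaled by cot(fov / 2) / z, while z is passed through unnormalized. Treat this as an assumption, not the verbatim helper from the pytorch3d test suite.

import math

import torch


def perspective_project_naive(points, fov=60.0):
    # Naive perspective projection: scale x and y by cot(fov / 2) / z and
    # keep the raw (unnormalized) depth. `points` has shape (N, V, 3).
    # NOTE: a sketch inferred from how the tests use the helper, not the
    # exact implementation from pytorch3d's tests.
    half_fov_rad = (fov / 2.0) * math.pi / 180.0
    cot_half_fov = 1.0 / math.tan(half_fov_rad)
    z = points[..., 2]
    x = points[..., 0] * cot_half_fov / z
    y = points[..., 1] * cot_half_fov / z
    return torch.stack((x, y, z), dim=-1)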
Example #2
def test_perspective_kwargs(self):
    cameras = OpenGLPerspectiveCameras(znear=5.0, zfar=100.0, fov=0.0)
    # Override the defaults by passing values to get_projection_transform.
    far = 10.0
    P = cameras.get_projection_transform(znear=1.0, zfar=far, fov=60.0)
    vertices = torch.tensor([1, 2, far], dtype=torch.float32)
    projected_verts = torch.tensor(
        [np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
    )
    vertices = vertices[None, None, :]
    v1 = P.transform_points(vertices)
    self.assertClose(v1.squeeze(), projected_verts)
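
Note that the kwargs override applies to that call only; the camera keeps its constructor values. A quick illustration, assuming the camera stores its constructor arguments as broadcast tensor attributes (my reading of how pytorch3d cameras hold properties; the printed values are illustrative):

cameras = OpenGLPerspectiveCameras(znear=5.0, zfar=100.0, fov=0.0)
P = cameras.get_projection_transform(znear=1.0, zfar=10.0, fov=60.0)
# The override is per-call; the camera itself still holds the constructor values.
print(cameras.fov)    # tensor([0.])
print(cameras.znear)  # tensor([5.])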
Example #3
def test_perspective_mixed_inputs_grad(self):
    far = torch.tensor([10.0])
    near = 1.0
    fov = torch.tensor(60.0, requires_grad=True)
    cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=fov)
    P = cameras.get_projection_transform()
    vertices = torch.tensor([1, 2, 10], dtype=torch.float32)
    vertices_batch = vertices[None, None, :]
    v1 = P.transform_points(vertices_batch).squeeze()
    v1.sum().backward()
    self.assertTrue(hasattr(fov, "grad"))
    fov_grad = fov.grad.clone()
    # Analytic gradient: d/dtheta cot(theta / 2) = -csc(theta / 2) ** 2 / 2,
    # with pi / 180 converting fov from degrees to radians.
    half_fov_rad = (math.pi / 180.0) * fov.detach() / 2.0
    grad_cotan = -(1.0 / (torch.sin(half_fov_rad) ** 2.0) * 1 / 2.0)
    grad_fov = (math.pi / 180.0) * grad_cotan
    # Only x and y depend on fov; both are scaled by cot(fov / 2) / z with z = 10.
    grad_fov = (vertices[0] + vertices[1]) * grad_fov / 10.0
    self.assertClose(fov_grad, grad_fov)
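
The analytic expression can be sanity-checked numerically. Here is a hedged finite-difference sketch of the same gradient; `project_sum` is a hypothetical helper of mine, reusing only calls the test above already demonstrates:

import torch

from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras


def project_sum(fov_deg):
    # Sum of the projected coordinates of (1, 2, 10) for a given fov in degrees.
    cameras = OpenGLPerspectiveCameras(
        znear=1.0, zfar=torch.tensor([10.0]), fov=fov_deg
    )
    P = cameras.get_projection_transform()
    verts = torch.tensor([[[1.0, 2.0, 10.0]]])
    return P.transform_points(verts).sum()


eps = 1e-3
fd_grad = (project_sum(60.0 + eps) - project_sum(60.0 - eps)) / (2 * eps)
# fd_grad should match fov.grad from the test up to finite-difference error.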
Example #4
def test_perspective_mixed_inputs_broadcast(self):
    far = torch.tensor([10.0, 20.0], dtype=torch.float32)
    near = 1.0
    fov = torch.tensor(60.0)
    cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=fov)
    P = cameras.get_projection_transform()
    vertices = torch.tensor([1, 2, 10], dtype=torch.float32)
    z1 = 1.0  # Vertices are at the far clipping plane for zfar = 10.0, so z = 1.0.
    # For zfar = 20.0 the point is inside the frustum:
    # z2 = (zfar * z - zfar * znear) / ((zfar - znear) * z)
    z2 = (20.0 / (20.0 - 1.0) * 10.0 - 20.0 / (20.0 - 1.0)) / 10.0
    projected_verts = torch.tensor(
        [
            [np.sqrt(3) / 10.0, 2 * np.sqrt(3) / 10.0, z1],
            [np.sqrt(3) / 10.0, 2 * np.sqrt(3) / 10.0, z2],
        ],
        dtype=torch.float32,
    )
    vertices = vertices[None, None, :]
    v1 = P.transform_points(vertices)
    v2 = perspective_project_naive(vertices, fov=60.0)
    self.assertClose(v1[..., :2], torch.cat([v2, v2])[..., :2])
    self.assertClose(v1.squeeze(), projected_verts)
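
The two expected depths follow from the depth mapping implied by the test values: z is sent to zfar * (z - znear) / ((zfar - znear) * z), which is 0.0 at the near plane and 1.0 at the far plane. A standalone check of z1 and z2 (`z_ndc` is a hypothetical helper named here for illustration):

def z_ndc(z, znear, zfar):
    # Depth mapping inferred from the expected values in the test:
    # 0.0 at z == znear, 1.0 at z == zfar.
    return zfar * (z - znear) / ((zfar - znear) * z)

print(z_ndc(10.0, 1.0, 10.0))  # 1.0             -> z1
print(z_ndc(10.0, 1.0, 20.0))  # 18/19 ~= 0.947  -> z2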