Example #1
    def test_model_to_screen_jacobian_random(self):
        """Tests the Jacobian of model_to_screen."""
        tensor_size = np.random.randint(1, 3)
        tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
        point_world_space_init = np.random.uniform(size=tensor_shape + [3])
        camera_position_init = np.random.uniform(size=tensor_shape + [3])
        camera_up_init = np.random.uniform(size=tensor_shape + [3])
        look_at_init = np.random.uniform(size=tensor_shape + [3])
        vertical_field_of_view_init = np.random.uniform(
            0.1, 1.0, size=tensor_shape + [1])
        lower_left_corner_init = np.random.uniform(size=tensor_shape + [2])
        screen_dimensions_init = np.random.uniform(
            0.1, 1.0, size=tensor_shape + [2])
        near_init = np.random.uniform(0.1, 1.0, size=tensor_shape + [1])
        far_init = near_init + np.random.uniform(
            0.1, 1.0, size=tensor_shape + [1])

        args = [
            point_world_space_init, camera_position_init, look_at_init,
            camera_up_init, vertical_field_of_view_init,
            screen_dimensions_init, near_init, far_init, lower_left_corner_init
        ]

        with self.subTest(name="jacobian_y_projection"):
            self.assert_jacobian_is_correct_fn(
                lambda *args: glm.model_to_screen(*args)[0], args)

        with self.subTest(name="jacobian_w"):
            self.assert_jacobian_is_correct_fn(
                lambda *args: glm.model_to_screen(*args)[1], args)
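
These snippets are test methods extracted from a larger test class, so the imports they rely on are not shown. A hedged sketch of what a standalone version would need (the module paths are assumptions and may vary across tensorflow_graphics versions):

    # Assumed imports; the exact paths are a guess based on the identifiers used in these snippets.
    import math

    import numpy as np

    from tensorflow_graphics.rendering.camera import perspective  # perspective.right_handed
    from tensorflow_graphics.rendering.opengl import math as glm  # glm.model_to_screen, glm.perspective_correct_interpolation
    # A look_at module providing look_at.right_handed is also assumed for examples #4-#7,
    # and the methods themselves assume a test-case base class that provides
    # assert_jacobian_is_correct_fn and assertAllClose (e.g. tensorflow_graphics.util.test_case, assumed).
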
Example #2
    def test_model_to_screen_jacobian_preset(self):
        """Tests the Jacobian of model_to_screen."""
        point_world_space_init = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1)))
        camera_position_init = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1)))
        camera_up_init = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))
        look_at_init = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0)))
        vertical_field_of_view_init = np.array(
            ((60.0 * math.pi / 180.0, ), (65 * math.pi / 180, )))
        lower_left_corner_init = np.array(((0.0, 0.0), (10.0, 20.0)))
        screen_dimensions_init = np.array(((501.0, 501.0), (400.0, 600.0)))
        near_init = np.array(((0.01, ), (1.0, )))
        far_init = np.array(((4.0, ), (3.0, )))

        args = [
            point_world_space_init, camera_position_init, look_at_init,
            camera_up_init, vertical_field_of_view_init,
            screen_dimensions_init, near_init, far_init, lower_left_corner_init
        ]

        with self.subTest(name="jacobian_y_projection"):
            self.assert_jacobian_is_correct_fn(
                lambda *args: glm.model_to_screen(*args)[0], args)

        with self.subTest(name="jacobian_w"):
            self.assert_jacobian_is_correct_fn(
                lambda *args: glm.model_to_screen(*args)[1], args)
Example #3
    def test_model_to_screen_preset(self):
        """Tests that model_to_screen generates expected results."""
        point_world_space = ((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1))
        camera_position = ((0.0, 0.0, 0.0), (0.4, -0.8, 0.1))
        camera_up = ((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))
        look_at = ((0.0, 0.0, 1.0), (0.0, 1.0, 0.0))
        vertical_field_of_view = ((60.0 * math.pi / 180.0, ),
                                  (65 * math.pi / 180, ))
        lower_left_corner = ((0.0, 0.0), (10.0, 20.0))
        screen_dimensions = ((501.0, 501.0), (400.0, 600.0))
        near = ((0.01, ), (1.0, ))
        far = ((4.0, ), (3.0, ))

        pred_screen, pred_w = glm.model_to_screen(point_world_space,
                                                  camera_position, look_at,
                                                  camera_up,
                                                  vertical_field_of_view,
                                                  screen_dimensions, near, far,
                                                  lower_left_corner)

        gt_screen = ((-13.23016357, 599.30444336, 4.00215721),
                     (98.07017517, -95.40383911, 3.1234405))
        gt_w = ((5.1, ), (3.42247, ))
        self.assertAllClose(pred_screen, gt_screen, atol=1e-5, rtol=1e-5)
        self.assertAllClose(pred_w, gt_w)
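
Examples #1-#3 above pass raw camera parameters straight into glm.model_to_screen, while examples #4-#7 below first build the model-to-eye and perspective matrices and pass those instead. A minimal sketch bridging the two styles, reusing the matrix-building calls that appear in the later examples (the helper name project_points is hypothetical, and the look_at / perspective modules are assumed to be importable as in those examples):

    # Hypothetical helper: parameter-style inputs, matrix-style model_to_screen call.
    def project_points(points, camera_position, look_at_point, camera_up,
                       vertical_field_of_view, screen_dimensions, near, far,
                       lower_left_corner):
      # Build the camera matrices exactly as in examples #4-#6 below.
      model_to_eye_matrix = look_at.right_handed(camera_position, look_at_point,
                                                 camera_up)
      aspect_ratio = screen_dimensions[..., 0:1] / screen_dimensions[..., 1:2]
      perspective_matrix = perspective.right_handed(vertical_field_of_view,
                                                    aspect_ratio, near, far)
      # Returns (screen_coordinates, w), matching the tuple unpacked in the tests.
      return glm.model_to_screen(points, model_to_eye_matrix, perspective_matrix,
                                 screen_dimensions, lower_left_corner)

With the preset values from example #3, this helper should reproduce the same gt_screen and gt_w values checked there, since example #6 performs exactly these two steps before calling glm.model_to_screen.
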
Example #4
  def test_model_to_screen_jacobian_random(self):
    """Tests the Jacobian of model_to_screen."""
    tensor_size = np.random.randint(1, 3)
    tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
    point_world_space_init = np.random.uniform(size=tensor_shape + [3])
    camera_position_init = np.random.uniform(size=tensor_shape + [3])
    camera_up_init = np.random.uniform(size=tensor_shape + [3])
    look_at_init = np.random.uniform(size=tensor_shape + [3])
    vertical_field_of_view_init = np.random.uniform(
        0.1, 1.0, size=tensor_shape + [1])
    lower_left_corner_init = np.random.uniform(size=tensor_shape + [2])
    screen_dimensions_init = np.random.uniform(
        0.1, 1.0, size=tensor_shape + [2])
    near_init = np.random.uniform(0.1, 1.0, size=tensor_shape + [1])
    far_init = near_init + np.random.uniform(0.1, 1.0, size=tensor_shape + [1])

    # Build matrices.
    model_to_eye_matrix = look_at.right_handed(camera_position_init,
                                               look_at_init, camera_up_init)
    perspective_matrix = perspective.right_handed(
        vertical_field_of_view_init,
        screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2],
        near_init, far_init)

    args = [
        point_world_space_init, model_to_eye_matrix, perspective_matrix,
        screen_dimensions_init, lower_left_corner_init
    ]

    with self.subTest(name="jacobian_y_projection"):
      self.assert_jacobian_is_correct_fn(
          lambda *args: glm.model_to_screen(*args)[0], args, atol=1e-4)
Example #5
  def test_model_to_screen_jacobian_preset(self):
    """Tests the Jacobian of model_to_screen."""
    point_world_space_init = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1)))
    camera_position_init = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1)))
    camera_up_init = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))
    look_at_init = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0)))
    vertical_field_of_view_init = np.array(
        ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,)))
    lower_left_corner_init = np.array(((0.0, 0.0), (10.0, 20.0)))
    screen_dimensions_init = np.array(((501.0, 501.0), (400.0, 600.0)))
    near_init = np.array(((0.01,), (1.0,)))
    far_init = np.array(((4.0,), (3.0,)))

    # Build matrices.
    model_to_eye_matrix = look_at.right_handed(camera_position_init,
                                               look_at_init, camera_up_init)
    perspective_matrix = perspective.right_handed(
        vertical_field_of_view_init,
        screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2],
        near_init, far_init)

    args = [
        point_world_space_init, model_to_eye_matrix, perspective_matrix,
        screen_dimensions_init, lower_left_corner_init
    ]

    with self.subTest(name="jacobian_y_projection"):
      self.assert_jacobian_is_correct_fn(
          lambda *args: glm.model_to_screen(*args)[0], args, atol=1e-4)
Example #6
  def test_model_to_screen_preset(self):
    """Tests that model_to_screen generates expected results."""
    point_world_space = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1)))
    camera_position = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1)))
    camera_up = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))
    look_at_point = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0)))
    vertical_field_of_view = np.array(
        ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,)))
    lower_left_corner = np.array(((0.0, 0.0), (10.0, 20.0)))
    screen_dimensions = np.array(((501.0, 501.0), (400.0, 600.0)))
    near = np.array(((0.01,), (1.0,)))
    far = np.array(((4.0,), (3.0,)))

    # Build matrices.
    model_to_eye_matrix = look_at.right_handed(camera_position, look_at_point,
                                               camera_up)
    perspective_matrix = perspective.right_handed(
        vertical_field_of_view,
        screen_dimensions[..., 0:1] / screen_dimensions[..., 1:2], near, far)

    pred_screen, pred_w = glm.model_to_screen(point_world_space,
                                              model_to_eye_matrix,
                                              perspective_matrix,
                                              screen_dimensions,
                                              lower_left_corner)

    gt_screen = ((-13.23016357, 599.30444336, 4.00215721),
                 (98.07017517, -95.40383911, 3.1234405))
    gt_w = ((5.1,), (3.42247,))
    self.assertAllClose(pred_screen, gt_screen, atol=1e-5, rtol=1e-5)
    self.assertAllClose(pred_w, gt_w)
Example #7
  def test_perspective_correct_interpolation_preset(self):
    """Tests that perspective_correct_interpolation generates expected results."""
    camera_origin = np.array((0.0, 0.0, 0.0))
    camera_up = np.array((0.0, 1.0, 0.0))
    look_at_point = np.array((0.0, 0.0, 1.0))
    fov = np.array((90.0 * np.pi / 180.0,))
    bottom_left = np.array((0.0, 0.0))
    image_size = np.array((501.0, 501.0))
    near_plane = np.array((0.01,))
    far_plane = np.array((10.0,))
    batch_size = np.random.randint(1, 5)
    triangle_x_y = np.random.uniform(-10.0, 10.0, (batch_size, 3, 2))
    triangle_z = np.random.uniform(2.0, 10.0, (batch_size, 3, 1))
    triangles = np.concatenate((triangle_x_y, triangle_z), axis=-1)
    # Builds barycentric weights.
    barycentric_weights = np.random.uniform(size=(batch_size, 3))
    barycentric_weights = barycentric_weights / np.sum(
        barycentric_weights, axis=-1, keepdims=True)
    # Barycentric interpolation of vertex positions.
    convex_combination = np.einsum("ba, bac -> bc", barycentric_weights,
                                   triangles)
    # Build matrices.
    model_to_eye_matrix = look_at.right_handed(camera_origin, look_at_point,
                                               camera_up)
    perspective_matrix = perspective.right_handed(
        fov, (image_size[0:1] / image_size[1:2]), near_plane, far_plane)

    # Computes where those points project in screen coordinates.
    pixel_position, _ = glm.model_to_screen(convex_combination,
                                            model_to_eye_matrix,
                                            perspective_matrix, image_size,
                                            bottom_left)

    # Builds attributes.
    num_pixels = pixel_position.shape[0]
    attribute_size = np.random.randint(1, 10)  # Avoid a zero-sized (vacuous) attribute dimension.
    attributes = np.random.uniform(size=(num_pixels, 3, attribute_size))

    prediction = glm.perspective_correct_interpolation(triangles, attributes,
                                                       pixel_position[..., 0:2],
                                                       model_to_eye_matrix,
                                                       perspective_matrix,
                                                       image_size, bottom_left)

    groundtruth = np.einsum("ba, bac -> bc", barycentric_weights, attributes)
    self.assertAllClose(prediction, groundtruth)
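
The two np.einsum calls in example #7 contract the per-vertex barycentric weights against per-vertex data (vertex positions in one case, attributes in the other). A small self-contained sketch showing the same contraction written out explicitly, which may help when reading the "ba, bac -> bc" subscript string (the shapes here are illustrative, not taken from the test):

    # Equivalent, more explicit form of np.einsum("ba, bac -> bc", w, v):
    # for each batch element b, sum over the 3 vertices a of w[b, a] * v[b, a, :].
    import numpy as np

    w = np.random.uniform(size=(4, 3))                 # barycentric weights per triangle
    w = w / np.sum(w, axis=-1, keepdims=True)          # normalize so each row sums to 1
    v = np.random.uniform(size=(4, 3, 5))              # per-vertex data (e.g. attributes)

    via_einsum = np.einsum("ba, bac -> bc", w, v)
    via_sum = np.sum(w[..., np.newaxis] * v, axis=1)   # broadcast weights, sum over vertices

    assert np.allclose(via_einsum, via_sum)
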