def testRendersTwoCubesInBatch(self):
        """Renders a simple cube in two viewpoints to test the python wrapper."""

        vertex_rgb = (self.cube_vertex_positions * 0.5 + 0.5)
        vertex_rgba = tf.concat([vertex_rgb, tf.ones([8, 1])], axis=1)

        center = self.tf_float([[0.0, 0.0, 0.0]])
        world_up = self.tf_float([[0.0, 1.0, 0.0]])
        look_at_1 = camera_utils.look_at(self.tf_float([[2.0, 3.0, 6.0]]),
                                         center, world_up)
        look_at_2 = camera_utils.look_at(self.tf_float([[-3.0, 1.0, 6.0]]),
                                         center, world_up)
        projection_1 = tf.matmul(self.perspective, look_at_1)
        projection_2 = tf.matmul(self.perspective, look_at_2)
        projection = tf.concat([projection_1, projection_2], axis=0)
        background_value = [0.0, 0.0, 0.0, 0.0]

        rendered = rasterize_triangles.rasterize(
            tf.stack([self.cube_vertex_positions, self.cube_vertex_positions]),
            tf.stack([vertex_rgba, vertex_rgba]), self.cube_triangles,
            projection, self.image_width, self.image_height, background_value)

        with self.test_session() as sess:
            images = sess.run(rendered, feed_dict={})
            for i in (0, 1):
                image = images[i, :, :, :]
                baseline_image_name = 'Unlit_Cube_{}.png'.format(i)
                baseline_image_path = os.path.join(self.test_data_directory,
                                                   baseline_image_name)
                test_utils.expect_image_file_and_render_are_near(
                    self, sess, baseline_image_path, image)
    def runTriangleTest(self, w_vector, target_image_name):
        """Directly renders a rasterized triangle's barycentric coordinates.

    Tests only the kernel (rasterize_triangles_module).

    Args:
      w_vector: 3 element vector of w components to scale triangle vertices.
      target_image_name: image file name to compare result against.
    """
        clip_init = np.array([[-0.5, -0.5, 0.8, 1.0], [0.0, 0.5, 0.3, 1.0],
                              [0.5, -0.5, 0.3, 1.0]],
                             dtype=np.float32)
        clip_init = clip_init * np.reshape(
            np.array(w_vector, dtype=np.float32), [3, 1])

        clip_coordinates = tf.constant(clip_init)
        triangles = tf.constant([[0, 1, 2]], dtype=tf.int32)

        rendered_coordinates, _, _ = (
            rasterize_triangles.rasterize_triangles_module.rasterize_triangles(
                clip_coordinates, triangles, self.image_width,
                self.image_height))
        rendered_coordinates = tf.concat(
            [rendered_coordinates,
             tf.ones([self.image_height, self.image_width, 1])],
            axis=2)
        with self.test_session() as sess:
            image = rendered_coordinates.eval()
            baseline_image_path = os.path.join(self.test_data_directory,
                                               target_image_name)
            test_utils.expect_image_file_and_render_are_near(
                self, sess, baseline_image_path, image)
    def runTriangleTest(self, w_vector, target_image_name):
        """Directly renders a rasterized triangle's barycentric coordinates.

        Tests only the kernel (rasterize_triangles_module).

        Args:
            w_vector: 3-vector of w components to scale triangle vertices.
            target_image_name: image file name to compare result against.
        """
        clip_init = np.array([[-0.5, -0.5, 0.8, 1.0], [0.0, 0.5, 0.3, 1.0],
                              [0.5, -0.5, 0.3, 1.0]],
                             dtype=np.float32)
        clip_init = clip_init * np.reshape(
            np.array(w_vector, dtype=np.float32), [3, 1])

        clip_coordinates = torch.tensor(clip_init)
        triangles = torch.tensor([[0, 1, 2]], dtype=torch.int32)

        _, barycentric_coords, _ = (BarycentricRasterizer.apply(
            clip_coordinates, triangles, self.image_width, self.image_height))
        image = torch.cat(
            [barycentric_coords,
             torch.ones([self.image_height, self.image_width, 1])],
            dim=2)
        baseline_image_path = os.path.join(self.test_data_directory,
                                           target_image_name)
        test_utils.expect_image_file_and_render_are_near(
            self, baseline_image_path, image)
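runTriangleTest is a parameterized helper, so the suite presumably drives it through thin test methods. A hypothetical sketch of such drivers (the method names and the second baseline file name are assumptions; 'Simple_Triangle.png' appears in the kernel test below, and unequal w components exercise perspective-correct barycentric interpolation):

    def testRendersUniformWTriangle(self):
        # All w components equal: no perspective distortion of barycentrics.
        self.runTriangleTest((1.0, 1.0, 1.0), 'Simple_Triangle.png')

    def testRendersPerspectiveCorrectTriangle(self):
        # Unequal w components force perspective-correct interpolation.
        self.runTriangleTest((0.2, 0.5, 2.0),
                             'Perspective_Corrected_Triangle.png')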
    def testRendersTwoCubesInBatch(self):
        """Renders a simple cube in two viewpoints to test the python wrapper."""

        vertex_rgb = (self.cube_vertex_positions * 0.5 + 0.5)
        vertex_rgba = torch.cat([vertex_rgb, torch.ones([8, 1])], dim=1)

        center = torch.tensor([[0, 0, 0]], dtype=torch.float32)
        world_up = torch.tensor([[0, 1, 0]], dtype=torch.float32)
        look_at_1 = camera_utils.look_at(
            torch.tensor([[2, 3, 6]], dtype=torch.float32), center, world_up)
        look_at_2 = camera_utils.look_at(
            torch.tensor([[-3, 1, 6]], dtype=torch.float32), center, world_up)
        projection_1 = torch.matmul(self.perspective, look_at_1)
        projection_2 = torch.matmul(self.perspective, look_at_2)
        projection = torch.cat([projection_1, projection_2], dim=0)
        background_value = torch.tensor([0.0, 0.0, 0.0, 0.0])

        rendered = rasterize(
            torch.stack(
                [self.cube_vertex_positions, self.cube_vertex_positions]),
            torch.stack([vertex_rgba, vertex_rgba]), self.cube_triangles,
            projection, self.image_width, self.image_height, background_value)

        for i in (0, 1):
            image = rendered[i, :, :, :]
            baseline_image_name = "Unlit_Cube_{}.png".format(i)
            baseline_image_path = os.path.join(self.test_data_directory,
                                               baseline_image_name)
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, image)
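The PyTorch snippets likewise assume fixtures from an omitted setUp. A minimal sketch mirroring the TF one above (again, the geometry and projection values are assumptions, not the port's exact data):

def setUp(self):
    # Hypothetical path; the real directory is defined by the test harness.
    self.test_data_directory = 'mesh_renderer/test_data/'
    self.image_width = 640
    self.image_height = 480
    # Assumed unit-cube geometry; same layout as the TF sketch above.
    self.cube_vertex_positions = torch.tensor(
        [[-1, -1, 1], [-1, -1, -1], [-1, 1, -1], [-1, 1, 1],
         [1, -1, 1], [1, -1, -1], [1, 1, -1], [1, 1, 1]],
        dtype=torch.float32)
    self.cube_triangles = torch.tensor(
        [[0, 1, 2], [2, 3, 0], [3, 2, 6], [6, 7, 3], [7, 6, 5], [5, 4, 7],
         [4, 5, 1], [1, 0, 4], [5, 6, 2], [2, 1, 5], [7, 4, 0], [0, 3, 7]],
        dtype=torch.int32)
    self.perspective = camera_utils.perspective(
        self.image_width / self.image_height,
        torch.tensor([40.0]), torch.tensor([0.01]), torch.tensor([10.0]))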
  def testRendersSimpleCube(self):
    """Renders a simple cube to test the kernel and python wrapper."""

    tf_float = lambda x: tf.constant(x, dtype=tf.float32)
    # camera position:
    eye = tf_float([[2.0, 3.0, 6.0]])
    center = tf_float([[0.0, 0.0, 0.0]])
    world_up = tf_float([[0.0, 1.0, 0.0]])
    image_width = 640
    image_height = 480

    look_at = camera_utils.look_at(eye, center, world_up)
    perspective = camera_utils.perspective(image_width / image_height,
                                           tf_float([40.0]), tf_float([0.01]),
                                           tf_float([10.0]))

    vertex_rgb = (self.cube_vertex_positions * 0.5 + 0.5)
    vertex_rgba = tf.concat([vertex_rgb, tf.ones([8, 1])], axis=1)

    projection = tf.matmul(perspective, look_at)
    background_value = [0.0, 0.0, 0.0, 0.0]

    rendered = rasterize_triangles.rasterize_triangles(
        tf.expand_dims(self.cube_vertex_positions, axis=0),
        tf.expand_dims(vertex_rgba, axis=0), self.cube_triangles, projection,
        image_width, image_height, background_value)

    with self.test_session() as sess:
      image = sess.run(rendered, feed_dict={})[0,...]
      target_image_name = 'Unlit_Cube_0.png'
      baseline_image_path = os.path.join(self.test_data_directory,
                                         target_image_name)
      test_utils.expect_image_file_and_render_are_near(
          self, sess, baseline_image_path, image)
  def testRendersSimpleTriangle(self):
    """Directly renders a rasterized triangle's barycentric coordinates.

    Tests only the kernel (rasterize_triangles_module).
    """
    ndc_init = np.array(
        [[-0.5, -0.5, 0.8], [0.0, 0.5, 0.3], [0.5, -0.5, 0.3]],
        dtype=np.float32)

    image_height = 480
    image_width = 640

    normalized_device_coordinates = tf.constant(ndc_init)
    triangles = tf.constant([[0, 1, 2]], dtype=tf.int32)

    rendered_coordinates, _, _ = (
        rasterize_triangles.rasterize_triangles_module.rasterize_triangles(
            normalized_device_coordinates, triangles, image_width,
            image_height))
    rendered_coordinates = tf.concat(
        [rendered_coordinates,
         tf.ones([image_height, image_width, 1])], axis=2)
    with self.test_session() as sess:
      image = rendered_coordinates.eval()
      target_image_name = 'Simple_Triangle.png'
      baseline_image_path = os.path.join(self.test_data_directory,
                                         target_image_name)
      test_utils.expect_image_file_and_render_are_near(
          self, sess, baseline_image_path, image)
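Note the contrast with runTriangleTest above: this test feeds the kernel vertices that are already normalized device coordinates, while runTriangleTest passes homogeneous clip coordinates whose w components the rasterizer divides out. The standard relation between the two, shown as a sketch:

# A clip-space vertex (x, y, z, w) maps to NDC by perspective division.
# Scaling the first runTriangleTest vertex by w = 0.5 and dividing recovers
# the corresponding ndc_init row used here:
clip = np.array([[-0.25, -0.25, 0.4, 0.5]], dtype=np.float32)
ndc = clip[:, :3] / clip[:, 3:]  # -> [[-0.5, -0.5, 0.8]], matching ndc_init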
  def testRendersSimpleCube(self):
    """Renders a simple cube to test the kernel and python wrapper."""
    vertex_rgb = (self.cube_vertex_positions * 0.5 + 0.5)
    vertex_rgba = tf.concat([vertex_rgb, tf.ones([8, 1])], axis=1)
    background_value = [0.0, 0.0, 0.0, 0.0]

    rendered = rasterize_triangles.rasterize(
        tf.expand_dims(self.cube_vertex_positions, axis=0),
        tf.expand_dims(vertex_rgba, axis=0), self.cube_triangles,
        self.projection, self.image_width, self.image_height, background_value)

    with self.test_session() as sess:
      image = rendered.eval()[0,...]
      target_image_name = 'Unlit_Cube_0.png'
      baseline_image_path = os.path.join(self.test_data_directory,
                                         target_image_name)
      test_utils.expect_image_file_and_render_are_near(
          self, sess, baseline_image_path, image)
    def testRendersSimpleCube(self):
        """Renders a simple cube to test the full forward pass.

    Verifies the functionality of both the custom kernel and the python wrapper.
    """

        model_transforms = camera_utils.euler_matrices([[-20.0, 0.0, 60.0],
                                                        [45.0, 60.0,
                                                         0.0]])[:, :3, :3]

        vertices_world_space = tf.matmul(tf.stack(
            [self.cube_vertices, self.cube_vertices]),
                                         model_transforms,
                                         transpose_b=True)

        normals_world_space = tf.matmul(tf.stack(
            [self.cube_normals, self.cube_normals]),
                                        model_transforms,
                                        transpose_b=True)

        # camera position:
        eye = tf.constant(2 * [[0.0, 0.0, 6.0]], dtype=tf.float32)
        center = tf.constant(2 * [[0.0, 0.0, 0.0]], dtype=tf.float32)
        world_up = tf.constant(2 * [[0.0, 1.0, 0.0]], dtype=tf.float32)
        image_width = 640
        image_height = 480
        light_positions = tf.constant([[[0.0, 0.0, 6.0]], [[0.0, 0.0, 6.0]]])
        light_intensities = tf.ones([2, 1, 3], dtype=tf.float32)
        vertex_diffuse_colors = tf.ones_like(vertices_world_space,
                                             dtype=tf.float32)

        rendered = mesh_renderer.mesh_renderer(
            vertices_world_space, self.cube_triangles, normals_world_space,
            vertex_diffuse_colors, eye, center, world_up, light_positions,
            light_intensities, image_width, image_height)

        with self.test_session() as sess:
            images = sess.run(rendered, feed_dict={})
            for image_id in range(images.shape[0]):
                target_image_name = 'Gray_Cube_%i.png' % image_id
                baseline_image_path = os.path.join(self.test_data_directory,
                                                   target_image_name)
                test_utils.expect_image_file_and_render_are_near(
                    self, sess, baseline_image_path, images[image_id, :, :, :])
  def testComplexShading(self):
    """Tests specular highlights, colors, and multiple lights per image."""
    # rotate the cube for the test:
    model_transforms = camera_utils.euler_matrices(
        [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

    vertices_world_space = tf.matmul(
        tf.stack([self.cube_vertices, self.cube_vertices]),
        model_transforms,
        transpose_b=True)

    normals_world_space = tf.matmul(
        tf.stack([self.cube_normals, self.cube_normals]),
        model_transforms,
        transpose_b=True)

    # camera position:
    eye = tf.constant([[0.0, 0.0, 6.0], [0., 0.2, 18.0]], dtype=tf.float32)
    center = tf.constant([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]], dtype=tf.float32)
    world_up = tf.constant(
        [[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]], dtype=tf.float32)
    fov_y = tf.constant([40., 13.3], dtype=tf.float32)
    near_clip = tf.constant(0.1, dtype=tf.float32)
    far_clip = tf.constant(25.0, dtype=tf.float32)
    image_width = 640
    image_height = 480
    light_positions = tf.constant([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],
                                   [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])
    light_intensities = tf.constant(
        [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[2.0, 0.0, 1.0], [0.0, 2.0,
                                                                1.0]]],
        dtype=tf.float32)
    # pyformat: disable
    vertex_diffuse_colors = tf.constant(2*[[[1.0, 0.0, 0.0],
                                            [0.0, 1.0, 0.0],
                                            [0.0, 0.0, 1.0],
                                            [1.0, 1.0, 1.0],
                                            [1.0, 1.0, 0.0],
                                            [1.0, 0.0, 1.0],
                                            [0.0, 1.0, 1.0],
                                            [0.5, 0.5, 0.5]]],
                                        dtype=tf.float32)
    vertex_specular_colors = tf.constant(2*[[[0.0, 1.0, 0.0],
                                             [0.0, 0.0, 1.0],
                                             [1.0, 1.0, 1.0],
                                             [1.0, 1.0, 0.0],
                                             [1.0, 0.0, 1.0],
                                             [0.0, 1.0, 1.0],
                                             [0.5, 0.5, 0.5],
                                             [1.0, 0.0, 0.0]]],
                                         dtype=tf.float32)
    # pyformat: enable
    shininess_coefficients = 6.0 * tf.ones([2, 8], dtype=tf.float32)
    ambient_color = tf.constant(
        [[0., 0., 0.], [0.1, 0.1, 0.2]], dtype=tf.float32)
    renders = mesh_renderer.mesh_renderer(
        vertices_world_space, self.cube_triangles, normals_world_space,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height, vertex_specular_colors,
        shininess_coefficients, ambient_color, fov_y, near_clip, far_clip)
    tonemapped_renders = tf.concat(
        [
            mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),
            renders[:, :, :, 3:4]
        ],
        axis=3)

    # Check that shininess coefficient broadcasting works by also rendering
    # with a scalar shininess coefficient, and ensuring the result is identical:
    broadcasted_renders = mesh_renderer.mesh_renderer(
        vertices_world_space, self.cube_triangles, normals_world_space,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height, vertex_specular_colors,
        6.0, ambient_color, fov_y, near_clip, far_clip)
    tonemapped_broadcasted_renders = tf.concat(
        [
            mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),
            broadcasted_renders[:, :, :, 3:4]
        ],
        axis=3)

    with self.test_session() as sess:
      images, broadcasted_images = sess.run(
          [tonemapped_renders, tonemapped_broadcasted_renders], feed_dict={})

      for image_id in range(images.shape[0]):
        target_image_name = 'Colored_Cube_%i.png' % image_id
        baseline_image_path = os.path.join(self.test_data_directory,
                                           target_image_name)
        test_utils.expect_image_file_and_render_are_near(
            self, sess, baseline_image_path, images[image_id, :, :, :])
        test_utils.expect_image_file_and_render_are_near(
            self, sess, baseline_image_path,
            broadcasted_images[image_id, :, :, :])
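The tone_mapper call above compresses the renderer's linear RGB output into displayable range before comparing against the 8-bit baselines, while the alpha channel passes through untouched. A hypothetical stand-in with the same call shape, tone_mapper(rgb, 0.7); the library's exact operator may differ:

def tone_mapper(image, gamma):
    """Hypothetical sketch: gamma-style compression of linear RGB.

    Args:
      image: [batch, height, width, 3] linear-intensity RGB.
      gamma: scalar exponent, e.g. 0.7 in the tests above.
    """
    # Clip to [0, 1], then apply the exponent; gamma < 1 brightens midtones.
    return tf.pow(tf.clip_by_value(image, 0.0, 1.0), gamma)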
  def testThatCubeRotates(self):
    """Optimize a simple cube's rotation using pixel loss.

    The rotation is represented as static-basis euler angles. This test checks
    that the computed gradients are useful.
    """
    image_height = 480
    image_width = 640
    initial_euler_angles = [[0.0, 0.0, 0.0]]

    euler_angles = tf.Variable(initial_euler_angles)
    model_rotation = camera_utils.euler_matrices(euler_angles)[0, :3, :3]

    vertices_world_space = tf.reshape(
        tf.matmul(self.cube_vertices, model_rotation, transpose_b=True),
        [1, 8, 3])

    normals_world_space = tf.reshape(
        tf.matmul(self.cube_normals, model_rotation, transpose_b=True),
        [1, 8, 3])

    # camera position:
    eye = tf.constant([[0.0, 0.0, 6.0]], dtype=tf.float32)
    center = tf.constant([[0.0, 0.0, 0.0]], dtype=tf.float32)
    world_up = tf.constant([[0.0, 1.0, 0.0]], dtype=tf.float32)

    vertex_diffuse_colors = tf.ones_like(vertices_world_space, dtype=tf.float32)
    light_positions = tf.reshape(eye, [1, 1, 3])
    light_intensities = tf.ones([1, 1, 3], dtype=tf.float32)

    render = mesh_renderer.mesh_renderer(
        vertices_world_space, self.cube_triangles, normals_world_space,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height)
    render = tf.reshape(render, [image_height, image_width, 4])

    # Pick the desired cube rotation for the test:
    test_model_rotation = camera_utils.euler_matrices([[-20.0, 0.0,
                                                        60.0]])[0, :3, :3]

    desired_vertex_positions = tf.reshape(
        tf.matmul(self.cube_vertices, test_model_rotation, transpose_b=True),
        [1, 8, 3])
    desired_normals = tf.reshape(
        tf.matmul(self.cube_normals, test_model_rotation, transpose_b=True),
        [1, 8, 3])
    desired_render = mesh_renderer.mesh_renderer(
        desired_vertex_positions, self.cube_triangles, desired_normals,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height)
    desired_render = tf.reshape(desired_render, [image_height, image_width, 4])

    loss = tf.reduce_mean(tf.abs(render - desired_render))
    optimizer = tf.train.MomentumOptimizer(0.7, 0.1)
    grad = tf.gradients(loss, [euler_angles])
    grad, _ = tf.clip_by_global_norm(grad, 1.0)
    opt_func = optimizer.apply_gradients([(grad[0], euler_angles)])

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      for _ in range(35):
        sess.run([loss, opt_func])
      final_image, desired_image = sess.run([render, desired_render])

      target_image_name = 'Gray_Cube_0.png'
      baseline_image_path = os.path.join(self.test_data_directory,
                                         target_image_name)
      test_utils.expect_image_file_and_render_are_near(
          self, sess, baseline_image_path, desired_image)
      test_utils.expect_image_file_and_render_are_near(
          self,
          sess,
          baseline_image_path,
          final_image,
          max_outlier_fraction=0.01,
          pixel_error_threshold=0.04)
    def testComplexShading(self):
        """Test specular highlights, colors, and multiple lights per image."""

        model_transforms = camera_utils.euler_matrices(
            [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

        vertices_world_space = torch.matmul(
            torch.stack([self.cube_vertices, self.cube_vertices]),
            model_transforms.transpose(1, 2))

        normals_world_space = torch.matmul(
            torch.stack([self.cube_normals, self.cube_normals]),
            model_transforms.transpose(1, 2))

        # camera position:
        eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.2, 18.0]],
                           dtype=torch.float32)
        center = torch.tensor([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]],
                              dtype=torch.float32)
        world_up = torch.tensor([[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]],
                                dtype=torch.float32)
        fov_y = torch.tensor([40.0, 13.3], dtype=torch.float32)
        near_clip = 0.1
        far_clip = 25.0
        image_width = 640
        image_height = 480
        light_positions = torch.tensor([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],
                                        [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])
        light_intensities = torch.tensor(
            [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
             [[2.0, 0.0, 1.0], [0.0, 2.0, 1.0]]],
            dtype=torch.float32)
        vertex_diffuse_colors = torch.tensor(2*[[[1.0, 0.0, 0.0],
                                                 [0.0, 1.0, 0.0],
                                                 [0.0, 0.0, 1.0],
                                                 [1.0, 1.0, 1.0],
                                                 [1.0, 1.0, 0.0],
                                                 [1.0, 0.0, 1.0],
                                                 [0.0, 1.0, 1.0],
                                                 [0.5, 0.5, 0.5]]],
                                             dtype=torch.float32)
        vertex_specular_colors = torch.tensor(2*[[[0.0, 1.0, 0.0],
                                                  [0.0, 0.0, 1.0],
                                                  [1.0, 1.0, 1.0],
                                                  [1.0, 1.0, 0.0],
                                                  [1.0, 0.0, 1.0],
                                                  [0.0, 1.0, 1.0],
                                                  [0.5, 0.5, 0.5],
                                                  [1.0, 0.0, 0.0]]],
                                              dtype=torch.float32)
        shininess_coefficients = 6.0 * torch.ones([2, 8], dtype=torch.float32)
        ambient_color = torch.tensor([[0.0, 0.0, 0.0], [0.1, 0.1, 0.2]], dtype=torch.float32)
        renders = mesh_renderer.mesh_renderer(
            vertices_world_space,
            self.cube_triangles,
            normals_world_space,
            vertex_diffuse_colors,
            eye,
            center,
            world_up,
            light_positions,
            light_intensities,
            image_width,
            image_height,
            vertex_specular_colors,
            shininess_coefficients,
            ambient_color,
            fov_y,
            near_clip,
            far_clip)
        tonemapped_renders = torch.cat([
                mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),
                renders[:, :, :, 3:4]
            ],
            dim=3)

        # Check that shininess coefficient broadcasting works by also rendering
        # with a scalar shininess coefficient, and ensuring the result is identical:
        broadcasted_renders = mesh_renderer.mesh_renderer(
            vertices_world_space,
            self.cube_triangles,
            normals_world_space,
            vertex_diffuse_colors,
            eye,
            center,
            world_up,
            light_positions,
            light_intensities,
            image_width,
            image_height,
            vertex_specular_colors,
            6.0,
            ambient_color,
            fov_y,
            near_clip,
            far_clip)
        tonemapped_broadcasted_renders = torch.cat([
                mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),
                broadcasted_renders[:, :, :, 3:4]
            ],
            dim=3)

        for image_id in range(renders.shape[0]):
            target_image_name = "Colored_Cube_%i.png" % image_id
            baseline_image_path = os.path.join(self.test_data_directory,
                                               target_image_name)
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, tonemapped_renders[image_id, :, :, :])
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, tonemapped_broadcasted_renders[image_id, :, :, :])
    def testThatCubeRotates(self):
        """Optimize a simple cube's rotation using pixel loss.

        The rotation is represented as static-basis euler angles. This test checks
        that the computed gradients are useful.
        """
        image_height = 480
        image_width = 640
        initial_euler_angles = [[0.0, 0.0, 0.0]]

        euler_angles = torch.tensor(initial_euler_angles, requires_grad=True)
        # The rotation matrix and the world-space geometry derived from it are
        # rebuilt inside the optimization loop below, so each iteration
        # backpropagates through a fresh graph. (Setting requires_grad on the
        # derived rotation matrix, a non-leaf tensor, would raise a
        # RuntimeError.)

        # camera position:
        eye = torch.tensor([[0.0, 0.0, 6.0]], dtype=torch.float32)
        center = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32)
        world_up = torch.tensor([[0.0, 1.0, 0.0]], dtype=torch.float32)

        vertex_diffuse_colors = torch.ones([1, 8, 3], dtype=torch.float32)
        light_positions = torch.reshape(eye, [1, 1, 3])
        light_intensities = torch.ones([1, 1, 3], dtype=torch.float32)

        # Pick the desired cube rotation for the test:
        test_model_rotation = camera_utils.euler_matrices(
            [[-20.0, 0.0, 60.0]])[0, :3, :3]

        desired_vertex_positions = torch.reshape(
            torch.matmul(self.cube_vertices, test_model_rotation.t()),
            [1, 8, 3])
        desired_normals = torch.reshape(
            torch.matmul(self.cube_normals, test_model_rotation.t()),
            [1, 8, 3])

        optimizer = torch.optim.SGD([euler_angles], lr=0.7, momentum=0.1)
        for _ in range(35):
            optimizer.zero_grad()
            # Rebuild the rotation and world-space geometry from the current
            # angles so every backward pass runs through a fresh graph. (The
            # TF version of this test additionally clips gradients to global
            # norm 1.0 before applying them.)
            model_rotation = camera_utils.euler_matrices(
                euler_angles)[0, :3, :3]
            vertices_world_space = torch.reshape(
                torch.matmul(self.cube_vertices, model_rotation.t()),
                [1, 8, 3])
            normals_world_space = torch.reshape(
                torch.matmul(self.cube_normals, model_rotation.t()),
                [1, 8, 3])
            render = mesh_renderer.mesh_renderer(
                vertices_world_space,
                self.cube_triangles,
                normals_world_space,
                vertex_diffuse_colors,
                eye,
                center,
                world_up,
                light_positions,
                light_intensities,
                image_width,
                image_height)
            desired_render = mesh_renderer.mesh_renderer(
                desired_vertex_positions,
                self.cube_triangles,
                desired_normals,
                vertex_diffuse_colors,
                eye,
                center,
                world_up,
                light_positions,
                light_intensities,
                image_width,
                image_height)
            loss = torch.mean(torch.abs(render - desired_render))
            loss.backward()
            optimizer.step()

        render = torch.reshape(render, [image_height, image_width, 4])
        desired_render = torch.reshape(desired_render, [image_height, image_width, 4])
        target_image_name = "Gray_Cube_0.png"
        baseline_image_path = os.path.join(self.test_data_directory,
                                           target_image_name)
        test_utils.expect_image_file_and_render_are_near(
            self, baseline_image_path, desired_render)
        test_utils.expect_image_file_and_render_are_near(
            self,
            baseline_image_path,
            render,
            max_outlier_fraction=0.01,
            pixel_error_threshold=0.04)
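Every test in this listing leans on test_utils.expect_image_file_and_render_are_near. A hypothetical sketch of its assumed contract for the PyTorch call shape (the real helper also accepts a TF session in the TF variant and may use different defaults; PIL is assumed here for PNG loading, and the 0.005 default outlier fraction is an assumption):

import numpy as np
import torch
from PIL import Image

def expect_image_file_and_render_are_near(test, baseline_path, image,
                                          max_outlier_fraction=0.005,
                                          pixel_error_threshold=0.04):
    """Hypothetical sketch: asserts a render matches a PNG baseline."""
    # Load the baseline and normalize 8-bit channels to [0, 1].
    baseline = np.asarray(Image.open(baseline_path), dtype=np.float32) / 255.0
    if torch.is_tensor(image):
        image = image.detach().cpu().numpy()
    # A channel value is an outlier if it differs by more than the threshold;
    # the test passes if outliers stay below the allowed fraction.
    outliers = np.abs(image - baseline) > pixel_error_threshold
    test.assertLessEqual(outliers.mean(), max_outlier_fraction)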