Example #1
def Render_layer(face_shape, face_norm, face_color, facemodel, batchsize):

    camera_position = tf.constant([0, 0, 10.0])
    camera_lookat = tf.constant([0, 0, 0.0])
    camera_up = tf.constant([0, 1.0, 0])
    # Single point light far along +z; its intensity is zero because the
    # vertex colors are precomputed, so only the ambient term contributes.
    light_positions = tf.tile(tf.reshape(tf.constant([0, 0, 1e5]), [1, 1, 3]),
                              [batchsize, 1, 1])
    light_intensities = tf.tile(
        tf.reshape(tf.constant([0.0, 0.0, 0.0]), [1, 1, 3]), [batchsize, 1, 1])
    ambient_color = tf.tile(tf.reshape(tf.constant([1.0, 1, 1]), [1, 3]),
                            [batchsize, 1])

    render = mesh_renderer.mesh_renderer(face_shape,
                                         # facemodel.tri is 1-indexed; shift
                                         # to 0-based int32 triangles
                                         tf.cast(facemodel.tri - 1, tf.int32),
                                         face_norm,
                                         face_color / 255,  # scale to [0, 1]
                                         camera_position=camera_position,
                                         camera_lookat=camera_lookat,
                                         camera_up=camera_up,
                                         light_positions=light_positions,
                                         light_intensities=light_intensities,
                                         image_width=224,
                                         image_height=224,
                                         fov_y=12.5936,
                                         ambient_color=ambient_color)

    return render
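
For orientation, a minimal sketch of how this layer might be driven in a TF1 graph. The placeholder names and the vertex count are hypothetical, and `facemodel` stands for whatever object supplies the 1-indexed triangle list used above:

import tensorflow as tf

batchsize = 4
n_vertices = 35709  # assumed vertex count; depends on the face model
face_shape = tf.placeholder(tf.float32, [batchsize, n_vertices, 3])
face_norm = tf.placeholder(tf.float32, [batchsize, n_vertices, 3])
face_color = tf.placeholder(tf.float32, [batchsize, n_vertices, 3])  # 0-255

# facemodel.tri must hold the 1-indexed triangle list consumed above.
rendered = Render_layer(face_shape, face_norm, face_color, facemodel, batchsize)
# rendered: [batchsize, 224, 224, 4] RGBA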
Example #2
    def Render_block(self, face_shape, face_norm, face_color, facemodel,
                     batchsize):
        # render reconstruction images
        n_vex = int(facemodel.idBase.shape[0].value / 3)
        fov_y = 2 * tf.atan(112 /
                            (1015.)) * 180. / m.pi + tf.zeros([batchsize])

        # full face region
        face_shape = tf.reshape(face_shape, [batchsize, n_vex, 3])
        face_norm = tf.reshape(face_norm, [batchsize, n_vex, 3])
        face_color = tf.reshape(face_color, [batchsize, n_vex, 3])

        # camera settings
        # same as in Projection_block
        camera_position = tf.constant([[0, 0, 10.0]]) + tf.zeros(
            [batchsize, 3])
        camera_lookat = tf.constant([[0, 0, 0.0]]) + tf.zeros([batchsize, 3])
        camera_up = tf.constant([[0, 1.0, 0]]) + tf.zeros([batchsize, 3])

        # light source position (intensities are zero because the vertex colors are already computed)
        light_positions = tf.reshape(tf.constant([0, 0, 1e5]),
                                     [1, 1, 3]) + tf.zeros([batchsize, 1, 3])
        light_intensities = tf.reshape(tf.constant([0.0, 0.0, 0.0]),
                                       [1, 1, 3]) + tf.zeros([batchsize, 1, 3])
        ambient_color = tf.reshape(tf.constant([1.0, 1, 1]),
                                   [1, 3]) + tf.zeros([batchsize, 3])

        near_clip = 0.01 * tf.ones([batchsize])
        far_clip = 50 * tf.ones([batchsize])
        # use tf_mesh_renderer for rasterization (https://github.com/google/tf_mesh_renderer)
        # img: [batchsize, 224, 224, 4] images in RGBA order (0-255)

        if not is_windows:
            with tf.device('/cpu:0'):
                img = mesh_renderer.mesh_renderer(
                    face_shape,
                    tf.cast(facemodel.face_buf - 1, tf.int32),
                    face_norm,
                    face_color,
                    camera_position=camera_position,
                    camera_lookat=camera_lookat,
                    camera_up=camera_up,
                    light_positions=light_positions,
                    light_intensities=light_intensities,
                    image_width=224,
                    image_height=224,
                    fov_y=fov_y,  #12.5936
                    ambient_color=ambient_color,
                    near_clip=near_clip,
                    far_clip=far_clip)
            return img
        else:
            # placeholder output when the renderer is unavailable on Windows
            return np.zeros([224, 224], dtype=np.int32)
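
As a cross-check, the hard-coded fov_y of 12.5936 in the first example is exactly this formula evaluated once: a 224-px image has a half-height of 112 px, and with the 1015-px focal length used here,

import math
fov_y = 2 * math.atan(112 / 1015.0) * 180.0 / math.pi
print(round(fov_y, 4))  # 12.5936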
Example #3
    def testFullRenderGradientComputation(self):
        """Verifies the Jacobian matrix for the entire renderer.

    This ensures correct gradients are propagated backwards through the entire
    process, not just through the rasterization kernel. Uses the simple cube
    forward pass.
    """
        image_height = 21
        image_width = 28

        # rotate the cube for the test:
        model_transforms = camera_utils.euler_matrices(
            [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

        vertices_world_space = tf.matmul(tf.stack(
            [self.cube_vertices, self.cube_vertices]),
                                         model_transforms,
                                         transpose_b=True)

        normals_world_space = tf.matmul(tf.stack(
            [self.cube_normals, self.cube_normals]),
                                        model_transforms,
                                        transpose_b=True)

        # camera position:
        eye = tf.constant([0.0, 0.0, 6.0], dtype=tf.float32)
        center = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32)
        world_up = tf.constant([0.0, 1.0, 0.0], dtype=tf.float32)

        # Scene has a single light from the viewer's eye.
        light_positions = tf.expand_dims(tf.stack([eye, eye], axis=0), axis=1)
        light_intensities = tf.ones([2, 1, 3], dtype=tf.float32)

        vertex_diffuse_colors = tf.ones_like(vertices_world_space,
                                             dtype=tf.float32)

        rendered = mesh_renderer.mesh_renderer(
            vertices_world_space, self.cube_triangles, normals_world_space,
            vertex_diffuse_colors, eye, center, world_up, light_positions,
            light_intensities, image_width, image_height)

        with self.test_session():
            theoretical, numerical = tf.test.compute_gradient(
                self.cube_vertices, (8, 3),
                rendered, (2, image_height, image_width, 4),
                x_init_value=self.cube_vertices.eval(),
                delta=1e-3)
            jacobians_match, message = (
                test_utils.check_jacobians_are_nearly_equal(
                    theoretical, numerical, 0.01, 0.01))
            self.assertTrue(jacobians_match, message)
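
Note: tf.test.compute_gradient returns two Jacobians, the theoretical one obtained by backpropagation and a numerical one obtained by finite differences with the given delta; the helper then requires them to agree entry-wise within the 0.01 tolerances, which is what makes this an end-to-end gradient test rather than a kernel-only one.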
Example #4
    def call(self, x):
        vertices, faces = x[0], x[1]
        normals = x[2]
        colors = x[3]
        eye, center, world_up = x[4], x[5], x[6]
        light_positions, light_intensities = x[7], x[8]
        return mesh_renderer.mesh_renderer(
            vertices, faces, normals, colors,
            K.tile(eye[tf.newaxis, :], [vertices.shape[0], 1]),
            K.tile(center[tf.newaxis, :], [vertices.shape[0], 1]),
            K.tile(world_up[tf.newaxis, :], [vertices.shape[0], 1]),
            K.tile(light_positions[tf.newaxis, :, :],
                   [vertices.shape[0], 1, 1]),
            K.tile(light_intensities[tf.newaxis, :, :],
                   [vertices.shape[0], 1, 1]),
            self.resolution[0],
            self.resolution[1])
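
This call method evidently belongs to a custom Keras layer that tiles shared camera and light parameters across the batch. A minimal sketch of the enclosing class, assuming only that self.resolution holds (image_width, image_height); the class name and constructor are hypothetical:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K

class MeshRendererLayer(keras.layers.Layer):  # hypothetical name
    """Wraps mesh_renderer.mesh_renderer for use inside a Keras model."""

    def __init__(self, resolution, **kwargs):
        super(MeshRendererLayer, self).__init__(**kwargs)
        self.resolution = resolution  # (image_width, image_height)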
Example #5
    def testRendersSimpleCube(self):
        """Renders a simple cube to test the full forward pass.

    Verifies the functionality of both the custom kernel and the python wrapper.
    """

        model_transforms = camera_utils.euler_matrices(
            [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

        vertices_world_space = tf.matmul(tf.stack(
            [self.cube_vertices, self.cube_vertices]),
                                         model_transforms,
                                         transpose_b=True)

        normals_world_space = tf.matmul(tf.stack(
            [self.cube_normals, self.cube_normals]),
                                        model_transforms,
                                        transpose_b=True)

        # camera position:
        eye = tf.constant(2 * [[0.0, 0.0, 6.0]], dtype=tf.float32)
        center = tf.constant(2 * [[0.0, 0.0, 0.0]], dtype=tf.float32)
        world_up = tf.constant(2 * [[0.0, 1.0, 0.0]], dtype=tf.float32)
        image_width = 640
        image_height = 480
        light_positions = tf.constant([[[0.0, 0.0, 6.0]], [[0.0, 0.0, 6.0]]])
        light_intensities = tf.ones([2, 1, 3], dtype=tf.float32)
        vertex_diffuse_colors = tf.ones_like(vertices_world_space,
                                             dtype=tf.float32)

        rendered = mesh_renderer.mesh_renderer(
            vertices_world_space, self.cube_triangles, normals_world_space,
            vertex_diffuse_colors, eye, center, world_up, light_positions,
            light_intensities, image_width, image_height)

        with self.test_session() as sess:
            images = sess.run(rendered, feed_dict={})
            for image_id in range(images.shape[0]):
                target_image_name = 'Gray_Cube_%i.png' % image_id
                baseline_image_path = os.path.join(self.test_data_directory,
                                                   target_image_name)
                test_utils.expect_image_file_and_render_are_near(
                    self, sess, baseline_image_path, images[image_id, :, :, :])
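
Note that camera_utils.euler_matrices returns full 4x4 homogeneous transforms, so the [:, :3, :3] slice keeps only the rotation block, and transpose_b=True rotates the row-vector vertices without materializing the transposed matrices.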
Example #6
        # Inner helper: model_transforms and the normal, camera, and light
        # tensors are captured from the enclosing test scope.
        def render_complex_cube(cube_vertices):
            vertices_world_space = torch.matmul(
                torch.stack([cube_vertices, cube_vertices]),
                model_transforms.transpose(1, 2))

            vertex_diffuse_colors = torch.ones_like(vertices_world_space, dtype=torch.float32)

            return mesh_renderer.mesh_renderer(
                vertices_world_space,
                self.cube_triangles,
                normals_world_space,
                vertex_diffuse_colors,
                eye,
                center,
                world_up,
                light_positions,
                light_intensities,
                image_width,
                image_height
            )
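
A closure shaped like this is typically handed to a numerical gradient checker. One plausible use, inside the same test scope; this is a sketch under the assumption that the captured tensors are also double precision, since torch.autograd.gradcheck wants float64 leaf tensors:

# hypothetical gradient check of the full renderer w.r.t. the vertices
test_vertices = self.cube_vertices.double().clone().requires_grad_(True)
torch.autograd.gradcheck(render_complex_cube, (test_vertices,), eps=1e-3, atol=0.01)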
Example #7
  def testComplexShading(self):
    """Tests specular highlights, colors, and multiple lights per image."""
    # rotate the cube for the test:
    model_transforms = camera_utils.euler_matrices(
        [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

    vertices_world_space = tf.matmul(
        tf.stack([self.cube_vertices, self.cube_vertices]),
        model_transforms,
        transpose_b=True)

    normals_world_space = tf.matmul(
        tf.stack([self.cube_normals, self.cube_normals]),
        model_transforms,
        transpose_b=True)

    # camera position:
    eye = tf.constant([[0.0, 0.0, 6.0], [0., 0.2, 18.0]], dtype=tf.float32)
    center = tf.constant([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]], dtype=tf.float32)
    world_up = tf.constant(
        [[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]], dtype=tf.float32)
    fov_y = tf.constant([40., 13.3], dtype=tf.float32)
    near_clip = tf.constant(0.1, dtype=tf.float32)
    far_clip = tf.constant(25.0, dtype=tf.float32)
    image_width = 640
    image_height = 480
    light_positions = tf.constant([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],
                                   [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])
    light_intensities = tf.constant(
        [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
         [[2.0, 0.0, 1.0], [0.0, 2.0, 1.0]]],
        dtype=tf.float32)
    # pyformat: disable
    vertex_diffuse_colors = tf.constant(2*[[[1.0, 0.0, 0.0],
                                            [0.0, 1.0, 0.0],
                                            [0.0, 0.0, 1.0],
                                            [1.0, 1.0, 1.0],
                                            [1.0, 1.0, 0.0],
                                            [1.0, 0.0, 1.0],
                                            [0.0, 1.0, 1.0],
                                            [0.5, 0.5, 0.5]]],
                                        dtype=tf.float32)
    vertex_specular_colors = tf.constant(2*[[[0.0, 1.0, 0.0],
                                             [0.0, 0.0, 1.0],
                                             [1.0, 1.0, 1.0],
                                             [1.0, 1.0, 0.0],
                                             [1.0, 0.0, 1.0],
                                             [0.0, 1.0, 1.0],
                                             [0.5, 0.5, 0.5],
                                             [1.0, 0.0, 0.0]]],
                                         dtype=tf.float32)
    # pyformat: enable
    shininess_coefficients = 6.0 * tf.ones([2, 8], dtype=tf.float32)
    ambient_color = tf.constant(
        [[0., 0., 0.], [0.1, 0.1, 0.2]], dtype=tf.float32)
    renders = mesh_renderer.mesh_renderer(
        vertices_world_space, self.cube_triangles, normals_world_space,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height, vertex_specular_colors,
        shininess_coefficients, ambient_color, fov_y, near_clip, far_clip)
    tonemapped_renders = tf.concat(
        [
            mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),
            renders[:, :, :, 3:4]
        ],
        axis=3)

    # Check that shininess coefficient broadcasting works by also rendering
    # with a scalar shininess coefficient, and ensuring the result is identical:
    broadcasted_renders = mesh_renderer.mesh_renderer(
        vertices_world_space, self.cube_triangles, normals_world_space,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height, vertex_specular_colors,
        6.0, ambient_color, fov_y, near_clip, far_clip)
    tonemapped_broadcasted_renders = tf.concat(
        [
            mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),
            broadcasted_renders[:, :, :, 3:4]
        ],
        axis=3)

    with self.test_session() as sess:
      images, broadcasted_images = sess.run(
          [tonemapped_renders, tonemapped_broadcasted_renders], feed_dict={})

      for image_id in range(images.shape[0]):
        target_image_name = 'Colored_Cube_%i.png' % image_id
        baseline_image_path = os.path.join(self.test_data_directory,
                                           target_image_name)
        test_utils.expect_image_file_and_render_are_near(
            self, sess, baseline_image_path, images[image_id, :, :, :])
        test_utils.expect_image_file_and_render_are_near(
            self, sess, baseline_image_path,
            broadcasted_images[image_id, :, :, :])
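
The tone_mapper call compresses only the RGB channels of the HDR shading result before comparison against the baseline PNGs; the alpha channel is passed through unchanged, which is why the concat reassembles channel 3 separately.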
Example #8
  def testThatCubeRotates(self):
    """Optimize a simple cube's rotation using pixel loss.

    The rotation is represented as static-basis euler angles. This test checks
    that the computed gradients are useful.
    """
    image_height = 480
    image_width = 640
    initial_euler_angles = [[0.0, 0.0, 0.0]]

    euler_angles = tf.Variable(initial_euler_angles)
    model_rotation = camera_utils.euler_matrices(euler_angles)[0, :3, :3]

    vertices_world_space = tf.reshape(
        tf.matmul(self.cube_vertices, model_rotation, transpose_b=True),
        [1, 8, 3])

    normals_world_space = tf.reshape(
        tf.matmul(self.cube_normals, model_rotation, transpose_b=True),
        [1, 8, 3])

    # camera position:
    eye = tf.constant([[0.0, 0.0, 6.0]], dtype=tf.float32)
    center = tf.constant([[0.0, 0.0, 0.0]], dtype=tf.float32)
    world_up = tf.constant([[0.0, 1.0, 0.0]], dtype=tf.float32)

    vertex_diffuse_colors = tf.ones_like(vertices_world_space, dtype=tf.float32)
    light_positions = tf.reshape(eye, [1, 1, 3])
    light_intensities = tf.ones([1, 1, 3], dtype=tf.float32)

    render = mesh_renderer.mesh_renderer(
        vertices_world_space, self.cube_triangles, normals_world_space,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height)
    render = tf.reshape(render, [image_height, image_width, 4])

    # Pick the desired cube rotation for the test:
    test_model_rotation = camera_utils.euler_matrices(
        [[-20.0, 0.0, 60.0]])[0, :3, :3]

    desired_vertex_positions = tf.reshape(
        tf.matmul(self.cube_vertices, test_model_rotation, transpose_b=True),
        [1, 8, 3])
    desired_normals = tf.reshape(
        tf.matmul(self.cube_normals, test_model_rotation, transpose_b=True),
        [1, 8, 3])
    desired_render = mesh_renderer.mesh_renderer(
        desired_vertex_positions, self.cube_triangles, desired_normals,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height)
    desired_render = tf.reshape(desired_render, [image_height, image_width, 4])

    loss = tf.reduce_mean(tf.abs(render - desired_render))
    optimizer = tf.train.MomentumOptimizer(0.7, 0.1)
    grad = tf.gradients(loss, [euler_angles])
    grad, _ = tf.clip_by_global_norm(grad, 1.0)
    opt_func = optimizer.apply_gradients([(grad[0], euler_angles)])

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      for _ in range(35):
        sess.run([loss, opt_func])
      final_image, desired_image = sess.run([render, desired_render])

      target_image_name = 'Gray_Cube_0.png'
      baseline_image_path = os.path.join(self.test_data_directory,
                                         target_image_name)
      test_utils.expect_image_file_and_render_are_near(
          self, sess, baseline_image_path, desired_image)
      test_utils.expect_image_file_and_render_are_near(
          self,
          sess,
          baseline_image_path,
          final_image,
          max_outlier_fraction=0.01,
          pixel_error_threshold=0.04)
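
Clipping the gradients to a global norm of 1.0 before the momentum update keeps the early iterations, where the rendered and target cubes barely overlap, from overshooting; after 35 steps the result only has to match the baseline within a 1% outlier fraction at a 0.04 pixel-error threshold.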
Example #9
# (snippet begins mid-script: cube_vertices, cube_normals, cube_triangles and
# model_transforms are defined earlier, as in the cube tests above)
vertices_world_space = tf.matmul(tf.stack([cube_vertices, cube_vertices]),
                                 model_transforms,
                                 transpose_b=True)

normals_world_space = tf.matmul(tf.stack([cube_normals, cube_normals]),
                                model_transforms,
                                transpose_b=True)

# camera position:
eye = tf.constant(2 * [[0.0, 0.0, 6.0]], dtype=tf.float32)
center = tf.constant(2 * [[0.0, 0.0, 0.0]], dtype=tf.float32)
world_up = tf.constant(2 * [[0.0, 1.0, 0.0]], dtype=tf.float32)
image_width = 1920
image_height = 1080
light_positions = tf.constant([[[0.0, 0.0, 6.0]], [[0.0, 0.0, 6.0]]])
light_intensities = tf.ones([2, 1, 3], dtype=tf.float32)
vertex_diffuse_colors = tf.ones_like(vertices_world_space, dtype=tf.float32)

rendered = mesh_renderer.mesh_renderer(vertices_world_space, cube_triangles,
                                       normals_world_space,
                                       vertex_diffuse_colors, eye, center,
                                       world_up, light_positions,
                                       light_intensities, image_width,
                                       image_height)
import skimage.io
with tf.Session() as sess:
    images = sess.run(rendered, feed_dict={})
    for image_id in range(images.shape[0]):
        target_image_name = 'Gray_Cube_%i.png' % image_id
        baseline_image_path = os.path.join('out', target_image_name)
        # renderer output is float RGBA; clip to [0, 1] before saving
        skimage.io.imsave(baseline_image_path, images[image_id].clip(0.0, 1.0))
Example #10
def main():

    test_data_directory = './test_data_face/render'

    # load obj
    face_mesh = m3io.import_mesh('./test_data_face/mesh.obj')

    texture_index = (face_mesh.tcoords.points[:, ::-1] *
                     face_mesh.texture.shape).astype(np.int32)

    vertex_color = face_mesh.texture.pixels[:, 1 - texture_index[:, 0],
                                            texture_index[:, 1]].T

    tf.reset_default_graph()
    # Set up a basic cube centered at the origin, with vertex normals pointing
    # outwards along the line from the origin to the cube vertices:
    face_vertices = tf.constant(face_mesh.points, dtype=tf.float32)
    face_normals = tf.nn.l2_normalize(face_vertices, dim=1)
    face_triangles = tf.constant(face_mesh.trilist, dtype=tf.int32)

    # testRendersSimpleCube:
    """Renders a simple cube to test the full forward pass.

    Verifies the functionality of both the custom kernel and the python wrapper.
    """

    n_rendering = 16

    model_transforms = camera_utils.euler_matrices(
        tf.random_uniform([n_rendering, 3]) * np.pi / 2 -
        np.pi / 4.)[:, :3, :3]

    vertices_world_space = tf.matmul(tf.stack(
        [face_vertices for _ in range(n_rendering)]),
                                     model_transforms,
                                     transpose_b=True)

    normals_world_space = tf.matmul(tf.stack(
        [face_normals for _ in range(n_rendering)]),
                                    model_transforms,
                                    transpose_b=True)

    # camera position:
    eye = tf.constant(n_rendering * [[0.0, 0.0, 6.0]], dtype=tf.float32)
    center = tf.constant(n_rendering * [[0.0, 0.0, 0.0]], dtype=tf.float32)
    world_up = tf.constant(n_rendering * [[0.0, 1.0, 0.0]], dtype=tf.float32)
    ambient_colors = tf.constant(n_rendering * [[0.2, 0.2, 0.2]],
                                 dtype=tf.float32)
    image_width = 256
    image_height = 256
    light_positions = tf.constant(n_rendering *
                                  [[[6.0, 6.0, 6.0], [-6.0, -6.0, 6.0]]])
    # two lights per image, so one intensity row per light
    light_intensities = tf.ones([n_rendering, 2, 3], dtype=tf.float32)
    vertex_diffuse_colors = tf.constant(np.stack(
        [vertex_color for _ in range(n_rendering)]),
                                        dtype=tf.float32)

    rendered = mesh_renderer.mesh_renderer(
        vertices_world_space,
        triangles=face_triangles,
        normals=normals_world_space,
        diffuse_colors=vertex_diffuse_colors,
        camera_position=eye,
        camera_lookat=center,
        camera_up=world_up,
        light_positions=light_positions,
        light_intensities=light_intensities,
        image_width=image_width,
        image_height=image_height,
        ambient_color=ambient_colors)

    image_id = 0
    with tf.Session() as sess:
        fps_list = []
        while image_id < 100:
            start_time = time.time()
            images = sess.run(rendered, feed_dict={})
            for image in images:
                target_image_name = 'Gray_face_%i.png' % image_id
                image_id += 1
                baseline_image_path = os.path.join(test_data_directory,
                                                   target_image_name)

                mio.export_image(Image.init_from_channels_at_back(
                    image[..., :3].clip(0, 1)),
                                 baseline_image_path,
                                 overwrite=True)

            end_time = time.time()
            fps = n_rendering / (end_time - start_time)
            fps_list.append(fps)
            if len(fps_list) > 5:
                fps_list.pop(0)
            print(np.mean(fps_list))  # rolling mean over the last five batches
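
Because the random rotations are built into the graph via tf.random_uniform, every session.run draws fresh poses without rebuilding the graph, so the printed value is a smoothed frames-per-second estimate of steady-state rendering throughput for batches of 16.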
Example #11
    def testComplexShading(self):
        """Test specular highlights, colors, and multiple lights per image."""

        model_transforms = camera_utils.euler_matrices(
            [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

        vertices_world_space = torch.matmul(
            torch.stack([self.cube_vertices, self.cube_vertices]),
            model_transforms.transpose(1, 2))

        normals_world_space = torch.matmul(
            torch.stack([self.cube_normals, self.cube_normals]),
            model_transforms.transpose(1, 2))

        # camera position:
        eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.2, 18.0]], dtype=torch.float32)
        center = torch.tensor([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]], dtype=torch.float32)
        world_up = torch.tensor([[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]], dtype=torch.float32)
        fov_y = torch.tensor([40.0, 13.3], dtype=torch.float32)
        near_clip = 0.1
        far_clip = 25.0
        image_width = 640
        image_height = 480
        light_positions = torch.tensor([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],
                                        [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])
        light_intensities = torch.tensor(
            [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
             [[2.0, 0.0, 1.0], [0.0, 2.0, 1.0]]],
            dtype=torch.float32)
        vertex_diffuse_colors = torch.tensor(2*[[[1.0, 0.0, 0.0],
                                                 [0.0, 1.0, 0.0],
                                                 [0.0, 0.0, 1.0],
                                                 [1.0, 1.0, 1.0],
                                                 [1.0, 1.0, 0.0],
                                                 [1.0, 0.0, 1.0],
                                                 [0.0, 1.0, 1.0],
                                                 [0.5, 0.5, 0.5]]],
                                             dtype=torch.float32)
        vertex_specular_colors = torch.tensor(2*[[[0.0, 1.0, 0.0],
                                                  [0.0, 0.0, 1.0],
                                                  [1.0, 1.0, 1.0],
                                                  [1.0, 1.0, 0.0],
                                                  [1.0, 0.0, 1.0],
                                                  [0.0, 1.0, 1.0],
                                                  [0.5, 0.5, 0.5],
                                                  [1.0, 0.0, 0.0]]],
                                              dtype=torch.float32)
        shininess_coefficients = 6.0 * torch.ones([2, 8], dtype=torch.float32)
        ambient_color = torch.tensor([[0.0, 0.0, 0.0], [0.1, 0.1, 0.2]], dtype=torch.float32)
        renders = mesh_renderer.mesh_renderer(
            vertices_world_space,
            self.cube_triangles,
            normals_world_space,
            vertex_diffuse_colors,
            eye,
            center,
            world_up,
            light_positions,
            light_intensities,
            image_width,
            image_height,
            vertex_specular_colors,
            shininess_coefficients,
            ambient_color,
            fov_y,
            near_clip,
            far_clip)
        tonemapped_renders = torch.cat([
                mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),
                renders[:, :, :, 3:4]
            ],
            dim=3)

        # Check that shininess coefficient broadcasting works by also rendering
        # with a scalar shininess coefficient, and ensuring the result is identical:
        broadcasted_renders = mesh_renderer.mesh_renderer(
            vertices_world_space,
            self.cube_triangles,
            normals_world_space,
            vertex_diffuse_colors,
            eye,
            center,
            world_up,
            light_positions,
            light_intensities,
            image_width,
            image_height,
            vertex_specular_colors,
            6.0,
            ambient_color,
            fov_y,
            near_clip,
            far_clip)
        tonemapped_broadcasted_renders = torch.cat([
                mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),
                broadcasted_renders[:, :, :, 3:4]
            ],
            dim=3)

        for image_id in range(renders.shape[0]):
            target_image_name = "Colored_Cube_%i.png" % image_id
            baseline_image_path = os.path.join(self.test_data_directory,
                                               target_image_name)
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, tonemapped_renders[image_id, :, :, :])
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, tonemapped_broadcasted_renders[image_id, :, :, :])
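
Unlike the TensorFlow version of this test above, the PyTorch port runs eagerly: there is no session, and the comparison helper takes the rendered tensors directly.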
Example #12
    def testThatCubeRotates(self):
        """Optimize a simple cube's rotation using pixel loss.

        The rotation is represented as static-basis euler angles. This test checks
        that the computed gradients are useful.
        """
        image_height = 480
        image_width = 640
        initial_euler_angles = [[0.0, 0.0, 0.0]]

        euler_angles = torch.tensor(initial_euler_angles, requires_grad=True)
        # model_rotation is derived from euler_angles, so it already tracks
        # gradients; setting requires_grad on it directly would fail because
        # it is not a leaf tensor.
        model_rotation = camera_utils.euler_matrices(euler_angles)[0, :3, :3]

        vertices_world_space = torch.reshape(
            torch.matmul(self.cube_vertices, model_rotation.t()),
            [1, 8, 3])

        normals_world_space = torch.reshape(
            torch.matmul(self.cube_normals, model_rotation.t()),
            [1, 8, 3])

        # camera position:
        eye = torch.tensor([[0.0, 0.0, 6.0]], dtype=torch.float32)
        center = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32)
        world_up = torch.tensor([[0.0, 1.0, 0.0]], dtype=torch.float32)

        vertex_diffuse_colors = torch.ones_like(vertices_world_space)
        light_positions = torch.reshape(eye, [1, 1, 3])
        light_intensities = torch.ones([1, 1, 3], dtype=torch.float32)

        # Pick the desired cube rotation for the test:
        test_model_rotation = camera_utils.euler_matrices([[-20.0, 0.0, 60.0]])[0, :3, :3]

        desired_vertex_positions = torch.reshape(
            torch.matmul(self.cube_vertices, test_model_rotation.t()),
            [1, 8, 3])
        desired_normals = torch.reshape(
            torch.matmul(self.cube_normals, test_model_rotation.t()),
            [1, 8, 3])

        optimizer = torch.optim.SGD([euler_angles], lr=0.7, momentum=0.1)
        for _ in range(35):
            optimizer.zero_grad()
            # Rebuild the rotated geometry from the current angles so each
            # iteration has a fresh autograd graph from euler_angles to the
            # rendered image (backward() frees the previous graph).
            model_rotation = camera_utils.euler_matrices(euler_angles)[0, :3, :3]
            vertices_world_space = torch.reshape(
                torch.matmul(self.cube_vertices, model_rotation.t()),
                [1, 8, 3])
            normals_world_space = torch.reshape(
                torch.matmul(self.cube_normals, model_rotation.t()),
                [1, 8, 3])
            render = mesh_renderer.mesh_renderer(
                vertices_world_space,
                self.cube_triangles,
                normals_world_space,
                vertex_diffuse_colors,
                eye,
                center,
                world_up,
                light_positions,
                light_intensities,
                image_width,
                image_height)
            desired_render = mesh_renderer.mesh_renderer(
                desired_vertex_positions,
                self.cube_triangles,
                desired_normals,
                vertex_diffuse_colors,
                eye,
                center,
                world_up,
                light_positions,
                light_intensities,
                image_width,
                image_height)
            loss = torch.mean(torch.abs(render - desired_render))
            loss.backward()
            optimizer.step()

        # detach so the comparison helper can convert the image to numpy
        render = torch.reshape(render.detach(), [image_height, image_width, 4])
        desired_render = torch.reshape(desired_render, [image_height, image_width, 4])
        target_image_name = "Gray_Cube_0.png"
        baseline_image_path = os.path.join(self.test_data_directory,
                                           target_image_name)
        test_utils.expect_image_file_and_render_are_near(
            self, baseline_image_path, desired_render)
        test_utils.expect_image_file_and_render_are_near(
            self,
            baseline_image_path,
            render,
            max_outlier_fraction=0.01,
            pixel_error_threshold=0.04)