Example #1 (TensorFlow)
  def testComplexShading(self):
    """Tests specular highlights, colors, and multiple lights per image."""
    # rotate the cube for the test:
    model_transforms = camera_utils.euler_matrices(
        [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

    vertices_world_space = tf.matmul(
        tf.stack([self.cube_vertices, self.cube_vertices]),
        model_transforms,
        transpose_b=True)

    normals_world_space = tf.matmul(
        tf.stack([self.cube_normals, self.cube_normals]),
        model_transforms,
        transpose_b=True)

    # camera position:
    eye = tf.constant([[0.0, 0.0, 6.0], [0., 0.2, 18.0]], dtype=tf.float32)
    center = tf.constant([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]], dtype=tf.float32)
    world_up = tf.constant(
        [[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]], dtype=tf.float32)
    fov_y = tf.constant([40., 13.3], dtype=tf.float32)
    near_clip = tf.constant(0.1, dtype=tf.float32)
    far_clip = tf.constant(25.0, dtype=tf.float32)
    image_width = 640
    image_height = 480
    light_positions = tf.constant([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],
                                   [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])
    light_intensities = tf.constant(
        [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
         [[2.0, 0.0, 1.0], [0.0, 2.0, 1.0]]],
        dtype=tf.float32)
    # pyformat: disable
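    # 2 * [...] duplicates the eight per-vertex colors for both images in
    # the batch: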
    vertex_diffuse_colors = tf.constant(2*[[[1.0, 0.0, 0.0],
                                            [0.0, 1.0, 0.0],
                                            [0.0, 0.0, 1.0],
                                            [1.0, 1.0, 1.0],
                                            [1.0, 1.0, 0.0],
                                            [1.0, 0.0, 1.0],
                                            [0.0, 1.0, 1.0],
                                            [0.5, 0.5, 0.5]]],
                                        dtype=tf.float32)
    vertex_specular_colors = tf.constant(2*[[[0.0, 1.0, 0.0],
                                             [0.0, 0.0, 1.0],
                                             [1.0, 1.0, 1.0],
                                             [1.0, 1.0, 0.0],
                                             [1.0, 0.0, 1.0],
                                             [0.0, 1.0, 1.0],
                                             [0.5, 0.5, 0.5],
                                             [1.0, 0.0, 0.0]]],
                                         dtype=tf.float32)
    # pyformat: enable
    shininess_coefficients = 6.0 * tf.ones([2, 8], dtype=tf.float32)
    ambient_color = tf.constant(
        [[0., 0., 0.], [0.1, 0.1, 0.2]], dtype=tf.float32)
    renders = mesh_renderer.mesh_renderer(
        vertices_world_space, self.cube_triangles, normals_world_space,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height, vertex_specular_colors,
        shininess_coefficients, ambient_color, fov_y, near_clip, far_clip)
    tonemapped_renders = tf.concat(
        [
            mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),
            renders[:, :, :, 3:4]
        ],
        axis=3)
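    # Note: only the RGB channels (0:3) are tone mapped; the alpha channel
    # (3:4) is concatenated back unchanged, so coverage is compared against
    # the baseline unmodified.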

    # Check that shininess coefficient broadcasting works by also rendering
    # with a scalar shininess coefficient, and ensuring the result is identical:
    broadcasted_renders = mesh_renderer.mesh_renderer(
        vertices_world_space, self.cube_triangles, normals_world_space,
        vertex_diffuse_colors, eye, center, world_up, light_positions,
        light_intensities, image_width, image_height, vertex_specular_colors,
        6.0, ambient_color, fov_y, near_clip, far_clip)
    tonemapped_broadcasted_renders = tf.concat(
        [
            mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),
            broadcasted_renders[:, :, :, 3:4]
        ],
        axis=3)

    with self.test_session() as sess:
      images, broadcasted_images = sess.run(
          [tonemapped_renders, tonemapped_broadcasted_renders], feed_dict={})

      for image_id in range(images.shape[0]):
        target_image_name = 'Colored_Cube_%i.png' % image_id
        baseline_image_path = os.path.join(self.test_data_directory,
                                           target_image_name)
        test_utils.expect_image_file_and_render_are_near(
            self, sess, baseline_image_path, images[image_id, :, :, :])
        test_utils.expect_image_file_and_render_are_near(
            self, sess, baseline_image_path,
            broadcasted_images[image_id, :, :, :])
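A note on the rotation convention: tf.matmul(..., transpose_b=True) multiplies each row-vector vertex by the transposed model matrix, which is the same as applying the rotation to each vertex as a column vector. The PyTorch port in Example #2 expresses the same product with transpose(1, 2). A minimal sketch of the equivalence in plain PyTorch (the matrices and vertices here are illustrative, not taken from the test):

    import torch

    # A batch of two 3x3 rotation matrices (rotations about the z-axis).
    angles = torch.tensor([0.3, -1.2])
    cos, sin = torch.cos(angles), torch.sin(angles)
    rotations = torch.zeros(2, 3, 3)
    rotations[:, 0, 0], rotations[:, 0, 1] = cos, -sin
    rotations[:, 1, 0], rotations[:, 1, 1] = sin, cos
    rotations[:, 2, 2] = 1.0

    vertices = torch.randn(2, 8, 3)  # two meshes of eight vertices each

    # Row-vector form, as in the tests above: v' = v @ R^T ...
    rotated_rows = torch.matmul(vertices, rotations.transpose(1, 2))
    # ... equals the column-vector form (R @ v^T)^T per batch element:
    rotated_cols = torch.matmul(
        rotations, vertices.transpose(1, 2)).transpose(1, 2)

    assert torch.allclose(rotated_rows, rotated_cols, atol=1e-6)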
Example #2 (PyTorch)
    def testComplexShading(self):
        """Test specular highlights, colors, and multiple lights per image."""

        # rotate the cube for the test:
        model_transforms = camera_utils.euler_matrices(
            [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

        vertices_world_space = torch.matmul(
            torch.stack([self.cube_vertices, self.cube_vertices]),
            model_transforms.transpose(1, 2))

        normals_world_space = torch.matmul(
            torch.stack([self.cube_normals, self.cube_normals]),
            model_transforms.transpose(1, 2))

        # camera position:
        eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.2, 18.0]], dtype=torch.float32)
        center = torch.tensor([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]], dtype=torch.float32)
        world_up = torch.tensor([[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]], dtype=torch.float32)
        fov_y = torch.tensor([40.0, 13.3], dtype=torch.float32)
        near_clip = 0.1
        far_clip = 25.0
        image_width = 640
        image_height = 480
        light_positions = torch.tensor([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],
                                        [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])
        light_intensities = torch.tensor(
            [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
             [[2.0, 0.0, 1.0], [0.0, 2.0, 1.0]]],
            dtype=torch.float32)
        vertex_diffuse_colors = torch.tensor(2*[[[1.0, 0.0, 0.0],
                                                 [0.0, 1.0, 0.0],
                                                 [0.0, 0.0, 1.0],
                                                 [1.0, 1.0, 1.0],
                                                 [1.0, 1.0, 0.0],
                                                 [1.0, 0.0, 1.0],
                                                 [0.0, 1.0, 1.0],
                                                 [0.5, 0.5, 0.5]]],
                                             dtype=torch.float32)
        vertex_specular_colors = torch.tensor(2*[[[0.0, 1.0, 0.0],
                                                  [0.0, 0.0, 1.0],
                                                  [1.0, 1.0, 1.0],
                                                  [1.0, 1.0, 0.0],
                                                  [1.0, 0.0, 1.0],
                                                  [0.0, 1.0, 1.0],
                                                  [0.5, 0.5, 0.5],
                                                  [1.0, 0.0, 0.0]]],
                                              dtype=torch.float32)
        shininess_coefficients = 6.0 * torch.ones([2, 8], dtype=torch.float32)
        ambient_color = torch.tensor([[0.0, 0.0, 0.0], [0.1, 0.1, 0.2]], dtype=torch.float32)
        renders = mesh_renderer.mesh_renderer(
            vertices_world_space,
            self.cube_triangles,
            normals_world_space,
            vertex_diffuse_colors,
            eye,
            center,
            world_up,
            light_positions,
            light_intensities,
            image_width,
            image_height,
            vertex_specular_colors,
            shininess_coefficients,
            ambient_color,
            fov_y,
            near_clip,
            far_clip)
        tonemapped_renders = torch.cat([
                mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),
                renders[:, :, :, 3:4]
            ],
            dim=3)

        # Check that shininess coefficient broadcasting works by also rendering
        # with a scalar shininess coefficient, and ensuring the result is identical:
        broadcasted_renders = mesh_renderer.mesh_renderer(
            vertices_world_space,
            self.cube_triangles,
            normals_world_space,
            vertex_diffuse_colors,
            eye,
            center,
            world_up,
            light_positions,
            light_intensities,
            image_width,
            image_height,
            vertex_specular_colors,
            6.0,
            ambient_color,
            fov_y,
            near_clip,
            far_clip)
        tonemapped_broadcasted_renders = torch.cat([
                mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),
                broadcasted_renders[:, :, :, 3:4]
            ],
            dim=3)

        for image_id in range(renders.shape[0]):
            target_image_name = "Colored_Cube_%i.png" % image_id
            baseline_image_path = os.path.join(self.test_data_directory,
                                               target_image_name)
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, tonemapped_renders[image_id, :, :, :])
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, tonemapped_broadcasted_renders[image_id, :, :, :])
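Both examples verify shininess broadcasting by rendering once with a [2, 8] tensor of 6.0s and once with the scalar 6.0. The equivalence comes down to ordinary broadcasting in the Phong-style specular exponent; a minimal sketch in plain PyTorch (reflection_dot and the shapes are illustrative assumptions, not the renderer's internals):

    import torch

    # Hypothetical clamped cosine terms for 2 images x 8 vertices.
    reflection_dot = torch.rand(2, 8)

    scalar_shininess = 6.0
    tensor_shininess = 6.0 * torch.ones(2, 8)

    # The specular falloff cos(theta)^shininess is the same either way,
    # because the scalar broadcasts against the (2, 8) tensor:
    assert torch.allclose(reflection_dot ** scalar_shininess,
                          reflection_dot ** tensor_shininess)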