Example #1
0
    def forward(self, euler_angles):
        """Renders the stored cube rotated by the given Euler angles.

        Args:
            euler_angles: float tensor accepted by
                `camera_utils.euler_matrices`; assumed batch size 1 since the
                output is reshaped to a single image — TODO confirm.

        Returns:
            Tuple `(image, geometry)`: the rendered RGBA image viewed as
            `[image_height, image_width, 4]`, plus the geometry output of
            `mesh_renderer.mesh_renderer`.
        """
        # Keep only the 3x3 rotation part of the 4x4 transform.
        model_rotation = camera_utils.euler_matrices(euler_angles)[:, :3, :3]
        # Rotate the 8 cube vertices and their normals into world space.
        vertices_world_space = self.cube_vertices.matmul(model_rotation.transpose(1, 2)).view(1, 8, 3)
        normals_world_space = self.cube_normals.matmul(model_rotation.transpose(1, 2)).view(1, 8, 3)

        rendered, geometry = mesh_renderer.mesh_renderer(
            vertices_world_space, self.cube_triangles, normals_world_space,
            self.vertex_diffuse_colors, self.eye, self.center, self.world_up,
            self.light_positions, self.light_intensities,
            self.image_width, self.image_height)
        return rendered.view(self.image_height, self.image_width, 4), geometry
        # NOTE(review): an unreachable nested `def render(...)` that followed
        # this return (and referenced names undefined in this scope) was
        # removed as dead code left over from a merge/paste.
    def test_fullRenderGradientComputation(self):
        """Verifies the Jacobian matrix for the entire renderer.

        This ensures correct gradients are propagated backwards through the
        entire process, not just through the rasterization kernel. Uses the
        simple cube forward pass.
        """
        image_height = 21
        image_width = 28

        # Two rotated copies of the cube, one per batch element.
        model_transforms = camera_utils.euler_matrices(
            torch.FloatTensor([[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]]))[:, :3, :3]

        # Camera placement.
        eye = torch.FloatTensor([0.0, 0.0, 6.0])
        center = torch.FloatTensor([0.0, 0.0, 0.0])
        world_up = torch.FloatTensor([0.0, 1.0, 0.0])

        # A single white light at the viewer's eye, shared by both images.
        light_positions = eye.repeat(2, 1, 1)
        light_intensities = torch.ones([2, 1, 3], dtype=torch.float32)

        vertex_diffuse_colors = torch.ones([2, 8, 3], dtype=torch.float32)

        def render_from_vertices(cube_vertices):
            device = cube_vertices.device
            transforms = model_transforms.to(device)
            world_vertices = torch.stack((cube_vertices, cube_vertices)).matmul(
                transforms.transpose(1, 2))
            normals = self.cube_normals.to(device)
            world_normals = torch.stack((normals, normals)).matmul(
                transforms.transpose(1, 2))
            images, _ = mesh_renderer.mesh_renderer(
                world_vertices, self.cube_triangles.to(device),
                world_normals, vertex_diffuse_colors.to(device),
                eye.to(device), center.to(device), world_up.to(device),
                light_positions.to(device), light_intensities.to(device),
                image_width, image_height)
            return images

        def check_on_device(device):
            cube_vertices = self.cube_vertices.clone().to(device).float().requires_grad_(True)
            simple_gradcheck(self, render_from_vertices, cube_vertices, 1e-3, 0.01, 0.01)

        self.runOnMultiDevice(check_on_device)
    def test_renderSimpleCube(self):
        """Renders a simple cube to test the full forward pass.

        Verifies the functionality of both the custom kernel and the python
        wrapper.
        """
        # Two rotated copies of the cube, one per batch element.
        model_transforms = camera_utils.euler_matrices(
            torch.FloatTensor([[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]]))[:, :3, :3]

        vertices_world_space = torch.stack(
            (self.cube_vertices, self.cube_vertices)).matmul(
            model_transforms.transpose(1, 2))
        normals_world_space = torch.stack(
            (self.cube_normals, self.cube_normals)).matmul(
            model_transforms.transpose(1, 2))

        # Identical camera for both batch elements.
        eye = torch.FloatTensor([[0.0, 0.0, 6.0]] * 2)
        center = torch.FloatTensor([[0.0, 0.0, 0.0]] * 2)
        world_up = torch.FloatTensor([[0.0, 1.0, 0.0]] * 2)
        image_width = 640
        image_height = 480
        # One white light per image, colocated with the eye.
        light_positions = torch.FloatTensor([[[0.0, 0.0, 6.0]]] * 2)
        light_intensities = torch.ones([2, 1, 3], dtype=torch.float32)
        vertex_diffuse_colors = torch.ones_like(vertices_world_space)

        def render_and_compare(device):
            renders, _ = mesh_renderer.mesh_renderer(
                vertices_world_space.to(device), self.cube_triangles.to(device),
                normals_world_space.to(device), vertex_diffuse_colors.to(device),
                eye.to(device), center.to(device), world_up.to(device),
                light_positions.to(device), light_intensities.to(device),
                image_width, image_height)

            # Compare every batch element against its golden image.
            for image_id in range(renders.shape[0]):
                expect_image_file_and_render_are_near(
                    self, 'Gray_Cube_{}.png'.format(image_id),
                    renders[image_id, :, :, :].cpu().numpy())

        self.runOnMultiDevice(render_and_compare)
    def test_complexShading(self):
        """Tests specular highlights, colors, and multiple lights per image."""
        # rotate the cube for the test:
        model_transforms = camera_utils.euler_matrices(
            torch.FloatTensor([[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]]))[:, :3, :3]

        # Batch of two rotated cube copies: [2, 8, 3].
        vertices_world_space = torch.stack([self.cube_vertices] * 2).matmul(
            model_transforms.transpose(1, 2))

        normals_world_space = torch.stack([self.cube_normals] * 2).matmul(
            model_transforms.transpose(1, 2))

        # camera position: a different camera per batch element (the second
        # one is deliberately off-axis with a tilted up-vector).
        eye = torch.FloatTensor([[0.0, 0.0, 6.0], [0., 0.2, 18.0]])
        center = torch.FloatTensor([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]])
        world_up = torch.FloatTensor([[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]])
        # Per-image vertical field of view (degrees — TODO confirm against
        # camera_utils) plus shared clipping planes.
        fov_y = torch.FloatTensor([40., 13.3])
        near_clip = 0.1
        far_clip = 25.
        image_width = 640
        image_height = 480
        # Two lights per image; the second image's lights are colored.
        light_positions = torch.FloatTensor([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],
                                             [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])
        light_intensities = torch.FloatTensor([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
                                               [[2.0, 0.0, 1.0], [0.0, 2.0, 1.0]]])
        # colors: one diffuse and one specular RGB triple per cube vertex,
        # identical across the two batch elements.
        vertex_diffuse_colors = torch.FloatTensor(2*[[[1.0, 0.0, 0.0],
                                                      [0.0, 1.0, 0.0],
                                                      [0.0, 0.0, 1.0],
                                                      [1.0, 1.0, 1.0],
                                                      [1.0, 1.0, 0.0],
                                                      [1.0, 0.0, 1.0],
                                                      [0.0, 1.0, 1.0],
                                                      [0.5, 0.5, 0.5]]])
        vertex_specular_colors = torch.FloatTensor(2*[[[0.0, 1.0, 0.0],
                                                       [0.0, 0.0, 1.0],
                                                       [1.0, 1.0, 1.0],
                                                       [1.0, 1.0, 0.0],
                                                       [1.0, 0.0, 1.0],
                                                       [0.0, 1.0, 1.0],
                                                       [0.5, 0.5, 0.5],
                                                       [1.0, 0.0, 0.0]]])
        # Uniform per-vertex shininess; the broadcast check below re-renders
        # with the equivalent scalar 6.0.
        shininess_coefficients = 6.0 * torch.ones([2, 8], dtype=torch.float32)
        # No ambient light in image 0; slightly blue ambient in image 1.
        ambient_color = torch.FloatTensor([[0., 0., 0.], [0.1, 0.1, 0.2]])

        # execute rendering
        def runOnDevice(device):
            renders, _ = mesh_renderer.mesh_renderer(
                vertices_world_space.to(device), self.cube_triangles.to(device),
                normals_world_space.to(device), vertex_diffuse_colors.to(device),
                eye.to(device), center.to(device), world_up.to(device), light_positions.to(device),
                light_intensities.to(device), image_width, image_height,
                vertex_specular_colors.to(device), shininess_coefficients.to(device),
                ambient_color.to(device), fov_y.to(device), near_clip, far_clip)

            # Tone-map RGB only; the alpha channel is passed through untouched.
            tonemapped_renders = torch.cat(
                [
                    mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),
                    renders[:, :, :, 3:4]
                ],
                dim=3)

            # Check that shininess coefficient broadcasting works by also rendering
            # with a scalar shininess coefficient, and ensuring the result is identical:
            broadcasted_renders, _ = mesh_renderer.mesh_renderer(
                vertices_world_space.to(device), self.cube_triangles.to(device),
                normals_world_space.to(device), vertex_diffuse_colors.to(device),
                eye.to(device), center.to(device), world_up.to(device), light_positions.to(device),
                light_intensities.to(device), image_width, image_height,
                vertex_specular_colors.to(device), 6.0,
                ambient_color.to(device), fov_y.to(device), near_clip, far_clip)

            tonemapped_broadcasted_renders = torch.cat(
                [
                    mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),
                    broadcasted_renders[:, :, :, 3:4]
                ],
                dim=3)

            # Both renders must match the same golden image, which also
            # verifies they match each other.
            for image_id in range(renders.shape[0]):
                target_image_name = 'Colored_Cube_{}.png'.format(image_id)
                expect_image_file_and_render_are_near(
                    self, target_image_name,
                    tonemapped_renders[image_id, :, :, :].cpu().numpy())
                expect_image_file_and_render_are_near(
                    self, target_image_name,
                    tonemapped_broadcasted_renders[image_id, :, :, :].cpu().numpy())

        self.runOnMultiDevice(runOnDevice)