def test_texture_map(self):
        """
        Test that a mesh with a texture map is loaded and rendered correctly
        """
        device = torch.device("cuda:0")
        DATA_DIR = (Path(__file__).resolve().parent.parent /
                    "docs/tutorials/data")
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        # Load mesh + texture
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 10, 20)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(lights=lights,
                                           cameras=cameras,
                                           materials=materials),
        )
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_map.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map.png")

        # There's a numerical instability at the corner of the cow's ear,
        # so we ignore that pixel.
        image_ref[137, 166] = 0
        rgb[137, 166] = 0

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts],
                       faces=mesh.faces_list(),
                       textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        #################################
        # Add blurring to rasterization
        #################################

        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
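        # The blur radius below inverts the sigmoid used for soft rasterization:
        # sigmoid(-d / sigma) = delta  =>  d = sigma * log(1/delta - 1), so faces
        # whose (squared) distance from a pixel exceeds this radius contribute
        # less than delta = 1e-4 to it and can be skipped.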
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
            bin_size=0,
        )

        images = renderer(
            mesh.clone(),
            raster_settings=raster_settings,
            blend_params=blend_params,
        )
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_blurry_textured_rendering.png")

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    def test_convert_clipped_to_unclipped_case_4(self):
        """
        Test with a single case 4 triangle which is clipped into
        a quadrilateral and subdivided.
        """
        device = "cuda:0"
        # fmt: off
        verts = torch.tensor(
            [
                [-1.0, 0.0, -1.0],  # noqa: E241, E201
                [0.0, 1.0, -1.0],  # noqa: E241, E201
                [1.0, 0.0, -1.0],  # noqa: E241, E201
                [0.0, -1.0, -1.0],  # noqa: E241, E201
                [-1.0, 0.5, 0.5],  # noqa: E241, E201
                [1.0, 1.0, 1.0],  # noqa: E241, E201
                [0.0, -1.0, 1.0],  # noqa: E241, E201
                [-1.0, 0.5, -0.5],  # noqa: E241, E201
                [1.0, 1.0, -1.0],  # noqa: E241, E201
                [-1.0, 0.0, 1.0],  # noqa: E241, E201
                [0.0, 1.0, 1.0],  # noqa: E241, E201
                [1.0, 0.0, 1.0],  # noqa: E241, E201
            ],
            dtype=torch.float32,
            device=device,
        )
        faces = torch.tensor(
            [
                [0, 1, 2],  # noqa: E241, E201  Case 2 fully clipped
                [3, 4, 5],  # noqa: E241, E201  Case 4 clipped and subdivided
                [5, 4, 3],  # noqa: E241, E201  Repeat of Case 4
                [6, 7, 8],  # noqa: E241, E201  Case 3 clipped
                [9, 10, 11],  # noqa: E241, E201  Case 1 untouched
            ],
            dtype=torch.int64,
            device=device,
        )
        # fmt: on
        meshes = Meshes(verts=[verts], faces=[faces])
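        # Clipping cases, as named in pytorch3d's mesh clipping: case 1 faces lie
        # fully in front of the z=0 plane (untouched), case 2 fully behind
        # (culled), case 3 have two verts behind (clipped to a smaller triangle)
        # and case 4 have one vert behind (clipped to a quad, split in two).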

        # Clip meshes
        clipped_faces = self.clip_faces(meshes)

        # The two Case 4 faces are each split into 2 (= 4 faces), plus 1 face
        # from Case 3 and 1 from Case 1.
        self.assertEqual(clipped_faces.face_verts.shape[0], 6)

        image_size = (10, 10)
        blur_radius = 0.05
        faces_per_pixel = 2
        perspective_correct = True
        bin_size = 0
        max_faces_per_bin = 20
        clip_barycentric_coords = False
        cull_backfaces = False

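        # _RasterizeFaceVerts is the low-level autograd Function used by
        # rasterize_meshes. Per pixel it returns the indices of the K nearest
        # faces (pix_to_face), their depths (zbuf), barycentric coordinates and
        # signed squared distances to the face edges (dists).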
        # Rasterize clipped mesh
        pix_to_face, zbuf, barycentric_coords, dists = _RasterizeFaceVerts.apply(
            clipped_faces.face_verts,
            clipped_faces.mesh_to_face_first_idx,
            clipped_faces.num_faces_per_mesh,
            clipped_faces.clipped_faces_neighbor_idx,
            image_size,
            blur_radius,
            faces_per_pixel,
            bin_size,
            max_faces_per_bin,
            perspective_correct,
            clip_barycentric_coords,
            cull_backfaces,
        )

        # Convert outputs so they are in terms of the unclipped mesh.
        outputs = convert_clipped_rasterization_to_original_faces(
            pix_to_face,
            barycentric_coords,
            clipped_faces,
        )
        pix_to_face_unclipped, barycentric_coords_unclipped = outputs

        # In the clipped mesh there are more faces than in the unclipped mesh
        self.assertTrue(pix_to_face.max() > pix_to_face_unclipped.max())
        # Unclipped pix_to_face indices must be less than the number of faces
        # in the unclipped mesh.
        self.assertTrue(pix_to_face_unclipped.max() < faces.shape[0])

    def test_join_atlas(self):
        """Meshes with TexturesAtlas joined into a scene"""
        # Test the result of rendering two tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        torch.manual_seed(1)
        device = torch.device("cuda:0")
        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 1.2
        verts_shifted1[:, 0] += 4
        verts_shifted1[:, 1] += 5
        verts[:, 0] -= 4
        verts[:, 1] -= 4

        [faces] = plain_torus.faces_list()
        map_size = 3
        # Two random atlases.
        # The averaging of the random numbers here is not consistent with the
        # meaning of the atlases, but makes each face a bit smoother than
        # if everything had a random color.
        atlas1 = torch.rand(size=(faces.shape[0], map_size, map_size, 3),
                            device=device)
        atlas1[:, 1] = 0.5 * atlas1[:, 0] + 0.5 * atlas1[:, 2]
        atlas1[:, :, 1] = 0.5 * atlas1[:, :, 0] + 0.5 * atlas1[:, :, 2]
        atlas2 = torch.rand(size=(faces.shape[0], map_size, map_size, 3),
                            device=device)
        atlas2[:, 1] = 0.5 * atlas2[:, 0] + 0.5 * atlas2[:, 2]
        atlas2[:, :, 1] = 0.5 * atlas2[:, :, 0] + 0.5 * atlas2[:, :, 2]

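        # TexturesAtlas takes one (F, R, R, 3) tensor per mesh: an R x R RGB
        # texture for each of the F faces, sampled via barycentric coordinates.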
        textures1 = TexturesAtlas(atlas=[atlas1])
        textures2 = TexturesAtlas(atlas=[atlas2])
        mesh1 = Meshes(verts=[verts], faces=[faces], textures=textures1)
        mesh2 = Meshes(verts=[verts_shifted1],
                       faces=[faces],
                       textures=textures2)
        mesh_joined = join_meshes_as_scene([mesh1, mesh2])

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
        )
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=device,
                                   blend_params=blend_params,
                                   cameras=cameras,
                                   lights=lights),
        )

        output = renderer(mesh_joined)

        image_ref = load_rgb_image("test_joinatlas_final.png", DATA_DIR)

        if DEBUG:
            debugging_outputs = []
            for mesh_ in [mesh1, mesh2]:
                debugging_outputs.append(renderer(mesh_))
            Image.fromarray((output[0, ..., :3].cpu().numpy() * 255).astype(
                np.uint8)).save(DATA_DIR / "test_joinatlas_final_.png")
            Image.fromarray(
                (debugging_outputs[0][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_joinatlas_1.png")
            Image.fromarray(
                (debugging_outputs[1][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_joinatlas_2.png")

        result = output[0, ..., :3].cpu()
        self.assertClose(result, image_ref, atol=0.05)

    def test_texture_map(self):
        """
        Test that a mesh with a texture map is loaded and rendered correctly.
        The pupils in the eyes of the cow should always be looking to the left.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_back.png"
                )

            # NOTE: a few pixels can be flaky, preventing `cond1` from holding,
            # so also allow `cond2` and check `cond1 or cond2`.
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        ##########################################
        # Check rendering of the front of the cow
        ##########################################

        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Move light to the front of the cow in world space
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_front.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(mesh, cameras=cameras, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_front.png"
                )

            # NOTE: a few pixels can be flaky, preventing `cond1` from holding,
            # so also allow `cond2` and check `cond1 or cond2`.
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        #################################
        # Add blurring to rasterization
        #################################
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
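        # As in the earlier blurred test, blur_radius = sigma * log(1/delta - 1)
        # is the (squared) distance at which a face's sigmoid contribution to a
        # pixel decays to delta = 1e-4.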
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
        )

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(
                mesh.clone(),
                cameras=cameras,
                raster_settings=raster_settings,
                blend_params=blend_params,
            )
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_blurry_textured_rendering.png"
                )

            self.assertClose(rgb, image_ref, atol=0.05)

    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded,
                             faces=faces_padded,
                             textures=textures)

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_"
            # If y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = "_"
        for cam_type in (
                FoVPerspectiveCameras,
                FoVOrthographicCameras,
                PerspectiveCameras,
                OrthographicCameras,
        ):
            cameras = cam_type(device=device, R=R, T=T)

            # Init shader settings
            materials = Materials(device=device)
            lights = PointLights(device=device)
            lights.location = torch.tensor([0.0, 0.0, +2.0],
                                           device=device)[None]

            raster_settings = RasterizationSettings(image_size=512,
                                                    blur_radius=0.0,
                                                    faces_per_pixel=1)
            rasterizer = MeshRasterizer(cameras=cameras,
                                        raster_settings=raster_settings)
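            # BlendParams positional args are (sigma, gamma, background_color);
            # a black background here.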
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            # Test several shaders
            shaders = {
                "phong": HardPhongShader,
                "gouraud": HardGouraudShader,
                "flat": HardFlatShader,
            }
            for (name, shader_init) in shaders.items():
                shader = shader_init(
                    lights=lights,
                    cameras=cameras,
                    materials=materials,
                    blend_params=blend_params,
                )
                renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
                images = renderer(sphere_mesh)
                rgb = images[0, ..., :3].squeeze().cpu()
                filename = "simple_sphere_light_%s%s%s.png" % (
                    name,
                    postfix,
                    cam_type.__name__,
                )

                image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)

                if DEBUG:
                    filename = "DEBUG_%s" % filename
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename)

            ########################################################
            # Move the light to the +z axis in world space so it is
            # behind the sphere. Note that +Z is in, +Y up,
            # +X left for both world and camera space.
            ########################################################
            lights.location[..., 2] = -2.0
            phong_shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            phong_renderer = MeshRenderer(rasterizer=rasterizer,
                                          shader=phong_shader)
            images = phong_renderer(sphere_mesh, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_simple_sphere_dark%s%s.png" % (
                    postfix,
                    cam_type.__name__,
                )
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename)

            image_ref_phong_dark = load_rgb_image(
                "test_simple_sphere_dark%s%s.png" %
                (postfix, cam_type.__name__),
                DATA_DIR,
            )
            self.assertClose(rgb, image_ref_phong_dark, atol=0.05)

    def test_join_uvs(self):
        """Meshes with TexturesUV joined into a scene"""
        # Test the result of rendering three tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        # This tests TexturesUV.join_scene with rectangle flipping,
        # and we check the form of the merged map as well.
        torch.manual_seed(1)
        device = torch.device("cuda:0")

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=256,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
        )
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=device,
                                   blend_params=blend_params,
                                   cameras=cameras,
                                   lights=lights),
        )

        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 0.5
        verts_shifted1[:, 1] += 7
        verts_shifted2 = verts.clone()
        verts_shifted2 *= 0.5
        verts_shifted2[:, 1] -= 7

        [faces] = plain_torus.faces_list()
        nocolor = torch.zeros((100, 100), device=device)
        color_gradient = torch.linspace(0, 1, steps=100, device=device)
        color_gradient1 = color_gradient[None].expand_as(nocolor)
        color_gradient2 = color_gradient[:, None].expand_as(nocolor)
        colors1 = torch.stack([nocolor, color_gradient1, color_gradient2],
                              dim=2)
        colors2 = torch.stack([color_gradient1, color_gradient2, nocolor],
                              dim=2)
        verts_uvs1 = torch.rand(size=(verts.shape[0], 2), device=device)
        verts_uvs2 = torch.rand(size=(verts.shape[0], 2), device=device)
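        # TexturesUV samples each map at per-vertex UVs in [0, 1]; faces_uvs
        # holds, per face, indices into verts_uvs, so a vertex may have
        # different UVs on different faces.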

        for i, align_corners, padding_mode in [
            (0, True, "border"),
            (1, False, "border"),
            (2, False, "zeros"),
        ]:
            textures1 = TexturesUV(
                maps=[colors1],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs1],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )

            # These downsamplings of colors2 are chosen to ensure a flip and a
            # non-flip when the maps are merged.
            # We have maps of size (100, 100), (50, 99) and (99, 50).
            textures2 = TexturesUV(
                maps=[colors2[::2, :-1]],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs2],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )
            offset = torch.tensor([0, 0, 0.5], device=device)
            textures3 = TexturesUV(
                maps=[colors2[:-1, ::2] + offset],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs2],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )
            mesh1 = Meshes(verts=[verts], faces=[faces], textures=textures1)
            mesh2 = Meshes(verts=[verts_shifted1],
                           faces=[faces],
                           textures=textures2)
            mesh3 = Meshes(verts=[verts_shifted2],
                           faces=[faces],
                           textures=textures3)
            mesh = join_meshes_as_scene([mesh1, mesh2, mesh3])

            output = renderer(mesh)[0, ..., :3].cpu()
            output1 = renderer(mesh1)[0, ..., :3].cpu()
            output2 = renderer(mesh2)[0, ..., :3].cpu()
            output3 = renderer(mesh3)[0, ..., :3].cpu()
            # The background color is white and the objects do not overlap, so we can
            # predict the merged image by taking the minimum over every channel
            merged = torch.min(torch.min(output1, output2), output3)

            image_ref = load_rgb_image(f"test_joinuvs{i}_final.png", DATA_DIR)
            map_ref = load_rgb_image(f"test_joinuvs{i}_map.png", DATA_DIR)

            if DEBUG:
                Image.fromarray((output.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_final_.png")
                Image.fromarray((merged.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_merged.png")

                Image.fromarray((output1.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_1.png")
                Image.fromarray((output2.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_2.png")
                Image.fromarray((output3.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_3.png")
                Image.fromarray((mesh.textures.maps_padded()[0].cpu().numpy() *
                                 255).astype(np.uint8)).save(
                                     DATA_DIR / f"test_joinuvs{i}_map_.png")
                Image.fromarray(
                    (mesh2.textures.maps_padded()[0].cpu().numpy() *
                     255).astype(np.uint8)).save(DATA_DIR /
                                                 f"test_joinuvs{i}_map2.png")
                Image.fromarray(
                    (mesh3.textures.maps_padded()[0].cpu().numpy() *
                     255).astype(np.uint8)).save(DATA_DIR /
                                                 f"test_joinuvs{i}_map3.png")

            self.assertClose(output, merged, atol=0.015)
            self.assertClose(output, image_ref, atol=0.05)
            self.assertClose(mesh.textures.maps_padded()[0].cpu(),
                             map_ref,
                             atol=0.05)

    def test_padding(self):
        N, V, F = 10, 100, 300
        device = torch.device("cuda:0")
        verts, faces = [], []
        valid = torch.randint(2, size=(N, ), dtype=torch.uint8, device=device)
        num_verts, num_faces = (
            torch.zeros(N, dtype=torch.int32),
            torch.zeros(N, dtype=torch.int32),
        )
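        # Build padded tensors directly: rows of -1 in a faces tensor are
        # treated as padding, which must come after all the real faces of a
        # mesh (the invalid padding built further below triggers an error).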
        for n in range(N):
            verts.append(torch.rand((V, 3), dtype=torch.float32,
                                    device=device))
            this_faces = torch.full((F, 3),
                                    -1,
                                    dtype=torch.int64,
                                    device=device)
            if valid[n]:
                v = torch.randint(3,
                                  high=V,
                                  size=(1, ),
                                  dtype=torch.int32,
                                  device=device)[0]
                f = torch.randint(F,
                                  size=(1, ),
                                  dtype=torch.int32,
                                  device=device)[0]
                this_faces[:f, :] = torch.randint(v,
                                                  size=(f, 3),
                                                  dtype=torch.int64,
                                                  device=device)
                num_verts[n] = v
                num_faces[n] = f
            faces.append(this_faces)

        mesh = Meshes(verts=torch.stack(verts), faces=torch.stack(faces))

        # Check verts/faces per mesh are set correctly in init.
        self.assertListEqual(mesh._num_faces_per_mesh.tolist(),
                             num_faces.tolist())
        self.assertListEqual(mesh._num_verts_per_mesh.tolist(), [V] * N)

        for n, (vv, ff) in enumerate(zip(mesh.verts_list(),
                                         mesh.faces_list())):
            self.assertClose(ff, faces[n][:num_faces[n]])
            self.assertClose(vv, verts[n])

        new_faces = [ff.clone() for ff in faces]
        v = torch.randint(3,
                          high=V,
                          size=(1, ),
                          dtype=torch.int32,
                          device=device)[0]
        f = torch.randint(F - 10, size=(1, ), dtype=torch.int32,
                          device=device)[0]
        this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
        this_faces[10:f + 10, :] = torch.randint(v,
                                                 size=(f, 3),
                                                 dtype=torch.int64,
                                                 device=device)
        new_faces[3] = this_faces

        with self.assertRaisesRegex(ValueError, "Padding of faces"):
            Meshes(verts=torch.stack(verts), faces=torch.stack(new_faces))

    def test_simple_sphere_outside_zfar(self):
        """
        Test output when rendering a sphere that is beyond zfar with a SoftPhongShader.
        This renders a sphere of radius 500, with the camera at x=1500, for different
        settings of zfar. This is intended to check that 1) setting cameras.zfar
        propagates to the blending function, so the rendered sphere is (soft) clipped
        if it is beyond zfar, and 2) there are no numerical precision/overflow errors
        associated with larger world coordinates.
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded() * 500
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded,
                             faces=faces_padded,
                             textures=textures)

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +1000.0],
                                       device=device)[None]

        raster_settings = RasterizationSettings(image_size=256,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)
        for zfar in (10000.0, 100.0):
            cameras = FoVPerspectiveCameras(device=device,
                                            R=R,
                                            T=T,
                                            aspect_ratio=1.0,
                                            fov=60.0,
                                            zfar=zfar)
            rasterizer = MeshRasterizer(cameras=cameras,
                                        raster_settings=raster_settings)
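            # BlendParams positional args are (sigma, gamma, background_color);
            # a blue background makes the (softly) clipped sphere easy to spot.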
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 1.0))

            shader = SoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            filename = "test_simple_sphere_outside_zfar_%d.png" % int(zfar)

            # Load reference image
            image_ref = load_rgb_image(filename, DATA_DIR)

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / ("DEBUG_" + filename))

            self.assertClose(rgb, image_ref, atol=0.05)

    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_mesh = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings
        if elevated_camera:
            R, T = look_at_view_transform(2.7, 45.0, 0.0)
            postfix = "_elevated_camera"
        else:
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Init renderer
        rasterizer = MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        )
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=PhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_light%s.png" % postfix
            )

        # Load reference image
        image_ref_phong = load_rgb_image(
            "test_simple_sphere_illuminated%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

        ###################################
        # Move the light behind the object
        ###################################
        # Check the image is dark
        lights.location[..., 2] = +2.0
        images = renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_dark%s.png" % postfix
            )

        # Load reference image
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

        ######################################
        # Change the shader to a GouradShader
        ######################################
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=GouradShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_light_gourad%s.png" % postfix
            )

        # Load reference image
        image_ref_gourad = load_rgb_image(
            "test_simple_sphere_light_gourad%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_gourad, atol=0.005))
        self.assertFalse(torch.allclose(rgb, image_ref_phong, atol=0.005))

    def init_mesh(
        num_meshes: int = 10,
        max_v: int = 100,
        max_f: int = 300,
        lists_to_tensors: bool = False,
        device: str = "cpu",
        requires_grad: bool = False,
    ):
        """
        Function to generate a Meshes object of N meshes with
        random numbers of vertices and faces.

        Args:
            num_meshes: Number of meshes to generate.
            max_v: Max number of vertices per mesh.
            max_f: Max number of faces per mesh.
            lists_to_tensors: Determines whether the generated meshes should be
                              constructed from lists (=False) or
                              a tensor (=True) of faces/verts.
            device: Device on which to create the meshes.
            requires_grad: Whether the generated verts require gradients.

        Returns:
            Meshes object.
        """
        device = torch.device(device)

        verts_list = []
        faces_list = []

        # Randomly generate numbers of faces and vertices in each mesh.
        if lists_to_tensors:
            # If we define faces/verts with tensors, f/v has to be the
            # same for each mesh in the batch.
            f = torch.randint(max_f, size=(1, ), dtype=torch.int32)
            v = torch.randint(3, high=max_v, size=(1, ), dtype=torch.int32)
            f = f.repeat(num_meshes)
            v = v.repeat(num_meshes)
        else:
            # For lists of faces and vertices, we can sample different v/f
            # per mesh.
            f = torch.randint(max_f, size=(num_meshes, ), dtype=torch.int32)
            v = torch.randint(3,
                              high=max_v,
                              size=(num_meshes, ),
                              dtype=torch.int32)

        # Generate the actual vertices and faces.
        for i in range(num_meshes):
            verts = torch.rand(
                (v[i], 3),
                dtype=torch.float32,
                device=device,
                requires_grad=requires_grad,
            )
            faces = torch.randint(v[i],
                                  size=(f[i], 3),
                                  dtype=torch.int64,
                                  device=device)
            verts_list.append(verts)
            faces_list.append(faces)

        if lists_to_tensors:
            verts_list = torch.stack(verts_list)
            faces_list = torch.stack(faces_list)

        return Meshes(verts=verts_list, faces=faces_list)
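
    # A minimal usage sketch (illustrative only):
    #   meshes = init_mesh(5, max_v=50, max_f=100, lists_to_tensors=True)
    # With lists_to_tensors=True every mesh shares the same (v, f) sizes, so
    # the padded representations contain no actual padding.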

    def test_texture_map(self):
        """
        Test that a mesh with a texture map is loaded and rendered correctly
        """
        device = torch.device("cuda:0")
        DATA_DIR = (
            Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        )
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        # Load mesh + texture
        verts, faces, aux = load_obj(obj_filename)
        faces_idx = faces.verts_idx.to(device)
        verts = verts.to(device)
        texture_uvs = aux.verts_uvs
        materials = aux.material_colors
        tex_maps = aux.texture_images

        # tex_maps is a dictionary with material names as keys and texture
        # images as values. We only need the images for this example.
        textures = Textures(
            maps=list(tex_maps.values()),
            faces_uvs=faces.textures_idx.to(torch.int64).to(device)[None, :],
            verts_uvs=texture_uvs.to(torch.float32).to(device)[None, :],
        )
        mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 10, 20)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            shader=TexturedPhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_map.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map.png"
            )

        # There's a numerical instability at the corner of the cow's ear,
        # so we ignore that pixel.
        image_ref[137, 166] = 0
        rgb[137, 166] = 0

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

        # Check grad exists
        verts = verts.clone()
        verts.requires_grad = True
        mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)
        images = renderer(mesh)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

    def test_case_3(self):
        """
        Case 3 triangles have exactly two vertices behind the clipping plane (z=0), so
        they are clipped into a smaller triangle.

        Test with a single triangle parallel to the z axis that intersects the
        image plane.
        """

        device = "cuda:0"
        verts = torch.tensor(
            [[-1.0, 0.0, -1.0], [0.0, 0.0, 1.0], [1.0, 0.0, -1.0]],
            dtype=torch.float32,
            device=device,
        )
        faces = torch.tensor(
            [
                [0, 1, 2],
            ],
            dtype=torch.int64,
            device=device,
        )
        meshes = Meshes(verts=[verts], faces=[faces])
        clipped_faces = self.clip_faces(meshes)

        zero_t = torch.zeros(size=(1,), dtype=torch.int64, device=device)
        clipped_face_verts = torch.tensor(
            [
                [
                    [0.4950, 0.0000, 0.0100],
                    [-0.4950, 0.0000, 0.0100],
                    [0.0000, 0.0000, 1.0000],
                ]
            ],
            device=device,
            dtype=torch.float32,
        )

        # barycentric_conversion[i, :, k] stores the barycentric weights
        # in terms of the world coordinates of the original
        # (big) triangle for the kth vertex in the clipped (small) triangle.
        barycentric_conversion = torch.tensor(
            [
                [
                    [0.0000, 0.4950, 0.0000],
                    [0.5050, 0.5050, 1.0000],
                    [0.4950, 0.0000, 0.0000],
                ]
            ],
            device=device,
            dtype=torch.float32,
        )

        self.assertClose(clipped_faces.face_verts, clipped_face_verts)
        self.assertEqual(clipped_faces.mesh_to_face_first_idx.item(), 0)
        self.assertEqual(clipped_faces.num_faces_per_mesh.item(), 1)
        self.assertClose(clipped_faces.faces_clipped_to_unclipped_idx, zero_t)
        self.assertClose(clipped_faces.faces_clipped_to_conversion_idx, zero_t)
        self.assertClose(
            clipped_faces.clipped_faces_neighbor_idx,
            zero_t - 1,  # default is -1
        )
        self.assertClose(clipped_faces.barycentric_conversion, barycentric_conversion)

    def test_assigned_normals(self):
        verts = torch.rand(2, 6, 3)
        faces = torch.randint(6, size=(2, 4, 3))
        no_normals = Meshes(verts=verts, faces=faces)
        self.assertFalse(no_normals.has_verts_normals())

        for verts_normals in [list(verts.unbind(0)), verts]:
            yes_normals = Meshes(
                verts=verts.clone(), faces=faces, verts_normals=verts_normals
            )
            self.assertTrue(yes_normals.has_verts_normals())
            self.assertClose(yes_normals.verts_normals_padded(), verts)
            yes_normals.offset_verts_(torch.FloatTensor([1, 2, 3]))
            self.assertClose(yes_normals.verts_normals_padded(), verts)
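            # A constant offset is a rigid translation, so the assigned normals
            # are preserved; the per-vertex offset below changes the geometry,
            # after which the normals should no longer match the originals.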
            yes_normals.offset_verts_(torch.FloatTensor([1, 2, 3]).expand(12, 3))
            self.assertFalse(torch.allclose(yes_normals.verts_normals_padded(), verts))

    def test_mesh_normal_consistency_simple(self):
        r"""
        Mesh 1:
                        v3
                        /\
                       /  \
                   e4 / f1 \ e3
                     /      \
                 v2 /___e2___\ v1
                    \        /
                     \      /
                 e1   \ f0 / e0
                       \  /
                        \/
                        v0
        """
        device = torch.device("cuda:0")
        # mesh1 shown above
        verts1 = torch.rand((4, 3), dtype=torch.float32, device=device)
        faces1 = torch.tensor([[0, 1, 2], [2, 1, 3]], dtype=torch.int64, device=device)

        # mesh2 is a cuboid with 8 verts, 12 faces and 18 edges
        verts2 = torch.tensor(
            [
                [0, 0, 0],
                [0, 0, 1],
                [0, 1, 0],
                [0, 1, 1],
                [1, 0, 0],
                [1, 0, 1],
                [1, 1, 0],
                [1, 1, 1],
            ],
            dtype=torch.float32,
            device=device,
        )
        faces2 = torch.tensor(
            [
                [0, 1, 2],
                [1, 3, 2],  # left face: 0, 1
                [2, 3, 6],
                [3, 7, 6],  # bottom face: 2, 3
                [0, 2, 6],
                [0, 6, 4],  # front face: 4, 5
                [0, 5, 1],
                [0, 4, 5],  # up face: 6, 7
                [6, 7, 5],
                [6, 5, 4],  # right face: 8, 9
                [1, 7, 3],
                [1, 5, 7],  # back face: 10, 11
            ],
            dtype=torch.int64,
            device=device,
        )

        # mesh3 is like mesh1 but with another face added to e2
        verts3 = torch.rand((5, 3), dtype=torch.float32, device=device)
        faces3 = torch.tensor(
            [[0, 1, 2], [2, 1, 3], [2, 1, 4]], dtype=torch.int64, device=device
        )

        meshes = Meshes(verts=[verts1, verts2, verts3], faces=[faces1, faces2, faces3])
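        # For every edge shared by two faces, mesh_normal_consistency measures
        # 1 - cos(n_a, -n_b) for the two face normals (cross products of edge
        # vectors, as computed below) and averages over the shared edges.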

        # mesh1: normal consistency computation
        n0 = (verts1[1] - verts1[2]).cross(verts1[3] - verts1[2])
        n1 = (verts1[1] - verts1[2]).cross(verts1[0] - verts1[2])
        loss1 = 1.0 - torch.cosine_similarity(n0.view(1, 3), -(n1.view(1, 3)))

        # mesh2: normal consistency computation
        # In the cube mesh, 6 edges are shared with coplanar faces (loss=0),
        # 12 edges are shared by perpendicular faces (loss=1)
        loss2 = 12.0 / 18

        # mesh3
        n0 = (verts3[1] - verts3[2]).cross(verts3[3] - verts3[2])
        n1 = (verts3[1] - verts3[2]).cross(verts3[0] - verts3[2])
        n2 = (verts3[1] - verts3[2]).cross(verts3[4] - verts3[2])
        loss3 = (
            3.0
            - torch.cosine_similarity(n0.view(1, 3), -(n1.view(1, 3)))
            - torch.cosine_similarity(n0.view(1, 3), -(n2.view(1, 3)))
            - torch.cosine_similarity(n1.view(1, 3), -(n2.view(1, 3)))
        )
        loss3 /= 3.0

        loss = (loss1 + loss2 + loss3) / 3.0

        out = mesh_normal_consistency(meshes)

        self.assertTrue(torch.allclose(out, loss))

    def test_compute_normals(self):

        # Simple case with one mesh where normals point in either +/- ijk
        verts = torch.tensor(
            [
                [0.1, 0.3, 0.0],
                [0.5, 0.2, 0.0],
                [0.6, 0.8, 0.0],
                [0.0, 0.3, 0.2],
                [0.0, 0.2, 0.5],
                [0.0, 0.8, 0.7],
                [0.5, 0.0, 0.2],
                [0.6, 0.0, 0.5],
                [0.8, 0.0, 0.7],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ],
            dtype=torch.float32,
        )
        faces = torch.tensor(
            [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], dtype=torch.int64
        )
        mesh = Meshes(verts=[verts], faces=[faces])
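        # The fourth face is degenerate (all three verts at the origin), so its
        # face normal and its vertices' normals are expected to be zero.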

        verts_normals_expected = torch.tensor(
            [
                [0.0, 0.0, 1.0],
                [0.0, 0.0, 1.0],
                [0.0, 0.0, 1.0],
                [-1.0, 0.0, 0.0],
                [-1.0, 0.0, 0.0],
                [-1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ]
        )
        faces_normals_expected = verts_normals_expected[[0, 3, 6, 9], :]

        self.assertTrue(
            torch.allclose(mesh.verts_normals_list()[0], verts_normals_expected)
        )
        self.assertTrue(
            torch.allclose(mesh.faces_normals_list()[0], faces_normals_expected)
        )
        self.assertTrue(
            torch.allclose(mesh.verts_normals_packed(), verts_normals_expected)
        )
        self.assertTrue(
            torch.allclose(mesh.faces_normals_packed(), faces_normals_expected)
        )

        # Multiple meshes in the batch with equal sized meshes
        meshes_extended = mesh.extend(3)
        for m in meshes_extended.verts_normals_list():
            self.assertTrue(torch.allclose(m, verts_normals_expected))
        for f in meshes_extended.faces_normals_list():
            self.assertTrue(torch.allclose(f, faces_normals_expected))

        # Multiple meshes in the batch with different sized meshes
        # Check padded and packed normals are the correct sizes.
        verts2 = torch.tensor(
            [
                [0.1, 0.3, 0.0],
                [0.5, 0.2, 0.0],
                [0.6, 0.8, 0.0],
                [0.0, 0.3, 0.2],
                [0.0, 0.2, 0.5],
                [0.0, 0.8, 0.7],
            ],
            dtype=torch.float32,
        )
        faces2 = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.int64)
        verts_list = [verts, verts2]
        faces_list = [faces, faces2]
        meshes = Meshes(verts=verts_list, faces=faces_list)
        verts_normals_padded = meshes.verts_normals_padded()
        faces_normals_padded = meshes.faces_normals_padded()

        for n in range(len(meshes)):
            v = verts_list[n].shape[0]
            f = faces_list[n].shape[0]
            if verts_normals_padded.shape[1] > v:
                self.assertTrue(verts_normals_padded[n, v:, :].eq(0).all())
                self.assertTrue(
                    torch.allclose(
                        verts_normals_padded[n, :v, :].view(-1, 3),
                        verts_normals_expected[:v, :],
                    )
                )
            if faces_normals_padded.shape[1] > f:
                self.assertTrue(faces_normals_padded[n, f:, :].eq(0).all())
                self.assertTrue(
                    torch.allclose(
                        faces_normals_padded[n, :f, :].view(-1, 3),
                        faces_normals_expected[:f, :],
                    )
                )

        verts_normals_packed = meshes.verts_normals_packed()
        faces_normals_packed = meshes.faces_normals_packed()
        self.assertTrue(
            list(verts_normals_packed.shape)
            == [verts.shape[0] + verts2.shape[0], 3]
        )
        self.assertTrue(
            list(faces_normals_packed.shape)
            == [faces.shape[0] + faces2.shape[0], 3]
        )

        # Single mesh where two faces share one vertex, so that vertex's normal
        # is the weighted sum of the two face normals.
        verts = torch.tensor(
            [
                [0.1, 0.3, 0.0],
                [0.5, 0.2, 0.0],
                [0.0, 0.3, 0.2],  # vertex is shared between two faces
                [0.0, 0.2, 0.5],
                [0.0, 0.8, 0.7],
            ],
            dtype=torch.float32,
        )
        faces = torch.tensor([[0, 1, 2], [2, 3, 4]], dtype=torch.int64)
        mesh = Meshes(verts=[verts], faces=[faces])

        verts_normals_expected = torch.tensor(
            [
                [-0.2408, -0.9631, -0.1204],
                [-0.2408, -0.9631, -0.1204],
                [-0.9389, -0.3414, -0.0427],
                [-1.0000, 0.0000, 0.0000],
                [-1.0000, 0.0000, 0.0000],
            ]
        )
        faces_normals_expected = torch.tensor(
            [[-0.2408, -0.9631, -0.1204], [-1.0000, 0.0000, 0.0000]]
        )
        self.assertTrue(
            torch.allclose(
                mesh.verts_normals_list()[0], verts_normals_expected, atol=4e-5
            )
        )
        self.assertTrue(
            torch.allclose(
                mesh.faces_normals_list()[0], faces_normals_expected, atol=4e-5
            )
        )

        # Check empty mesh has empty normals
        meshes = Meshes(verts=[], faces=[])
        self.assertEqual(meshes.verts_normals_packed().shape[0], 0)
        self.assertEqual(meshes.verts_normals_padded().shape[0], 0)
        self.assertEqual(meshes.verts_normals_list(), [])
        self.assertEqual(meshes.faces_normals_packed().shape[0], 0)
        self.assertEqual(meshes.faces_normals_padded().shape[0], 0)
        self.assertEqual(meshes.faces_normals_list(), [])

    def test_heterogeneous_meshes(self):
        device = torch.device("cuda:0")
        verts1 = torch.tensor(
            [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
            dtype=torch.float32,
            device=device,
            requires_grad=True,
        )
        faces1 = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)
        verts2 = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [1.5, 1.0, 0.0],
            ],
            dtype=torch.float32,
            device=device,
            requires_grad=True,
        )
        faces2 = torch.tensor([[0, 1, 2], [0, 3, 1]],
                              dtype=torch.int64,
                              device=device)
        faces3 = torch.tensor([[0, 1, 2], [0, 2, 3]],
                              dtype=torch.int64,
                              device=device)
        mesh = Meshes(verts=[verts1, verts2, verts2],
                      faces=[faces1, faces2, faces3])
        subdivide = SubdivideMeshes()
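        # SubdivideMeshes splits every face into four by inserting a new vertex
        # at the midpoint of each edge; existing vertex positions are unchanged.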
        new_mesh = subdivide(mesh.clone())

        gt_subdivided_verts1 = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [0.75, 0.5, 0.0],
                [0.25, 0.5, 0.0],
                [0.5, 0.0, 0.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        gt_subdivided_faces1 = torch.tensor(
            [[0, 3, 4], [1, 5, 3], [2, 4, 5], [5, 4, 3]],
            dtype=torch.int64,
            device=device,
        )
        # faces2:
        #
        #         v0 _______e2_______ v3
        #           /\              /
        #          /  \            /
        #         /    \          /
        #     e1 /      \ e0     / e4
        #       /        \      /
        #      /          \    /
        #     /            \  /
        #    /______________\/
        #  v2       e3      v1
        #
        # Subdivided faces2:
        #
        #         v0 _______v6_______ v3
        #           /\      /\      /
        #          /  \ f1 /  \ f3 /
        #         / f0 \  / f7 \  /
        #     v5 /______v4______\/v8
        #       /\      /\      /
        #      /  \ f6 /  \ f5 /
        #     / f4 \  / f2 \  /
        #    /______\/______\/
        #  v2       v7       v1
        #
        gt_subdivided_verts2 = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [1.5, 1.0, 0.0],
                [0.75, 0.5, 0.0],
                [0.25, 0.5, 0.0],
                [1.0, 1.0, 0.0],
                [0.5, 0.0, 0.0],
                [1.25, 0.5, 0.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        gt_subdivided_faces2 = torch.tensor(
            [
                [0, 4, 5],
                [0, 6, 4],
                [1, 7, 4],
                [3, 8, 6],
                [2, 5, 7],
                [1, 4, 8],
                [7, 5, 4],
                [8, 4, 6],
            ],
            dtype=torch.int64,
            device=device,
        )
        gt_subdivided_verts3 = gt_subdivided_verts2.clone()
        gt_subdivided_verts3[-1, :] = torch.tensor([0.75, 0.5, 0],
                                                   dtype=torch.float32,
                                                   device=device)
        gt_subdivided_faces3 = torch.tensor(
            [
                [0, 4, 5],
                [0, 5, 6],
                [1, 7, 4],
                [2, 8, 5],
                [2, 5, 7],
                [3, 6, 8],
                [7, 5, 4],
                [8, 6, 5],
            ],
            dtype=torch.int64,
            device=device,
        )
        new_mesh_verts1, new_mesh_faces1 = new_mesh.get_mesh_verts_faces(0)
        new_mesh_verts2, new_mesh_faces2 = new_mesh.get_mesh_verts_faces(1)
        new_mesh_verts3, new_mesh_faces3 = new_mesh.get_mesh_verts_faces(2)
        self.assertTrue(torch.allclose(new_mesh_verts1, gt_subdivided_verts1))
        self.assertTrue(torch.allclose(new_mesh_faces1, gt_subdivided_faces1))
        self.assertTrue(torch.allclose(new_mesh_verts2, gt_subdivided_verts2))
        self.assertTrue(torch.allclose(new_mesh_faces2, gt_subdivided_faces2))
        self.assertTrue(torch.allclose(new_mesh_verts3, gt_subdivided_verts3))
        self.assertTrue(torch.allclose(new_mesh_faces3, gt_subdivided_faces3))
        self.assertTrue(new_mesh_verts1.requires_grad == verts1.requires_grad)
        self.assertTrue(new_mesh_verts2.requires_grad == verts2.requires_grad)
        self.assertTrue(new_mesh_verts3.requires_grad == verts2.requires_grad)
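
    # SubdivideMeshes performs one step of midpoint subdivision: every edge
    # gains a new vertex and every face splits into four, so V' = V + E and
    # F' = 4F. A minimal sketch checking the sizes asserted above (the
    # helper name is illustrative, not part of the library API):
    def _sketch_subdivision_counts(self):
        # mesh1: V=3, E=3, F=1 -> 6 verts, 4 faces
        # mesh2: V=4, E=5, F=2 -> 9 verts, 8 faces
        for (V, E, F, V_new, F_new) in [(3, 3, 1, 6, 4), (4, 5, 2, 9, 8)]:
            self.assertEqual(V + E, V_new)
            self.assertEqual(4 * F, F_new)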
    def test_texture_map_atlas(self):
        """
        Test that a mesh with a per-face texture atlas is loaded and rendered correctly.
        Also check that the backward pass for texture atlas rendering is differentiable.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh and texture as a per face texture atlas.
        verts, faces, aux = load_obj(
            obj_filename,
            device=device,
            load_textures=True,
            create_texture_atlas=True,
            texture_atlas_size=8,
            texture_wrap=None,
        )
        atlas = aux.texture_atlas
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[atlas]),
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
            cull_backfaces=True,
        )

        # Init shader settings
        materials = Materials(device=device,
                              specular_color=((0, 0, 0), ),
                              shininess=0.0)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # The HardPhongShader can be used directly with atlas textures.
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardPhongShader(lights=lights,
                                   cameras=cameras,
                                   materials=materials),
        )

        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze()

        # Load reference image
        image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.detach().cpu().numpy() * 255).astype(
                np.uint8)).save(DATA_DIR / "DEBUG_texture_atlas_8x8_back.png")

        self.assertClose(rgb.cpu(), image_ref, atol=0.05)

        # Check that gradients are propagated correctly back to the
        # texture atlas. Because of how texture sampling is implemented
        # for the texture atlas, it is not possible to get gradients
        # back to the vertices.
        atlas.requires_grad = True
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[atlas]),
        )
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0001,
            faces_per_pixel=5,
            cull_backfaces=True,
            clip_barycentric_coords=True,
        )
        images = renderer(mesh, raster_settings=raster_settings)
        images[0, ...].sum().backward()

        fragments = rasterizer(mesh, raster_settings=raster_settings)
        # Some of the barycentric coordinates are outside the
        # [0, 1] range, as expected, because the blur radius is > 0
        self.assertTrue(fragments.bary_coords.ge(1.0).any())
        self.assertIsNotNone(atlas.grad)
        self.assertTrue(atlas.grad.sum().abs() > 0.0)
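
    # The texture atlas returned via aux.texture_atlas is a per-face grid of
    # RGB values of shape (F, R, R, 3), where R is texture_atlas_size. A
    # hedged sanity-check sketch (the helper name is illustrative):
    def _sketch_atlas_shape(self, faces, atlas):
        num_faces = faces.verts_idx.shape[0]
        # texture_atlas_size=8 was used above.
        self.assertEqual(atlas.shape, (num_faces, 8, 8, 3))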
    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_meshes = Meshes(verts=verts_padded,
                               faces=faces_padded,
                               textures=textures)

        # Init rasterizer settings
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s_%s.png" %
                (name, type(cameras).__name__),
                DATA_DIR,
            )
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                if i == 0 and DEBUG:
                    filename = "DEBUG_simple_sphere_batched_%s_%s.png" % (
                        name,
                        type(cameras).__name__,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename)
                self.assertClose(rgb, image_ref, atol=0.05)
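
    # Meshes.extend(N) repeats each mesh N times to form a batch, which is
    # why every batch element above should match the same reference image.
    # A minimal sketch of the batching behaviour (helper name illustrative):
    def _sketch_extend_batching(self):
        device = torch.device("cuda:0")
        batch = ico_sphere(0, device).extend(3)
        self.assertEqual(len(batch), 3)
        # The base icosahedron has 12 verts, so padded verts are (3, 12, 3).
        self.assertEqual(batch.verts_padded().shape, (3, 12, 3))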
    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene
                should have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_mesh = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_camera"
            # If y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init renderer
        rasterizer = MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        )
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardPhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_light%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_phong = load_rgb_image(
            "test_simple_sphere_light%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

        ########################################################
        # Move the light to the +z axis in world space so it is
        # behind the sphere. Note that +Z points into the screen,
        # +Y up and +X left in both world and camera space.
        ########################################################
        lights.location[..., 2] = -2.0
        images = renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_dark%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

        ######################################
        # Change the shader to a GouraudShader
        ######################################
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardGouraudShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_light_gouraud%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_gouraud = load_rgb_image(
            "test_simple_sphere_light_gouraud%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_gouraud, atol=0.005))

        ######################################
        # Change the shader to a HardFlatShader
        ######################################
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardFlatShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_light_flat%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_flat = load_rgb_image(
            "test_simple_sphere_light_flat%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_flat, atol=0.005))
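
    # The Hard*Shader variants above differ in where the Phong reflection
    # model is evaluated (per pixel, per vertex, or per face). A minimal
    # sketch of the model itself (illustrative only; the names here are
    # hypothetical, not the library API):
    def _sketch_phong_model(self, n, l, v, ambient, diffuse, specular,
                            shininess):
        n = torch.nn.functional.normalize(n, dim=-1)
        l = torch.nn.functional.normalize(l, dim=-1)
        v = torch.nn.functional.normalize(v, dim=-1)
        cos_nl = (n * l).sum(-1, keepdim=True).clamp(min=0.0)
        r = 2.0 * cos_nl * n - l  # light direction reflected about the normal
        spec = (r * v).sum(-1, keepdim=True).clamp(min=0.0) ** shininess
        return ambient + diffuse * cos_nl + specular * spec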
    def test_batch_uvs(self):
        """Test that two random tori with TexturesUV render the same as each individually."""
        torch.manual_seed(1)
        device = torch.device("cuda:0")
        plain_torus = torus(r=1, R=4, sides=10, rings=10, device=device)
        [verts] = plain_torus.verts_list()
        [faces] = plain_torus.faces_list()
        nocolor = torch.zeros((100, 100), device=device)
        color_gradient = torch.linspace(0, 1, steps=100, device=device)
        color_gradient1 = color_gradient[None].expand_as(nocolor)
        color_gradient2 = color_gradient[:, None].expand_as(nocolor)
        colors1 = torch.stack([nocolor, color_gradient1, color_gradient2],
                              dim=2)
        colors2 = torch.stack([color_gradient1, color_gradient2, nocolor],
                              dim=2)
        verts_uvs1 = torch.rand(size=(verts.shape[0], 2), device=device)
        verts_uvs2 = torch.rand(size=(verts.shape[0], 2), device=device)

        textures1 = TexturesUV(maps=[colors1],
                               faces_uvs=[faces],
                               verts_uvs=[verts_uvs1])
        textures2 = TexturesUV(maps=[colors2],
                               faces_uvs=[faces],
                               verts_uvs=[verts_uvs2])
        mesh1 = Meshes(verts=[verts], faces=[faces], textures=textures1)
        mesh2 = Meshes(verts=[verts], faces=[faces], textures=textures2)
        mesh_both = join_meshes_as_batch([mesh1, mesh2])

        R, T = look_at_view_transform(10, 10, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=128,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=device,
                                   lights=lights,
                                   cameras=cameras,
                                   blend_params=blend_params),
        )

        outputs = []
        for meshes in [mesh_both, mesh1, mesh2]:
            outputs.append(renderer(meshes))

        if DEBUG:
            Image.fromarray(
                (outputs[0][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_batch_uvs0.png")
            Image.fromarray(
                (outputs[1][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_batch_uvs1.png")
            Image.fromarray(
                (outputs[0][1, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_batch_uvs2.png")
            Image.fromarray(
                (outputs[2][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_batch_uvs3.png")

            diff = torch.abs(outputs[0][0, ..., :3] - outputs[1][0, ..., :3])
            Image.fromarray(((diff > 1e-5).cpu().numpy().astype(np.uint8) *
                             255)).save(DATA_DIR / "test_batch_uvs01.png")
            diff = torch.abs(outputs[0][1, ..., :3] - outputs[2][0, ..., :3])
            Image.fromarray(((diff > 1e-5).cpu().numpy().astype(np.uint8) *
                             255)).save(DATA_DIR / "test_batch_uvs23.png")

        self.assertClose(outputs[0][0, ..., :3],
                         outputs[1][0, ..., :3],
                         atol=1e-5)
        self.assertClose(outputs[0][1, ..., :3],
                         outputs[2][0, ..., :3],
                         atol=1e-5)
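
    # Conceptually, TexturesUV colors each pixel by interpolating a UV
    # coordinate across the face and sampling the map bilinearly. A minimal
    # sketch of such a lookup with grid_sample (illustrative only; the exact
    # axis and flip conventions used by TexturesUV may differ):
    def _sketch_uv_lookup(self):
        tex = torch.rand(1, 3, 100, 100)       # (N, C, H, W) texture map
        grid = torch.rand(1, 1, 8, 2) * 2 - 1  # sample points in [-1, 1]
        colors = torch.nn.functional.grid_sample(tex, grid,
                                                 align_corners=False)
        self.assertEqual(colors.shape, (1, 3, 1, 8))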
    def test_sampling_output(self):
        """
        Check outputs of sampling are correct for different meshes.
        For an ico_sphere, the sampled vertices should lie on a unit sphere.
        For an empty mesh, the samples and normals should be 0.
        """
        device = torch.device("cuda:0")

        # Unit simplex.
        verts_pyramid = torch.tensor(
            [
                [0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        faces_pyramid = torch.tensor(
            [[0, 1, 2], [0, 2, 3], [0, 1, 3], [1, 2, 3]],
            dtype=torch.int64,
            device=device,
        )
        sphere_mesh = ico_sphere(9, device)
        verts_sphere, faces_sphere = sphere_mesh.get_mesh_verts_faces(0)
        verts_empty = torch.tensor([], dtype=torch.float32, device=device)
        faces_empty = torch.tensor([], dtype=torch.int64, device=device)
        num_samples = 10
        meshes = Meshes(
            verts=[verts_empty, verts_sphere, verts_pyramid],
            faces=[faces_empty, faces_sphere, faces_pyramid],
        )
        samples, normals = sample_points_from_meshes(
            meshes, num_samples=num_samples, return_normals=True
        )
        samples = samples.cpu()
        normals = normals.cpu()

        self.assertEqual(samples.shape, (3, num_samples, 3))
        self.assertEqual(normals.shape, (3, num_samples, 3))

        # Empty meshes: should have all zeros for samples and normals.
        self.assertTrue(
            torch.allclose(samples[0, :], torch.zeros((1, num_samples, 3)))
        )
        self.assertTrue(
            torch.allclose(normals[0, :], torch.zeros((1, num_samples, 3)))
        )

        # Sphere: points should have radius 1.
        x, y, z = samples[1, :].unbind(1)
        radius = torch.sqrt(x ** 2 + y ** 2 + z ** 2)

        self.assertTrue(torch.allclose(radius, torch.ones((num_samples))))

        # Pyramid: points should lie on one of the faces.
        pyramid_verts = samples[2, :]
        pyramid_normals = normals[2, :]

        self.assertTrue(
            torch.allclose(
                pyramid_verts.lt(1).float(), torch.ones_like(pyramid_verts)
            )
        )
        self.assertTrue(
            torch.allclose(
                (pyramid_verts >= 0).float(), torch.ones_like(pyramid_verts)
            )
        )

        # Face 1: z = 0,  x + y <= 1, normals = (0, 0, 1).
        face_1_idxs = pyramid_verts[:, 2] == 0
        face_1_verts, face_1_normals = (
            pyramid_verts[face_1_idxs, :],
            pyramid_normals[face_1_idxs, :],
        )
        self.assertTrue(
            torch.all((face_1_verts[:, 0] + face_1_verts[:, 1]) <= 1)
        )
        self.assertTrue(
            torch.allclose(
                face_1_normals,
                torch.tensor([0, 0, 1], dtype=torch.float32).expand(
                    face_1_normals.size()
                ),
            )
        )

        # Face 2: x = 0,  z + y <= 1, normals = (1, 0, 0).
        face_2_idxs = pyramid_verts[:, 0] == 0
        face_2_verts, face_2_normals = (
            pyramid_verts[face_2_idxs, :],
            pyramid_normals[face_2_idxs, :],
        )
        self.assertTrue(
            torch.all((face_2_verts[:, 1] + face_2_verts[:, 2]) <= 1)
        )
        self.assertTrue(
            torch.allclose(
                face_2_normals,
                torch.tensor([1, 0, 0], dtype=torch.float32).expand(
                    face_2_normals.size()
                ),
            )
        )

        # Face 3: y = 0, x + z <= 1, normals = (0, -1, 0).
        face_3_idxs = pyramid_verts[:, 1] == 0
        face_3_verts, face_3_normals = (
            pyramid_verts[face_3_idxs, :],
            pyramid_normals[face_3_idxs, :],
        )
        self.assertTrue(
            torch.all((face_3_verts[:, 0] + face_3_verts[:, 2]) <= 1)
        )
        self.assertTrue(
            torch.allclose(
                face_3_normals,
                torch.tensor([0, -1, 0], dtype=torch.float32).expand(
                    face_3_normals.size()
                ),
            )
        )

        # Face 4: x + y + z = 1, normals = (1, 1, 1)/sqrt(3).
        face_4_idxs = pyramid_verts.gt(0).all(1)
        face_4_verts, face_4_normals = (
            pyramid_verts[face_4_idxs, :],
            pyramid_normals[face_4_idxs, :],
        )
        self.assertTrue(
            torch.allclose(
                face_4_verts.sum(1), torch.ones(face_4_verts.size(0))
            )
        )
        self.assertTrue(
            torch.allclose(
                face_4_normals,
                (
                    torch.tensor([1, 1, 1], dtype=torch.float32)
                    / torch.sqrt(torch.tensor(3, dtype=torch.float32))
                ).expand(face_4_normals.size()),
            )
        )
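
    # sample_points_from_meshes is based on area-weighted face sampling:
    # pick faces with probability proportional to their area, then pick a
    # point inside each face with uniform barycentric coordinates. A minimal
    # sketch of that technique (illustrative, not the library internals):
    def _sketch_area_weighted_sampling(self, verts, faces, num_samples):
        v0, v1, v2 = verts[faces[:, 0]], verts[faces[:, 1]], verts[faces[:, 2]]
        areas = 0.5 * torch.cross(v1 - v0, v2 - v0, dim=1).norm(dim=1)
        face_idx = torch.multinomial(areas, num_samples, replacement=True)
        # The square-root trick yields uniform barycentric coordinates.
        u = torch.rand(num_samples, device=verts.device).sqrt()
        v = torch.rand(num_samples, device=verts.device)
        w0, w1, w2 = 1.0 - u, u * (1.0 - v), u * v
        return (w0[:, None] * v0[face_idx] + w1[:, None] * v1[face_idx]
                + w2[:, None] * v2[face_idx])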
    def test_join_verts(self):
        """Meshes with TexturesVertex joined into a scene"""
        # Test the result of rendering two tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        torch.manual_seed(1)
        device = torch.device("cuda:0")
        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 0.5
        verts_shifted1[:, 1] += 7

        faces = plain_torus.faces_list()
        textures1 = TexturesVertex(verts_features=[torch.rand_like(verts)])
        textures2 = TexturesVertex(verts_features=[torch.rand_like(verts)])
        mesh1 = Meshes(verts=[verts], faces=faces, textures=textures1)
        mesh2 = Meshes(verts=[verts_shifted1], faces=faces, textures=textures2)
        mesh = join_meshes_as_scene([mesh1, mesh2])

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=256,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
        )
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=device,
                                   blend_params=blend_params,
                                   cameras=cameras,
                                   lights=lights),
        )

        output = renderer(mesh)

        image_ref = load_rgb_image("test_joinverts_final.png", DATA_DIR)

        if DEBUG:
            debugging_outputs = []
            for mesh_ in [mesh1, mesh2]:
                debugging_outputs.append(renderer(mesh_))
            Image.fromarray((output[0, ..., :3].cpu().numpy() * 255).astype(
                np.uint8)).save(DATA_DIR / "test_joinverts_final_.png")
            Image.fromarray(
                (debugging_outputs[0][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_joinverts_1.png")
            Image.fromarray(
                (debugging_outputs[1][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_joinverts_2.png")

        result = output[0, ..., :3].cpu()
        self.assertClose(result, image_ref, atol=0.05)
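
    # Conceptually, join_meshes_as_scene concatenates the vertex tensors and
    # offsets the second mesh's face indices by the number of verts that
    # precede it. A minimal sketch with hypothetical inputs (textures
    # omitted for brevity):
    def _sketch_join_as_scene(self, verts_a, faces_a, verts_b, faces_b):
        verts_joined = torch.cat([verts_a, verts_b], dim=0)
        faces_joined = torch.cat([faces_a, faces_b + verts_a.shape[0]], dim=0)
        return Meshes(verts=[verts_joined], faces=[faces_joined])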
    def test_case_4(self):
        """
        Case 4 triangles have exactly 1 vertex behind the clipping plane (z=0) so
        are clipped into a smaller quadrilateral and then divided into two triangles.

        Test with a single triangle, parallel to the z axis, which crosses
        the clipping plane.
        """

        device = "cuda:0"
        verts = torch.tensor(
            [[0.0, 0.0, -1.0], [-1.0, 0.0, 1.0], [1.0, 0.0, 1.0]],
            dtype=torch.float32,
            device=device,
        )
        faces = torch.tensor(
            [
                [0, 1, 2],
            ],
            dtype=torch.int64,
            device=device,
        )
        meshes = Meshes(verts=[verts], faces=[faces])
        clipped_faces = self.clip_faces(meshes)

        clipped_face_verts = torch.tensor(
            [
                # t1
                [
                    [-0.5050, 0.0000, 0.0100],
                    [-1.0000, 0.0000, 1.0000],
                    [0.5050, 0.0000, 0.0100],
                ],
                # t2
                [
                    [0.5050, 0.0000, 0.0100],
                    [-1.0000, 0.0000, 1.0000],
                    [1.0000, 0.0000, 1.0000],
                ],
            ],
            device=device,
            dtype=torch.float32,
        )

        barycentric_conversion = torch.tensor(
            [
                [
                    [0.4950, 0.0000, 0.4950],
                    [0.5050, 1.0000, 0.0000],
                    [0.0000, 0.0000, 0.5050],
                ],
                [
                    [0.4950, 0.0000, 0.0000],
                    [0.0000, 1.0000, 0.0000],
                    [0.5050, 0.0000, 1.0000],
                ],
            ],
            device=device,
            dtype=torch.float32,
        )

        self.assertClose(clipped_faces.face_verts, clipped_face_verts)
        self.assertEqual(clipped_faces.mesh_to_face_first_idx.item(), 0)
        self.assertEqual(clipped_faces.num_faces_per_mesh.item(),
                         2)  # now two faces instead of one
        self.assertClose(
            clipped_faces.faces_clipped_to_unclipped_idx,
            torch.tensor([0, 0], device=device, dtype=torch.int64),
        )
        # Neighboring face for each of the sub triangles e.g. for t1, neighbor is t2,
        # and for t2, neighbor is t1
        self.assertClose(
            clipped_faces.clipped_faces_neighbor_idx,
            torch.tensor([1, 0], device=device, dtype=torch.int64),
        )
        # barycentric_conversion has shape (F_clipped, 3, 3)
        self.assertEqual(clipped_faces.barycentric_conversion.shape[0], 2)
        self.assertClose(clipped_faces.barycentric_conversion,
                         barycentric_conversion)
        # Index into barycentric_conversion for each clipped face.
        self.assertClose(
            clipped_faces.faces_clipped_to_conversion_idx,
            torch.tensor([0, 1], device=device, dtype=torch.int64),
        )
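
    # Worked check of the clipped vertices above: the new vertex on the edge
    # from a = (0, 0, -1) to b = (-1, 0, 1) is its intersection with the
    # plane z = eps, where eps = 0.01 is consistent with the expected values
    # above (a re-derivation for illustration, not library code):
    def _sketch_clip_intersection(self):
        a = torch.tensor([0.0, 0.0, -1.0])
        b = torch.tensor([-1.0, 0.0, 1.0])
        eps = 0.01
        t = (eps - a[2]) / (b[2] - a[2])  # (0.01 + 1) / 2 = 0.505
        p = a + t * (b - a)               # -> (-0.5050, 0.0000, 0.0100)
        self.assertClose(p, torch.tensor([-0.5050, 0.0, 0.0100]))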
    def test_texture_map_atlas(self):
        """
        Test that a mesh with a per-face texture atlas is loaded and rendered correctly.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh and texture as a per face texture atlas.
        verts, faces, aux = load_obj(
            obj_filename,
            device=device,
            load_textures=True,
            create_texture_atlas=True,
            texture_atlas_size=8,
            texture_wrap=None,
        )
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[aux.texture_atlas]),
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                cull_backfaces=True)

        # Init shader settings
        materials = Materials(device=device,
                              specular_color=((0, 0, 0), ),
                              shininess=0.0)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # The HardPhongShader can be used directly with atlas textures.
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(lights=lights,
                                   cameras=cameras,
                                   materials=materials),
        )

        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_atlas_8x8_back.png")

        self.assertClose(rgb, image_ref, atol=0.05)
    def test_mixture_of_cases(self):
        """
        Test with two meshes composed of different cases to check all the
        indexing is correct.
        Case 4 faces are subdivided into two faces which are referred
        to as t1 and t2.
        """
        device = "cuda:0"
        # fmt: off
        verts = [
            torch.tensor(
                [
                    [-1.0, 0.0, -1.0],  # noqa: E241, E201
                    [0.0, 1.0, -1.0],  # noqa: E241, E201
                    [1.0, 0.0, -1.0],  # noqa: E241, E201
                    [0.0, -1.0, -1.0],  # noqa: E241, E201
                    [-1.0, 0.5, 0.5],  # noqa: E241, E201
                    [1.0, 1.0, 1.0],  # noqa: E241, E201
                    [0.0, -1.0, 1.0],  # noqa: E241, E201
                    [-1.0, 0.5, -0.5],  # noqa: E241, E201
                    [1.0, 1.0, -1.0],  # noqa: E241, E201
                    [-1.0, 0.0, 1.0],  # noqa: E241, E201
                    [0.0, 1.0, 1.0],  # noqa: E241, E201
                    [1.0, 0.0, 1.0],  # noqa: E241, E201
                ],
                dtype=torch.float32,
                device=device,
            ),
            torch.tensor(
                [
                    [0.0, -1.0, -1.0],  # noqa: E241, E201
                    [-1.0, 0.5, 0.5],  # noqa: E241, E201
                    [1.0, 1.0, 1.0],  # noqa: E241, E201
                ],
                dtype=torch.float32,
                device=device)
        ]
        faces = [
            torch.tensor(
                [
                    [0, 1, 2],  # noqa: E241, E201  Case 2 fully clipped
                    [3, 4, 5],  # noqa: E241, E201  Case 4 clipped and subdivided
                    [5, 4, 3],  # noqa: E241, E201  Repeat of Case 4
                    [6, 7, 8],  # noqa: E241, E201  Case 3 clipped
                    [9, 10, 11],  # noqa: E241, E201  Case 1 untouched
                ],
                dtype=torch.int64,
                device=device,
            ),
            torch.tensor(
                [
                    [0, 1, 2],  # noqa: E241, E201  Case 4
                ],
                dtype=torch.int64,
                device=device,
            ),
        ]
        # fmt: on
        meshes = Meshes(verts=verts, faces=faces)

        # Clip meshes
        clipped_faces = self.clip_faces(meshes)

        # mesh 1: 4x faces (from Case 4) + 1 (from Case 3) + 1 (from Case 1)
        # mesh 2: 2x faces (from Case 4)
        self.assertEqual(clipped_faces.face_verts.shape[0], 6 + 2)

        # Dummy index tensor to avoid having to initialize the dtype/device each time
        idx = torch.empty(size=(1, ), dtype=torch.int64, device=device)
        unclipped_idx = idx.new_tensor([1, 1, 2, 2, 3, 4, 5, 5])
        neighbors = idx.new_tensor([1, 0, 3, 2, -1, -1, 7, 6])
        first_idx = idx.new_tensor([0, 6])
        num_faces = idx.new_tensor([6, 2])

        self.assertClose(clipped_faces.clipped_faces_neighbor_idx, neighbors)
        self.assertClose(clipped_faces.faces_clipped_to_unclipped_idx,
                         unclipped_idx)
        self.assertClose(clipped_faces.mesh_to_face_first_idx, first_idx)
        self.assertClose(clipped_faces.num_faces_per_mesh, num_faces)

        # faces_clipped_to_conversion_idx maps each output face to the
        # corresponding row of the barycentric_conversion matrix.
        # The barycentric_conversion matrix is built by computing the
        # barycentric conversion weights for case 3 faces, then for
        # case 4 (t1) faces and finally for case 4 (t2) faces, and
        # concatenating the results in that order. Case 3 faces therefore
        # occupy the first rows of the matrix, followed by t1 and then t2.
        # Case type of all faces: [4 (t1), 4 (t2), 4 (t1), 4 (t2), 3, 1, 4 (t1), 4 (t2)]
        # From this ordering we can derive the indices into the
        # barycentric conversion matrix; see the sketch after this test.
        bary_idx = idx.new_tensor([1, 4, 2, 5, 0, -1, 3, 6])
        self.assertClose(clipped_faces.faces_clipped_to_conversion_idx,
                         bary_idx)
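
    # Re-deriving bary_idx from the case ordering documented above
    # (a sketch; the case list is transcribed from the comment):
    def _sketch_bary_idx(self):
        cases = ["4t1", "4t2", "4t1", "4t2", "3", "1", "4t1", "4t2"]
        n3, nt1 = cases.count("3"), cases.count("4t1")
        counters = {"3": 0, "4t1": 0, "4t2": 0}
        offsets = {"3": 0, "4t1": n3, "4t2": n3 + nt1}
        idx = []
        for case in cases:
            if case == "1":  # case 1 faces need no conversion
                idx.append(-1)
            else:
                idx.append(offsets[case] + counters[case])
                counters[case] += 1
        self.assertEqual(idx, [1, 4, 2, 5, 0, -1, 3, 6])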
    def test_joined_spheres(self):
        """
        Test a list of Meshes can be joined as a single mesh and
        the single mesh is rendered correctly with Phong, Gouraud
        and Flat Shaders.
        """
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        # Initialize a list containing two ico spheres of different sizes.
        sphere_list = [ico_sphere(3, device), ico_sphere(4, device)]
        # [(642 verts, 1280 faces), (2562 verts, 5120 faces)]
        # Scale factors applied to the vertices to resize the spheres.
        scales = [0.25, 1]
        # Horizontal offsets applied to the spheres to prevent overlap.
        offsets = [1.2, -0.3]
        # Initialize a list containing the adjusted sphere meshes.
        sphere_mesh_list = []
        for i in range(len(sphere_list)):
            verts = sphere_list[i].verts_padded() * scales[i]
            verts[0, :, 0] += offsets[i]
            sphere_mesh_list.append(
                Meshes(verts=verts, faces=sphere_list[i].faces_padded())
            )
        joined_sphere_mesh = join_mesh(sphere_mesh_list)
        joined_sphere_mesh.textures = Textures(
            verts_rgb=torch.ones_like(joined_sphere_mesh.verts_padded())
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            image = renderer(joined_sphere_mesh)
            rgb = image[..., :3].squeeze().cpu()
            if DEBUG:
                file_name = "DEBUG_joined_spheres_%s.png" % name
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / file_name
                )
            image_ref = load_rgb_image("test_joined_spheres_%s.png" % name, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
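
    # ico_sphere(level) quadruples the face count at each level starting
    # from the base icosahedron (12 verts, 20 faces), with V' = V + E per
    # step. A quick sketch verifying the counts noted above:
    def _sketch_ico_sphere_counts(self):
        for level, (v, f) in [(3, (642, 1280)), (4, (2562, 5120))]:
            m = ico_sphere(level)
            self.assertEqual(m.verts_packed().shape[0], v)
            self.assertEqual(m.faces_packed().shape[0], f)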
    def test_simple_sphere_screen(self):
        """
        Test output when rendering with PerspectiveCameras & OrthographicCameras
        in NDC vs screen space.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        R, T = look_at_view_transform(2.7, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        for cam_type in (PerspectiveCameras, OrthographicCameras):
            cameras = cam_type(
                device=device,
                R=R,
                T=T,
                principal_point=(
                    (
                        (512.0 - 1.0) / 2.0,
                        (512.0 - 1.0) / 2.0,
                    ),
                ),
                focal_length=(
                    (
                        (512.0 - 1.0) / 2.0,
                        (512.0 - 1.0) / 2.0,
                    ),
                ),
                image_size=((512, 512),),
                in_ndc=False,
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()
            filename = "test_simple_sphere_light_phong_%s.png" % cam_type.__name__

            image_ref = load_rgb_image(filename, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
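
    # Why these screen-space constants reproduce the NDC defaults: assuming
    # the convention suggested by the constants above, where the NDC range
    # [-1, 1] maps across (size - 1) pixels, the conversion yields a focal
    # length of 1.0 and a principal point of 0.0, i.e. the default NDC
    # camera, so the same reference image applies. A hedged sketch:
    def _sketch_screen_to_ndc(self):
        size = 512.0
        focal_screen = (size - 1.0) / 2.0
        principal_screen = (size - 1.0) / 2.0
        focal_ndc = focal_screen * 2.0 / (size - 1.0)
        principal_ndc = ((principal_screen - (size - 1.0) / 2.0)
                         * 2.0 / (size - 1.0))
        self.assertEqual((focal_ndc, principal_ndc), (1.0, 0.0))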