Example #1
    def test_simple_sphere(self):
        device = torch.device("cuda:0")
        ref_filename = "test_rasterized_sphere.png"
        image_ref_filename = DATA_DIR / ref_filename

        # Rescale image_ref to the 0 - 1 range and convert to a binary mask.
        image_ref = convert_image_to_binary_mask(image_ref_filename)

        # Init mesh
        sphere_mesh = ico_sphere(5, device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init rasterizer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)

        ####################################
        # 1. Test rasterizing a single mesh
        ####################################

        fragments = rasterizer(sphere_mesh)
        image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
        # Convert pix_to_face to a binary mask
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere.png")

        self.assertTrue(torch.allclose(image, image_ref))

        ##################################
        #  2. Test with a batch of meshes
        ##################################

        batch_size = 10
        sphere_meshes = sphere_mesh.extend(batch_size)
        fragments = rasterizer(sphere_meshes)
        for i in range(batch_size):
            image = fragments.pix_to_face[i, ..., 0].squeeze().cpu()
            image[image >= 0] = 1.0
            image[image < 0] = 0.0
            self.assertTrue(torch.allclose(image, image_ref))

        ####################################################
        #  3. Test that passing kwargs to rasterizer works.
        ####################################################

        #  Change the view transform to zoom in.
        R, T = look_at_view_transform(2.0, 0, 0, device=device)
        fragments = rasterizer(sphere_mesh, R=R, T=T)
        image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        ref_filename = "test_rasterized_sphere_zoom.png"
        image_ref_filename = DATA_DIR / ref_filename
        image_ref = convert_image_to_binary_mask(image_ref_filename)

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere_zoom.png")
        self.assertTrue(torch.allclose(image, image_ref))

        #################################
        #  4. Test init without cameras.
        ##################################

        # Create a new empty rasterizer:
        rasterizer = MeshRasterizer()

        # Check that omitting the cameras in both initialization
        # and the forward pass throws an error:
        with self.assertRaisesRegex(ValueError, "Cameras must be specified"):
            rasterizer(sphere_mesh)

        # Now pass in the cameras as a kwarg
        fragments = rasterizer(sphere_mesh,
                               cameras=cameras,
                               raster_settings=raster_settings)
        image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
        # Convert pix_to_face to a binary mask
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere.png")

        self.assertTrue(torch.allclose(image, image_ref))
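
The test above relies on a convert_image_to_binary_mask helper that is not shown. A minimal sketch of plausible behavior, matching the comment "rescale image_ref to the 0 - 1 range and convert to a binary mask" (the 0.5 cutoff is an assumption; the real test utility may differ):

import numpy as np
import torch
from PIL import Image

def convert_image_to_binary_mask(filename):
    # Load the reference PNG and rescale to the [0, 1] range.
    image = np.asarray(Image.open(filename)).astype(np.float32) / 255.0
    # Threshold to a binary {0.0, 1.0} mask.
    return torch.from_numpy((image > 0.5).astype(np.float32))
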
Example #2
    def test_texture_map_atlas(self):
        """
        Test a mesh with a texture map as a per face atlas is loaded and rendered correctly.
        Also check that the backward pass for texture atlas rendering is differentiable.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh and texture as a per face texture atlas.
        verts, faces, aux = load_obj(
            obj_filename,
            device=device,
            load_textures=True,
            create_texture_atlas=True,
            texture_atlas_size=8,
            texture_wrap=None,
        )
        atlas = aux.texture_atlas
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[atlas]),
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
            cull_backfaces=True,
        )

        # Init shader settings
        materials = Materials(device=device,
                              specular_color=((0, 0, 0), ),
                              shininess=0.0)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # The HardPhongShader can be used directly with atlas textures.
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardPhongShader(lights=lights,
                                   cameras=cameras,
                                   materials=materials),
        )

        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze()

        # Load reference image
        image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.detach().cpu().numpy() * 255).astype(
                np.uint8)).save(DATA_DIR / "DEBUG_texture_atlas_8x8_back.png")

        self.assertClose(rgb.cpu(), image_ref, atol=0.05)

        # Check gradients are propagated
        # correctly back to the texture atlas.
        # Because of how texture sampling is implemented
        # for the texture atlas it is not possible to get
        # gradients back to the vertices.
        atlas.requires_grad = True
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[atlas]),
        )
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0001,
            faces_per_pixel=5,
            cull_backfaces=True,
            clip_barycentric_coords=True,
        )
        images = renderer(mesh, raster_settings=raster_settings)
        images[0, ...].sum().backward()

        fragments = rasterizer(mesh, raster_settings=raster_settings)
        # Some of the bary coordinates are outside the
        # [0, 1] range, as expected because the blur radius is > 0.
        self.assertTrue(fragments.bary_coords.ge(1.0).any())
        self.assertIsNotNone(atlas.grad)
        self.assertTrue(atlas.grad.sum().abs() > 0.0)
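
For reference, the atlas returned by load_obj with texture_atlas_size=R is a per-face tensor of shape (F, R, R, 3). A minimal sketch constructing one by hand for an arbitrary mesh (random colors, not the loader's actual output):

import torch
from pytorch3d.renderer import TexturesAtlas
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere

device = torch.device("cuda:0")
base = ico_sphere(2, device)
faces = base.faces_packed()
R = 8  # matches texture_atlas_size=8 above
atlas = torch.rand(faces.shape[0], R, R, 3, device=device)  # (F, R, R, 3)
mesh = Meshes(
    verts=[base.verts_packed()],
    faces=[faces],
    textures=TexturesAtlas(atlas=[atlas]),
)
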
Example #3
    def test_simple_to(self):
        # Check that to() works without a cameras object.
        device = torch.device("cuda:0")
        rasterizer = MeshRasterizer()
        rasterizer.to(device)
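
As a hedged follow-up sketch (an assumption, not part of the test above): when a cameras object is attached, to() is expected to move it along with the rest of the rasterizer:

device = torch.device("cuda:0")
rasterizer = MeshRasterizer(cameras=FoVPerspectiveCameras())
rasterizer.to(device)
# Assumption: the attached cameras object follows the move.
assert rasterizer.cameras.device == device
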
Example #4
    def test_join_atlas(self):
        """Meshes with TexturesAtlas joined into a scene"""
        # Test the result of rendering two tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        torch.manual_seed(1)
        device = torch.device("cuda:0")
        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 1.2
        verts_shifted1[:, 0] += 4
        verts_shifted1[:, 1] += 5
        verts[:, 0] -= 4
        verts[:, 1] -= 4

        [faces] = plain_torus.faces_list()
        map_size = 3
        # Two random atlases.
        # The averaging of the random numbers here is not consistent with the
        # meaning of the atlases, but makes each face a bit smoother than
        # if everything had a random color.
        atlas1 = torch.rand(size=(faces.shape[0], map_size, map_size, 3),
                            device=device)
        atlas1[:, 1] = 0.5 * atlas1[:, 0] + 0.5 * atlas1[:, 2]
        atlas1[:, :, 1] = 0.5 * atlas1[:, :, 0] + 0.5 * atlas1[:, :, 2]
        atlas2 = torch.rand(size=(faces.shape[0], map_size, map_size, 3),
                            device=device)
        atlas2[:, 1] = 0.5 * atlas2[:, 0] + 0.5 * atlas2[:, 2]
        atlas2[:, :, 1] = 0.5 * atlas2[:, :, 0] + 0.5 * atlas2[:, :, 2]

        textures1 = TexturesAtlas(atlas=[atlas1])
        textures2 = TexturesAtlas(atlas=[atlas2])
        mesh1 = Meshes(verts=[verts], faces=[faces], textures=textures1)
        mesh2 = Meshes(verts=[verts_shifted1],
                       faces=[faces],
                       textures=textures2)
        mesh_joined = join_meshes_as_scene([mesh1, mesh2])

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
        )
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=device,
                                   blend_params=blend_params,
                                   cameras=cameras,
                                   lights=lights),
        )

        output = renderer(mesh_joined)

        image_ref = load_rgb_image("test_joinatlas_final.png", DATA_DIR)

        if DEBUG:
            debugging_outputs = []
            for mesh_ in [mesh1, mesh2]:
                debugging_outputs.append(renderer(mesh_))
            Image.fromarray((output[0, ..., :3].cpu().numpy() * 255).astype(
                np.uint8)).save(DATA_DIR / "test_joinatlas_final_.png")
            Image.fromarray(
                (debugging_outputs[0][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_joinatlas_1.png")
            Image.fromarray(
                (debugging_outputs[1][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_joinatlas_2.png")

        result = output[0, ..., :3].cpu()
        self.assertClose(result, image_ref, atol=0.05)
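
A quick contrast of the two join helpers used across these examples, with mesh1 and mesh2 as defined above:

from pytorch3d.structures import join_meshes_as_batch, join_meshes_as_scene

scene = join_meshes_as_scene([mesh1, mesh2])  # a single combined mesh, len 1
batch = join_meshes_as_batch([mesh1, mesh2])  # two separate meshes, len 2
assert len(scene) == 1 and len(batch) == 2
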
Example #5
    def test_joined_spheres(self):
        """
        Test a list of Meshes can be joined as a single mesh and
        the single mesh is rendered correctly with Phong, Gouraud
        and Flat Shaders.
        """
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        # Initialize a list containing two ico spheres of different sizes.
        sphere_list = [ico_sphere(3, device), ico_sphere(4, device)]
        # [(642 verts, 1280 faces), (2562 verts, 5120 faces)]
        # Scale factors applied to the vertices to resize the spheres.
        scales = [0.25, 1]
        # Horizontal offsets applied to the spheres to prevent overlap.
        offsets = [1.2, -0.3]
        # Initialize a list containing the adjusted sphere meshes.
        sphere_mesh_list = []
        for i in range(len(sphere_list)):
            verts = sphere_list[i].verts_padded() * scales[i]
            verts[0, :, 0] += offsets[i]
            sphere_mesh_list.append(
                Meshes(verts=verts, faces=sphere_list[i].faces_padded()))
        joined_sphere_mesh = join_meshes_as_scene(sphere_mesh_list)
        joined_sphere_mesh.textures = TexturesVertex(
            verts_features=torch.ones_like(joined_sphere_mesh.verts_padded()))

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            image = renderer(joined_sphere_mesh)
            rgb = image[..., :3].squeeze().cpu()
            if DEBUG:
                file_name = "DEBUG_joined_spheres_%s.png" % name
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / file_name)
            image_ref = load_rgb_image("test_joined_spheres_%s.png" % name,
                                       DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
Example #6
    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly.
        The pupils in the eyes of the cow should always be looking to the left.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(lights=lights,
                                           cameras=cameras,
                                           materials=materials),
        )

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_back.png")

            # NOTE: some pixels can be flaky, so `cond1` alone may fail.
            # Accept `cond2` (fewer than 5 outlier values) as a fallback.
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts],
                       faces=mesh.faces_list(),
                       textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        ##########################################
        # Check rendering of the front of the cow
        ##########################################

        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Move light to the front of the cow in world space
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_front.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(mesh, cameras=cameras, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_front.png")

            # NOTE: some pixels can be flaky, so `cond1` alone may fail.
            # Accept `cond2` (fewer than 5 outlier values) as a fallback.
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        #################################
        # Add blurring to rasterization
        #################################
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
        )

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png",
                                   DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(
                mesh.clone(),
                cameras=cameras,
                raster_settings=raster_settings,
                blend_params=blend_params,
            )
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_blurry_textured_rendering.png")

            self.assertClose(rgb, image_ref, atol=0.05)
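
The blur_radius used above inverts the sigmoid that soft rasterization applies to pixel-to-face distances: log(1/eps - 1) * sigma is roughly the distance at which a face's contribution decays to eps (here eps = 1e-4). A small worked check:

import numpy as np

sigma, eps = 5e-4, 1e-4
blur_radius = np.log(1.0 / eps - 1.0) * sigma
print(blur_radius)  # ~4.6e-3, since log(9999) ~ 9.21
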
Example #7
    def test_simple_sphere(self, elevated_camera=False, check_depth=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_"
            # If y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = "_"
        for cam_type in (
            FoVPerspectiveCameras,
            FoVOrthographicCameras,
            PerspectiveCameras,
            OrthographicCameras,
        ):
            cameras = cam_type(device=device, R=R, T=T)

            # Init shader settings
            materials = Materials(device=device)
            lights = PointLights(device=device)
            lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

            raster_settings = RasterizationSettings(
                image_size=512, blur_radius=0.0, faces_per_pixel=1
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            # Test several shaders
            shader_tests = [
                ShaderTest(HardPhongShader, "phong", "hard_phong"),
                ShaderTest(HardGouraudShader, "gouraud", "hard_gouraud"),
                ShaderTest(HardFlatShader, "flat", "hard_flat"),
            ]
            for test in shader_tests:
                shader = test.shader(
                    lights=lights,
                    cameras=cameras,
                    materials=materials,
                    blend_params=blend_params,
                )
                if check_depth:
                    renderer = MeshRendererWithFragments(
                        rasterizer=rasterizer, shader=shader
                    )
                    images, fragments = renderer(sphere_mesh)
                    self.assertClose(fragments.zbuf, rasterizer(sphere_mesh).zbuf)
                    # Check the alpha channel is the mask
                    self.assertClose(
                        images[..., -1], (fragments.pix_to_face[..., 0] >= 0).float()
                    )
                else:
                    renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
                    images = renderer(sphere_mesh)

                rgb = images[0, ..., :3].squeeze().cpu()
                filename = "simple_sphere_light_%s%s%s.png" % (
                    test.reference_name,
                    postfix,
                    cam_type.__name__,
                )

                image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)

                if DEBUG:
                    debug_filename = "simple_sphere_light_%s%s%s.png" % (
                        test.debug_name,
                        postfix,
                        cam_type.__name__,
                    )
                    filename = "DEBUG_%s" % debug_filename
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )

            ########################################################
            # Move the light to the -z axis in world space so it is
            # behind the sphere. Note that +Z is in, +Y up,
            # +X left for both world and camera space.
            ########################################################
            lights.location[..., 2] = -2.0
            phong_shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            if check_depth:
                phong_renderer = MeshRendererWithFragments(
                    rasterizer=rasterizer, shader=phong_shader
                )
                images, fragments = phong_renderer(sphere_mesh, lights=lights)
                self.assertClose(
                    fragments.zbuf, rasterizer(sphere_mesh, lights=lights).zbuf
                )
                # Check the alpha channel is the mask
                self.assertClose(
                    images[..., -1], (fragments.pix_to_face[..., 0] >= 0).float()
                )
            else:
                phong_renderer = MeshRenderer(
                    rasterizer=rasterizer, shader=phong_shader
                )
                images = phong_renderer(sphere_mesh, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_simple_sphere_dark%s%s.png" % (
                    postfix,
                    cam_type.__name__,
                )
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )

            image_ref_phong_dark = load_rgb_image(
                "test_simple_sphere_dark%s%s.png" % (postfix, cam_type.__name__),
                DATA_DIR,
            )
            self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
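
Example #7 references a ShaderTest helper that is not defined here. Judging from how its fields are used (test.shader, test.reference_name, test.debug_name), a plausible definition is a simple namedtuple; the actual test module may define it differently:

from collections import namedtuple

ShaderTest = namedtuple("ShaderTest", ["shader", "reference_name", "debug_name"])
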
Example #8
    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_meshes = Meshes(verts=verts_padded,
                               faces=faces_padded,
                               textures=textures)

        # Init rasterizer settings
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s_%s.png" %
                (name, type(cameras).__name__),
                DATA_DIR,
            )
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                if i == 0 and DEBUG:
                    filename = "DEBUG_simple_sphere_batched_%s_%s.png" % (
                        name,
                        type(cameras).__name__,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename)
                self.assertClose(rgb, image_ref, atol=0.05)
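
look_at_view_transform broadcasts per-element tensors into one camera per batch element, which is what makes the batched rendering above line up. A minimal shape check:

import torch
from pytorch3d.renderer import look_at_view_transform

batch_size = 5
dist = torch.full((batch_size,), 2.7)
R, T = look_at_view_transform(dist, torch.zeros(batch_size), torch.zeros(batch_size))
print(R.shape, T.shape)  # torch.Size([5, 3, 3]) torch.Size([5, 3])
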
Example #9
    def test_batch_uvs(self):
        """Test that two random tori with TexturesUV render the same as each individually."""
        torch.manual_seed(1)
        device = torch.device("cuda:0")
        plain_torus = torus(r=1, R=4, sides=10, rings=10, device=device)
        [verts] = plain_torus.verts_list()
        [faces] = plain_torus.faces_list()
        nocolor = torch.zeros((100, 100), device=device)
        color_gradient = torch.linspace(0, 1, steps=100, device=device)
        color_gradient1 = color_gradient[None].expand_as(nocolor)
        color_gradient2 = color_gradient[:, None].expand_as(nocolor)
        colors1 = torch.stack([nocolor, color_gradient1, color_gradient2],
                              dim=2)
        colors2 = torch.stack([color_gradient1, color_gradient2, nocolor],
                              dim=2)
        verts_uvs1 = torch.rand(size=(verts.shape[0], 2), device=device)
        verts_uvs2 = torch.rand(size=(verts.shape[0], 2), device=device)

        textures1 = TexturesUV(maps=[colors1],
                               faces_uvs=[faces],
                               verts_uvs=[verts_uvs1])
        textures2 = TexturesUV(maps=[colors2],
                               faces_uvs=[faces],
                               verts_uvs=[verts_uvs2])
        mesh1 = Meshes(verts=[verts], faces=[faces], textures=textures1)
        mesh2 = Meshes(verts=[verts], faces=[faces], textures=textures2)
        mesh_both = join_meshes_as_batch([mesh1, mesh2])

        R, T = look_at_view_transform(10, 10, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=128,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=device,
                                   lights=lights,
                                   cameras=cameras,
                                   blend_params=blend_params),
        )

        outputs = []
        for meshes in [mesh_both, mesh1, mesh2]:
            outputs.append(renderer(meshes))

        if DEBUG:
            Image.fromarray(
                (outputs[0][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_batch_uvs0.png")
            Image.fromarray(
                (outputs[1][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_batch_uvs1.png")
            Image.fromarray(
                (outputs[0][1, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_batch_uvs2.png")
            Image.fromarray(
                (outputs[2][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_batch_uvs3.png")

            diff = torch.abs(outputs[0][0, ..., :3] - outputs[1][0, ..., :3])
            Image.fromarray(((diff > 1e-5).cpu().numpy().astype(np.uint8) *
                             255)).save(DATA_DIR / "test_batch_uvs01.png")
            diff = torch.abs(outputs[0][1, ..., :3] - outputs[2][0, ..., :3])
            Image.fromarray(((diff > 1e-5).cpu().numpy().astype(np.uint8) *
                             255)).save(DATA_DIR / "test_batch_uvs23.png")

        self.assertClose(outputs[0][0, ..., :3],
                         outputs[1][0, ..., :3],
                         atol=1e-5)
        self.assertClose(outputs[0][1, ..., :3],
                         outputs[2][0, ..., :3],
                         atol=1e-5)
Example #10
    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_mesh = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings
        if elevated_camera:
            R, T = look_at_view_transform(2.7, 45.0, 0.0)
            postfix = "_elevated_camera"
        else:
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Init renderer
        rasterizer = MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        )
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=PhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_light%s.png" % postfix
            )

        # Load reference image
        image_ref_phong = load_rgb_image(
            "test_simple_sphere_illuminated%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

        ###################################
        # Move the light behind the object
        ###################################
        # Check the image is dark
        lights.location[..., 2] = +2.0
        images = renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_dark%s.png" % postfix
            )

        # Load reference image
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

        ######################################
        # Change the shader to a GouradShader
        ######################################
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=GouradShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_light_gourad%s.png" % postfix
            )

        # Load reference image
        image_ref_gourad = load_rgb_image(
            "test_simple_sphere_light_gourad%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_gourad, atol=0.005))
        self.assertFalse(torch.allclose(rgb, image_ref_phong, atol=0.005))
Example #11
    def test_simple_sphere_outside_zfar(self):
        """
        Test output when rendering a sphere that is beyond zfar with a SoftPhongShader.
        This renders a sphere of radius 500, with the camera at a distance of 1500,
        for different settings of zfar. This is intended to check that 1) setting
        cameras.zfar propagates to the blending step, so the rendered sphere is
        (softly) clipped when it lies beyond zfar, and 2) there are no numerical
        precision/overflow errors associated with larger world coordinates.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded() * 500
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded,
                             faces=faces_padded,
                             textures=textures)

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +1000.0],
                                       device=device)[None]

        raster_settings = RasterizationSettings(image_size=256,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)
        for zfar in (10000.0, 100.0):
            cameras = FoVPerspectiveCameras(device=device,
                                            R=R,
                                            T=T,
                                            aspect_ratio=1.0,
                                            fov=60.0,
                                            zfar=zfar)
            rasterizer = MeshRasterizer(cameras=cameras,
                                        raster_settings=raster_settings)
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 1.0))

            shader = SoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            filename = "test_simple_sphere_outside_zfar_%d.png" % int(zfar)

            # Load reference image
            image_ref = load_rgb_image(filename, DATA_DIR)

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / ("DEBUG_" + filename))

            self.assertClose(rgb, image_ref, atol=0.05)
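
The two zfar settings bracket the sphere's depth extent. Rough arithmetic, assuming the camera looks at the origin from a distance of 1500:

cam_dist, radius = 1500.0, 500.0
near_extent = cam_dist - radius  # 1000.0: closest point of the sphere
far_extent = cam_dist + radius   # 2000.0: farthest point of the sphere
# zfar=10000 keeps the whole sphere inside the depth range, while
# zfar=100 puts it entirely beyond the far plane, so only the blue
# background color from blend_params should remain.
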
Example #12
    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly
        """
        device = torch.device("cuda:0")
        DATA_DIR = (
            Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        )
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        # Load mesh + texture
        verts, faces, aux = load_obj(obj_filename)
        faces_idx = faces.verts_idx.to(device)
        verts = verts.to(device)
        texture_uvs = aux.verts_uvs
        materials = aux.material_colors
        tex_maps = aux.texture_images

        # tex_maps is a dictionary of material names as keys and texture images
        # as values. We only need the images for this example.
        textures = Textures(
            maps=list(tex_maps.values()),
            faces_uvs=faces.textures_idx.to(torch.int64).to(device)[None, :],
            verts_uvs=texture_uvs.to(torch.float32).to(device)[None, :],
        )
        mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 10, 20)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            shader=TexturedPhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_map.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map.png"
            )

        # There's a calculation instability on the corner of the ear of the cow.
        # We ignore that pixel.
        image_ref[137, 166] = 0
        rgb[137, 166] = 0

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

        # Check grad exists
        verts = verts.clone()
        verts.requires_grad = True
        mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)
        images = renderer(mesh)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)
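
Several of these examples rely on a load_rgb_image test helper. A minimal sketch of plausible behavior (load a PNG and rescale to [0, 1] floats); the optional directory argument matches how other examples call it, but the real helper may differ:

import numpy as np
import torch
from PIL import Image

def load_rgb_image(filename, data_dir=DATA_DIR):
    image = Image.open(data_dir / filename)
    # Keep only the RGB channels and rescale to the [0, 1] range.
    rgb = np.asarray(image).astype(np.float32)[..., :3] / 255.0
    return torch.from_numpy(rgb)
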
Example #13
    def test_texture_map_atlas(self):
        """
        Test a mesh with a texture map as a per face atlas is loaded and rendered correctly.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh and texture as a per face texture atlas.
        verts, faces, aux = load_obj(
            obj_filename,
            device=device,
            load_textures=True,
            create_texture_atlas=True,
            texture_atlas_size=8,
            texture_wrap=None,
        )
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[aux.texture_atlas]),
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                cull_backfaces=True)

        # Init shader settings
        materials = Materials(device=device,
                              specular_color=((0, 0, 0), ),
                              shininess=0.0)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # The HardPhongShader can be used directly with atlas textures.
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(lights=lights,
                                   cameras=cameras,
                                   materials=materials),
        )

        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_atlas_8x8_back.png")

        self.assertClose(rgb, image_ref, atol=0.05)
Example #14
    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly
        """
        device = torch.device("cuda:0")
        DATA_DIR = (Path(__file__).resolve().parent.parent /
                    "docs/tutorials/data")
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        # Load mesh + texture
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 10, 20)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(lights=lights,
                                           cameras=cameras,
                                           materials=materials),
        )
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_map.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map.png")

        # There's a calculation instability on the corner of the ear of the cow.
        # We ignore that pixel.
        image_ref[137, 166] = 0
        rgb[137, 166] = 0

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts],
                       faces=mesh.faces_list(),
                       textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        #################################
        # Add blurring to rasterization
        #################################

        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
            bin_size=0,
        )

        images = renderer(
            mesh.clone(),
            raster_settings=raster_settings,
            blend_params=blend_params,
        )
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_blurry_textured_rendering.png")

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
Example #15
    def test_cube_mesh_render(self):
        """
        End-to-end test of rendering a textured cube mesh
        from decreasing camera distances. The camera starts
        outside the cube and enters the inside of the cube.
        """
        device = torch.device("cuda:0")
        mesh = self.load_cube_mesh_with_texture(device)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=1e-8,
            faces_per_pixel=5,
            z_clip_value=1e-2,
            perspective_correct=True,
            bin_size=0,
        )

        # Only ambient, no diffuse or specular
        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
            location=[[0.0, 0.0, -3.0]],
        )

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(raster_settings=raster_settings),
            shader=SoftPhongShader(device=device, lights=lights),
        )

        # Render the cube by decreasing the distance from the camera until
        # the camera enters the cube. Check the output looks correct.
        images_list = []
        dists = np.linspace(0.1, 2.5, 25)[::-1]  # 25 steps so the sweep passes through d = 0.5
        for d in dists:
            R, T = look_at_view_transform(d, 0, 0)
            T[0, 1] -= 0.1  # move down in the y axis
            cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)
            images = renderer(mesh, cameras=cameras)
            rgb = images[0, ..., :3].cpu().detach()
            filename = "DEBUG_cube_dist=%.1f.jpg" % d
            im = (rgb.numpy() * 255).astype(np.uint8)
            images_list.append(im)

            # Check one of the images where the camera is inside the mesh.
            # Use np.isclose since linspace values are not exact binary floats.
            if np.isclose(d, 0.5):
                filename = "test_render_mesh_clipped_cam_dist=0.5.jpg"
                image_ref = load_rgb_image(filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)

        # Save a gif of the output - this should show
        # the camera moving inside the cube.
        if DEBUG:
            gif_filename = ("room_original.gif"
                            if raster_settings.z_clip_value is None else
                            "room_clipped.gif")
            imageio.mimsave(DATA_DIR / gif_filename, images_list, fps=2)
            save_obj(
                f=DATA_DIR / "cube.obj",
                verts=mesh.verts_packed().cpu(),
                faces=mesh.faces_packed().cpu(),
            )
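
The key setting in this example is z_clip_value: faces that cross the camera-space plane z = z_clip_value are clipped (and subdivided) by the rasterizer instead of producing degenerate projections once the camera is inside the mesh. A sketch of the two configurations that the gif naming above distinguishes:

# Clipping enabled: robust when the camera enters the cube.
clipped = RasterizationSettings(
    image_size=512, blur_radius=1e-8, faces_per_pixel=5,
    z_clip_value=1e-2, perspective_correct=True,
)
# Clipping disabled (the default): faces straddling the camera plane
# can render incorrectly.
unclipped = RasterizationSettings(
    image_size=512, blur_radius=1e-8, faces_per_pixel=5,
    z_clip_value=None, perspective_correct=True,
)
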
Example #16
    def test_join_uvs(self):
        """Meshes with TexturesUV joined into a scene"""
        # Test the result of rendering three tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        # This tests TexturesUV.join_scene with rectangle flipping,
        # and we check the form of the merged map as well.
        torch.manual_seed(1)
        device = torch.device("cuda:0")

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=256,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
        )
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=device,
                                   blend_params=blend_params,
                                   cameras=cameras,
                                   lights=lights),
        )

        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 0.5
        verts_shifted1[:, 1] += 7
        verts_shifted2 = verts.clone()
        verts_shifted2 *= 0.5
        verts_shifted2[:, 1] -= 7

        [faces] = plain_torus.faces_list()
        nocolor = torch.zeros((100, 100), device=device)
        color_gradient = torch.linspace(0, 1, steps=100, device=device)
        color_gradient1 = color_gradient[None].expand_as(nocolor)
        color_gradient2 = color_gradient[:, None].expand_as(nocolor)
        colors1 = torch.stack([nocolor, color_gradient1, color_gradient2],
                              dim=2)
        colors2 = torch.stack([color_gradient1, color_gradient2, nocolor],
                              dim=2)
        verts_uvs1 = torch.rand(size=(verts.shape[0], 2), device=device)
        verts_uvs2 = torch.rand(size=(verts.shape[0], 2), device=device)

        for i, align_corners, padding_mode in [
            (0, True, "border"),
            (1, False, "border"),
            (2, False, "zeros"),
        ]:
            textures1 = TexturesUV(
                maps=[colors1],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs1],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )

            # These downsamplings of colors2 are chosen to ensure a flip and a non flip
            # when the maps are merged.
            # We have maps of size (100, 100), (50, 99) and (99, 50).
            textures2 = TexturesUV(
                maps=[colors2[::2, :-1]],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs2],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )
            offset = torch.tensor([0, 0, 0.5], device=device)
            textures3 = TexturesUV(
                maps=[colors2[:-1, ::2] + offset],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs2],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )
            mesh1 = Meshes(verts=[verts], faces=[faces], textures=textures1)
            mesh2 = Meshes(verts=[verts_shifted1],
                           faces=[faces],
                           textures=textures2)
            mesh3 = Meshes(verts=[verts_shifted2],
                           faces=[faces],
                           textures=textures3)
            mesh = join_meshes_as_scene([mesh1, mesh2, mesh3])

            output = renderer(mesh)[0, ..., :3].cpu()
            output1 = renderer(mesh1)[0, ..., :3].cpu()
            output2 = renderer(mesh2)[0, ..., :3].cpu()
            output3 = renderer(mesh3)[0, ..., :3].cpu()
            # The background color is white and the objects do not overlap, so we can
            # predict the merged image by taking the minimum over every channel
            merged = torch.min(torch.min(output1, output2), output3)

            image_ref = load_rgb_image(f"test_joinuvs{i}_final.png", DATA_DIR)
            map_ref = load_rgb_image(f"test_joinuvs{i}_map.png", DATA_DIR)

            if DEBUG:
                Image.fromarray((output.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_final_.png")
                Image.fromarray((output.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_merged.png")

                Image.fromarray((output1.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_1.png")
                Image.fromarray((output2.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_2.png")
                Image.fromarray((output3.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_3.png")
                Image.fromarray((mesh.textures.maps_padded()[0].cpu().numpy() *
                                 255).astype(np.uint8)).save(
                                     DATA_DIR / f"test_joinuvs{i}_map_.png")
                Image.fromarray(
                    (mesh2.textures.maps_padded()[0].cpu().numpy() *
                     255).astype(np.uint8)).save(DATA_DIR /
                                                 f"test_joinuvs{i}_map2.png")
                Image.fromarray(
                    (mesh3.textures.maps_padded()[0].cpu().numpy() *
                     255).astype(np.uint8)).save(DATA_DIR /
                                                 f"test_joinuvs{i}_map3.png")

            self.assertClose(output, merged, atol=0.015)
            self.assertClose(output, image_ref, atol=0.05)
            self.assertClose(mesh.textures.maps_padded()[0].cpu(),
                             map_ref,
                             atol=0.05)
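
The elementwise-minimum compositing used above is worth isolating: whenever each individual render leaves the background at pure white (1.0 in every channel) and the objects do not overlap on screen, the per-pixel minimum of the renders reproduces the joint render. A minimal self-contained sketch of the idea (plain torch, no renderer required):

import torch

white = torch.ones(4, 4, 3)  # white-background "renders"
render_a = white.clone()
render_a[0:2, 0:2] = torch.tensor([1.0, 0.0, 0.0])  # red patch, top-left
render_b = white.clone()
render_b[2:4, 2:4] = torch.tensor([0.0, 0.0, 1.0])  # blue patch, bottom-right

# Every non-object pixel is 1.0 in both images, so the elementwise minimum
# keeps each object's color and leaves the shared background white.
merged = torch.min(render_a, render_b)
assert torch.equal(merged[0, 0], torch.tensor([1.0, 0.0, 0.0]))
assert torch.equal(merged[3, 3], torch.tensor([0.0, 0.0, 1.0]))
assert torch.equal(merged[0, 3], torch.tensor([1.0, 1.0, 1.0]))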
Example #17
    def test_simple_sphere(self):
        device = torch.device("cuda:0")
        ref_filename = "test_rasterized_sphere.png"
        image_ref_filename = DATA_DIR / ref_filename

        # Rescale image_ref to the 0 - 1 range and convert to a binary mask.
        image_ref = convert_image_to_binary_mask(image_ref_filename)

        # Init mesh
        sphere_mesh = ico_sphere(5, device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
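        # bin_size=0 selects the naive per-pixel rasterization path; the
        # default coarse-to-fine binning can overflow its per-bin face limit
        # on a finely subdivided mesh like ico_sphere(5).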
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init rasterizer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)

        ####################################
        # 1. Test rasterizing a single mesh
        ####################################

        fragments = rasterizer(sphere_mesh)
        image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
        # Convert pix_to_face to a binary mask
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere.png")

        self.assertTrue(torch.allclose(image, image_ref))

        ##################################
        #  2. Test with a batch of meshes
        ##################################

        batch_size = 10
        sphere_meshes = sphere_mesh.extend(batch_size)
        fragments = rasterizer(sphere_meshes)
        for i in range(batch_size):
            image = fragments.pix_to_face[i, ..., 0].squeeze().cpu()
            image[image >= 0] = 1.0
            image[image < 0] = 0.0
            self.assertTrue(torch.allclose(image, image_ref))

        ####################################################
        #  3. Test that passing kwargs to rasterizer works.
        ####################################################

        #  Change the view transform to zoom in.
        R, T = look_at_view_transform(2.0, 0, 0, device=device)
        fragments = rasterizer(sphere_mesh, R=R, T=T)
        image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        ref_filename = "test_rasterized_sphere_zoom.png"
        image_ref_filename = DATA_DIR / ref_filename
        image_ref = convert_image_to_binary_mask(image_ref_filename)

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere_zoom.png")
        self.assertTrue(torch.allclose(image, image_ref))
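
The pix_to_face thresholding above is repeated for every view; a small helper (hypothetical, not part of the test utilities) expresses it in one place:

import torch

def silhouette_from_fragments(fragments, batch_idx: int = 0) -> torch.Tensor:
    """Binary float mask: 1.0 where the first face layer hit a face, else 0.0.

    fragments.pix_to_face stores the index of the rasterized face per pixel,
    with -1 marking background pixels, so a >= 0 test gives the silhouette.
    """
    pix_to_face = fragments.pix_to_face[batch_idx, ..., 0].squeeze().cpu()
    return (pix_to_face >= 0).float()

With it, each check collapses to comparing silhouette_from_fragments(rasterizer(sphere_mesh)) against image_ref.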


Example #18
    def test_join_verts(self):
        """Meshes with TexturesVertex joined into a scene"""
        # Test the result of rendering two tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        torch.manual_seed(1)
        device = torch.device("cuda:0")
        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 0.5
        verts_shifted1[:, 1] += 7

        faces = plain_torus.faces_list()
        textures1 = TexturesVertex(verts_features=[torch.rand_like(verts)])
        textures2 = TexturesVertex(verts_features=[torch.rand_like(verts)])
        mesh1 = Meshes(verts=[verts], faces=faces, textures=textures1)
        mesh2 = Meshes(verts=[verts_shifted1], faces=faces, textures=textures2)
        mesh = join_meshes_as_scene([mesh1, mesh2])

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=256,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Ambient-only lighting (zero diffuse and specular terms) makes the
        # shaded color independent of surface normals, so the joined scene
        # compares cleanly against the individually rendered meshes.
        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
        )
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=device,
                                   blend_params=blend_params,
                                   cameras=cameras,
                                   lights=lights),
        )

        output = renderer(mesh)

        image_ref = load_rgb_image("test_joinverts_final.png", DATA_DIR)

        if DEBUG:
            debugging_outputs = []
            for mesh_ in [mesh1, mesh2]:
                debugging_outputs.append(renderer(mesh_))
            Image.fromarray((output[0, ..., :3].cpu().numpy() * 255).astype(
                np.uint8)).save(DATA_DIR / "test_joinverts_final_.png")
            Image.fromarray(
                (debugging_outputs[0][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_joinverts_1.png")
            Image.fromarray(
                (debugging_outputs[1][0, ..., :3].cpu().numpy() * 255).astype(
                    np.uint8)).save(DATA_DIR / "test_joinverts_2.png")

        result = output[0, ..., :3].cpu()
        self.assertClose(result, image_ref, atol=0.05)
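
join_meshes_as_scene concatenates the vertex, face and texture tensors of its inputs into a single mesh; the key bookkeeping detail is that the face indices of each later mesh must be offset by the vertex counts of the meshes before it. An illustrative sketch of that step for two meshes (not the library's implementation):

import torch

def join_two_meshes(verts1, faces1, verts2, faces2):
    """Concatenate two (V, 3) vertex / (F, 3) face tensors into one mesh."""
    verts = torch.cat([verts1, verts2], dim=0)
    # Faces of the second mesh index into the tail of the combined vertex
    # tensor, so shift them by the first mesh's vertex count.
    faces = torch.cat([faces1, faces2 + verts1.shape[0]], dim=0)
    return verts, faces

Per-vertex features (TexturesVertex) concatenate the same way along the vertex dimension, which is why the joined torus scene renders the same as compositing the two individual renders.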
Example #19
    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        textures = TexturesVertex(verts_features=torch.ones_like(verts_padded))
        sphere_mesh = Meshes(verts=verts_padded,
                             faces=faces_padded,
                             textures=textures)

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_camera"
            # If the y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)

        # Test several shaders: Phong interpolates normals per pixel, Gouraud
        # shades per vertex and interpolates the resulting colors, and the
        # flat shader uses a single face normal per triangle.
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(lights=lights,
                                 cameras=cameras,
                                 materials=materials)
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            filename = "simple_sphere_light_%s%s.png" % (name, postfix)
            image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename)
            self.assertClose(rgb, image_ref, atol=0.05)

        ########################################################
        # Move the light to the -z axis in world space so it is
        # behind the sphere (the camera looks on from +z).
        # Note that +Z is in, +Y up, +X left in camera space.
        ########################################################
        lights.location[..., 2] = -2.0
        phong_shader = HardPhongShader(lights=lights,
                                       cameras=cameras,
                                       materials=materials)
        phong_renderer = MeshRenderer(rasterizer=rasterizer,
                                      shader=phong_shader)
        images = phong_renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_dark%s.png" % postfix
            Image.fromarray(
                (rgb.numpy() * 255).astype(np.uint8)).save(DATA_DIR / filename)

        # Load reference image
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s.png" % postfix, DATA_DIR)
        self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
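
To see why z = -2.0 places the light behind the sphere: with elev = azim = 0, look_at_view_transform positions the camera on the +z axis looking at the origin, so the far side of the unit sphere lies at negative z. A quick check using PyTorch3D's public helper (the exact spherical-angle convention is my reading of the library and should be treated as an assumption):

import torch
from pytorch3d.renderer.cameras import camera_position_from_spherical_angles

# Expected to print roughly tensor([[0.0, 0.0, 2.7]]) under the assumed
# convention: the camera sits at +z, so a light at z = -2.0 is occluded
# by the unit sphere and the rendered image goes dark.
eye = camera_position_from_spherical_angles(2.7, 0.0, 0.0)
print(eye)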
Example #20
    def test_simple_sphere_screen(self):
        """
        Test output when rendering with PerspectiveCameras & OrthographicCameras
        in NDC vs screen space.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        R, T = look_at_view_transform(2.7, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        for cam_type in (PerspectiveCameras, OrthographicCameras):
            cameras = cam_type(
                device=device,
                R=R,
                T=T,
                principal_point=(
                    (
                        (512.0 - 1.0) / 2.0,
                        (512.0 - 1.0) / 2.0,
                    ),
                ),
                focal_length=(
                    (
                        (512.0 - 1.0) / 2.0,
                        (512.0 - 1.0) / 2.0,
                    ),
                ),
                image_size=((512, 512),),
                in_ndc=False,
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
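            # BlendParams takes (sigma, gamma, background_color) positionally;
            # this uses a black background.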
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()
            filename = "test_simple_sphere_light_phong_%s.png" % cam_type.__name__

            image_ref = load_rgb_image(filename, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
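
The focal lengths and principal points above are given in screen (pixel) space because in_ndc=False. Per my reading of the PyTorch3D camera conventions (treat the exact formulas as an assumption), the equivalent NDC parameters for an H x W image with s = min(H, W) follow the sketch below; note the sign flip on the principal point because +X points left in NDC:

def screen_to_ndc(fx_screen, px_screen, image_width, image_height):
    """Assumed convention: NDC spans [-1, 1] along the shorter image side."""
    s = min(image_width, image_height)
    fx_ndc = fx_screen * 2.0 / s
    px_ndc = -(px_screen - image_width / 2.0) * 2.0 / s
    return fx_ndc, px_ndc

# For the 512 x 512 test camera, focal (512 - 1) / 2 = 255.5 px maps to
# fx_ndc ~ 0.998 and the near-center principal point to px_ndc ~ 0.002,
# i.e. close to the NDC defaults of focal 1.0 at the image center.
print(screen_to_ndc((512.0 - 1.0) / 2.0, (512.0 - 1.0) / 2.0, 512, 512))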