Example #1
    def test_grad(self):
        """
        Check that gradient flow is unaffected when the camera is inside the mesh
        """
        device = torch.device("cuda:0")
        mesh, verts = self.load_cube_mesh_with_texture(device=device,
                                                       with_grad=True)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=1e-5,
            faces_per_pixel=5,
            z_clip_value=1e-2,
            perspective_correct=True,
            bin_size=0,
        )

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(raster_settings=raster_settings),
            shader=SoftPhongShader(device=device),
        )
        dist = 0.4  # Camera is inside the cube
        R, T = look_at_view_transform(dist, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)
        images = renderer(mesh, cameras=cameras)
        images.sum().backward()

        # Check gradients exist
        self.assertIsNotNone(verts.grad)
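These snippets are excerpts from the PyTorch3D test suite and assume the usual imports. A sketch of the ones used across this page (module paths follow the public pytorch3d API; DEBUG, DATA_DIR, and load_rgb_image are test-suite helpers assumed to be in scope, and TexturedSoftPhongShader is a deprecated alias for SoftPhongShader in recent releases):

from pathlib import Path

import imageio
import numpy as np
import torch
from PIL import Image

from pytorch3d.io import load_obj, save_obj
from pytorch3d.renderer import (
    BlendParams,
    FoVPerspectiveCameras,
    Materials,
    MeshRasterizer,
    MeshRenderer,
    PointLights,
    RasterizationSettings,
    SoftPhongShader,
    SoftSilhouetteShader,
    TexturedSoftPhongShader,
    TexturesUV,
    TexturesVertex,
    look_at_view_transform,
)
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere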
Example #2
    def test_simple_sphere_outside_zfar(self):
        """
        Test output when rendering a sphere that is beyond zfar with a SoftPhongShader.
        This renders a sphere of radius 500, with the camera at x=1500, for different
        settings of zfar. It is intended to check that 1) setting cameras.zfar
        propagates to the blending function, so the rendered sphere is (softly)
        clipped if it lies beyond zfar, and 2) there are no numerical precision or
        overflow errors associated with large world coordinates.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded() * 500
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )
        for zfar in (10000.0, 100.0):
            cameras = FoVPerspectiveCameras(
                device=device, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=zfar
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 1.0))

            shader = SoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            filename = "test_simple_sphere_outside_zfar_%d.png" % int(zfar)

            # Load reference image
            image_ref = load_rgb_image(filename, DATA_DIR)

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / ("DEBUG_" + filename)
                )

            self.assertClose(rgb, image_ref, atol=0.05)
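A minimal sketch of how the soft far-clipping could be checked numerically; background_fraction is a hypothetical helper, not part of the test. With zfar=100 the sphere lies beyond the far plane, so most pixels should match the blue background set in BlendParams:

def background_fraction(rgb, bg=(0.0, 0.0, 1.0), tol=0.05):
    # Fraction of pixels within tol of the background colour.
    bg_t = torch.tensor(bg, dtype=rgb.dtype)
    return ((rgb - bg_t).abs().amax(dim=-1) < tol).float().mean().item()

# Expected: a much larger fraction for zfar=100 than for zfar=10000.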
Example #3
    # Create the lights
    lights = PointLights(
        device=device,
        location=[[args.light_pos_x, args.light_pos_y, args.light_pos_z]])

    # Create the materials
    materials = Materials(device=device,
                          specular_color=[[0.2, 0.2, 0.2]],
                          shininess=10.0)

    # Create the shader
    if args.shader == "soft_silhouette_shader":
        shader = SoftSilhouetteShader()
    elif args.shader == "soft_phong_shader":
        shader = SoftPhongShader(device=device,
                                 cameras=cameras,
                                 lights=lights,
                                 materials=materials)
    elif args.shader == "textured_soft_phong_shader":
        shader = TexturedSoftPhongShader(device=device,
                                         cameras=cameras,
                                         lights=lights,
                                         materials=materials)
    else:
        raise NotImplementedError(f"Unknown shader: {args.shader}")

    # Create the renderer
    renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

    #================================
    # Rendering loop
    #================================
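The snippet above assumes cameras and rasterizer were built earlier in the script. A minimal sketch of that setup, where the args.camera_* and args.image_size fields are assumptions named to match the args.light_pos_* arguments used above:

R, T = look_at_view_transform(dist=args.camera_dist,
                              elev=args.camera_elev,
                              azim=args.camera_azim)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
rasterizer = MeshRasterizer(
    cameras=cameras,
    raster_settings=RasterizationSettings(image_size=args.image_size,
                                          blur_radius=0.0,
                                          faces_per_pixel=1),
)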
Example #4
    def test_cube_mesh_render(self):
        """
        End-to-end test of rendering a cube mesh with texture
        from decreasing camera distances. The camera starts
        outside the cube and enters the inside of the cube.
        """
        device = torch.device("cuda:0")
        mesh = self.load_cube_mesh_with_texture(device)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=1e-8,
            faces_per_pixel=5,
            z_clip_value=1e-2,
            perspective_correct=True,
            bin_size=0,
        )

        # Only ambient, no diffuse or specular
        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
            location=[[0.0, 0.0, -3.0]],
        )

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(raster_settings=raster_settings),
            shader=SoftPhongShader(device=device, lights=lights),
        )

        # Render the cube by decreasing the distance from the camera until
        # the camera enters the cube. Check the output looks correct.
        images_list = []
        dists = np.linspace(0.1, 2.5, 25)[::-1]  # 0.1 spacing so d = 0.5 is sampled
        for d in dists:
            R, T = look_at_view_transform(d, 0, 0)
            T[0, 1] -= 0.1  # move down in the y axis
            cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)
            images = renderer(mesh, cameras=cameras)
            rgb = images[0, ..., :3].cpu().detach()
            filename = "DEBUG_cube_dist=%.1f.jpg" % d
            im = (rgb.numpy() * 255).astype(np.uint8)
            images_list.append(im)

            # Check one of the images where the camera is inside the mesh
            if np.isclose(d, 0.5):  # exact float equality would be fragile here
                filename = "test_render_mesh_clipped_cam_dist=0.5.jpg"
                image_ref = load_rgb_image(filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)

        # Save a gif of the output - this should show
        # the camera moving inside the cube.
        if DEBUG:
            gif_filename = ("room_original.gif"
                            if raster_settings.z_clip_value is None else
                            "room_clipped.gif")
            imageio.mimsave(DATA_DIR / gif_filename, images_list, fps=2)
            save_obj(
                f=DATA_DIR / "cube.obj",
                verts=mesh.verts_packed().cpu(),
                faces=mesh.faces_packed().cpu(),
            )
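For contrast, a minimal sketch (not part of the test) of the unclipped variant that the gif naming above distinguishes: with z_clip_value=None, faces crossing the camera's near plane are not clipped, which typically produces artifacts once the camera is inside the cube.

renderer.rasterizer.raster_settings = RasterizationSettings(
    image_size=512,
    blur_radius=1e-8,
    faces_per_pixel=5,
    z_clip_value=None,  # disable near-plane clipping
    perspective_correct=True,
    bin_size=0,
)
images_unclipped = renderer(mesh, cameras=cameras)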
    def test_to(self):
        # Test moving all the tensors in the renderer to a new device
        # to support multigpu rendering.
        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device1)
        lights = PointLights(device=device1)
        lights.location = torch.tensor([0.0, 0.0, +1000.0],
                                       device=device1)[None]

        raster_settings = RasterizationSettings(image_size=256,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)
        cameras = FoVPerspectiveCameras(device=device1,
                                        R=R,
                                        T=T,
                                        aspect_ratio=1.0,
                                        fov=60.0,
                                        zfar=100)
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)

        blend_params = BlendParams(
            1e-4,
            1e-4,
            background_color=torch.zeros(3,
                                         dtype=torch.float32,
                                         device=device1),
        )

        shader = SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

        def _check_props_on_device(renderer, device):
            self.assertEqual(renderer.rasterizer.cameras.device, device)
            self.assertEqual(renderer.shader.cameras.device, device)
            self.assertEqual(renderer.shader.lights.device, device)
            self.assertEqual(renderer.shader.lights.ambient_color.device,
                             device)
            self.assertEqual(renderer.shader.materials.device, device)
            self.assertEqual(renderer.shader.materials.ambient_color.device,
                             device)

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        textures = TexturesVertex(
            verts_features=torch.ones_like(verts_padded, device=device1))
        mesh.textures = textures
        _check_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device1)

        # Move renderer and mesh to another device and re render
        # This also tests that background_color is correctly moved to
        # the new device
        device2 = torch.device("cuda:0")
        renderer.to(device2)
        mesh = mesh.to(device2)
        _check_props_on_device(renderer, device2)
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device2)
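Because MeshRenderer.to moves the cameras, lights, materials, and blend parameters together, the move is symmetric. A short sketch continuing the test above:

# Move everything back to the CPU and re-render.
renderer.to(device1)
mesh = mesh.to(device1)
output_images = renderer(mesh)
assert output_images.device == device1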
    def test_render_cow(self):
        """
        Test a larger textured mesh is rendered correctly in a non square image.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        verts, faces, aux = load_obj(obj_filename,
                                     device=device,
                                     load_textures=True,
                                     texture_wrap=None)
        tex_map = list(aux.texture_images.values())[0]
        tex_map = tex_map[None, ...].to(faces.textures_idx.device)
        textures = TexturesUV(maps=tex_map,
                              faces_uvs=[faces.textures_idx],
                              verts_uvs=[aux.verts_uvs])
        mesh = Meshes(verts=[verts],
                      faces=[faces.verts_idx],
                      textures=textures)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=(512, 1024),
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=SoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            ),
        )

        # Load reference image
        image_ref = load_rgb_image("test_cow_image_rectangle.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_cow_image_rectangle.png")

            # NOTE: some pixels can be flaky, so compare with a loose tolerance
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            self.assertTrue(cond1)
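The bin_size loop compares the naive per-pixel rasterizer (bin_size=0) with the binned coarse-to-fine path (bin_size=None lets PyTorch3D pick a bin size heuristically). A minimal sketch of the same comparison made directly on the rasterizer's fragments, assuming the mesh and renderer from the test:

renderer.rasterizer.raster_settings.bin_size = 0
fragments_naive = renderer.rasterizer(mesh)
renderer.rasterizer.raster_settings.bin_size = None
fragments_binned = renderer.rasterizer(mesh)

# Both paths should select the same closest face for (nearly) every pixel.
agreement = (fragments_naive.pix_to_face == fragments_binned.pix_to_face).float().mean()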