def test_simple_sphere_batched(self):
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        # Shift vertices to check coordinate frames are correct.
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(
            points=verts_padded, features=torch.ones_like(verts_padded)
        )
        batch_size = 20
        pointclouds = pointclouds.extend(batch_size)
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=256, radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = NormWeightedCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        filename = "simple_pointcloud_sphere.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        images = renderer(pointclouds)
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref)
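
A note on the batching above: Pointclouds.extend(N) replicates each cloud N times to form the batch. A minimal sketch of the shape contract, assuming the same PyTorch3D imports as the test:

import torch
from pytorch3d.structures import Pointclouds
from pytorch3d.utils import ico_sphere

verts = ico_sphere(1).verts_padded()  # (1, 42, 3)
clouds = Pointclouds(points=verts, features=torch.ones_like(verts))
batched = clouds.extend(20)  # repeat the single cloud 20 times
assert len(batched) == 20
assert batched.points_padded().shape == (20, 42, 3)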

    def test_simple_sphere(self):
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        # Shift vertices to check coordinate frames are correct.
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(
            points=verts_padded, features=torch.ones_like(verts_padded)
        )
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=256, radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = NormWeightedCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        filename = "simple_pointcloud_sphere.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(pointclouds)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                # Use a separate variable so the prefix is not re-applied on the
                # second loop iteration.
                debug_filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / debug_filename
                )
            self.assertClose(rgb, image_ref)
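
In the loop above, bin_size=0 forces the naive rasterizer (every point tested against every pixel), while bin_size=None lets PyTorch3D choose its coarse-to-fine binned implementation; the test asserts both match the same reference. A short sketch, reusing cameras and pointclouds from the test, of inspecting the raw fragments under an explicit setting:

raster_settings = PointsRasterizationSettings(
    image_size=256, radius=5e-2, points_per_pixel=1, bin_size=0  # 0 = naive
)
rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
fragments = rasterizer(pointclouds)
# fragments.idx / fragments.zbuf / fragments.dists: (N, H, W, points_per_pixel)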
Example #3
    def test_render_pointcloud(self):
        """
        Test a textured point cloud is rendered correctly in a non square image.
        """
        device = torch.device("cuda:0")
        # `torus_points` is a module-level fixture in the original test file
        # (see the stand-in sketch after this example).
        pointclouds = Pointclouds(
            points=[torus_points * 2.0],
            features=torch.ones_like(torus_points[None, ...]),
        ).to(device)
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=(512, 1024), radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = AlphaCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        image_ref = load_rgb_image("test_pointcloud_rectangle_image.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(pointclouds)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_pointcloud_rectangle_image.png"
                )

            # NOTE: some pixels can be flaky, so compare with a loose tolerance.
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            self.assertTrue(cond1)
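
`torus_points` above is a module-level fixture from the original test file and is not defined in this snippet; a plausible stand-in (parameter values are assumed, not the originals):

from pytorch3d.utils import torus

# Hypothetical fixture: packed vertices of a torus mesh.
torus_points = torus(r=0.25, R=1.0, sides=5, rings=10).verts_packed()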

    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_meshes = Meshes(verts=verts_padded,
                               faces=faces_padded,
                               textures=textures)

        # Init rasterizer settings
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s.png" % name, DATA_DIR)
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                if i == 0 and DEBUG:
                    filename = "DEBUG_simple_sphere_batched_%s.png" % name
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename)
                self.assertClose(rgb, image_ref, atol=0.05)
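
OpenGLPerspectiveCameras used above is the legacy name; current PyTorch3D releases deprecate it in favor of FoVPerspectiveCameras, which takes the same arguments:

from pytorch3d.renderer import FoVPerspectiveCameras

cameras = FoVPerspectiveCameras(device=device, R=R, T=T)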
Example #5
    def test_render_voxels(self):
        """
        Test rendering meshes formed from voxels.
        """
        # Set up device and seed for random selections.
        device = torch.device("cuda:0")

        # Load dataset in the train split with only a single view returned for each model.
        r2n2_dataset = R2N2(
            "train",
            SHAPENET_PATH,
            R2N2_PATH,
            SPLITS_PATH,
            return_voxels=True,
            voxels_rel_path=VOXELS_REL_PATH,
        )
        r2n2_model = r2n2_dataset[6, [5]]
        vox_render = render_cubified_voxels(r2n2_model["voxels"], device=device)
        vox_render_rgb = vox_render[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((vox_render_rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / ("DEBUG_r2n2_voxel_to_mesh_render.png")
            )
        image_ref = load_rgb_image("test_r2n2_voxel_to_mesh_render.png", DATA_DIR)
        self.assertClose(vox_render_rgb, image_ref, atol=0.05)
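
render_cubified_voxels converts the R2N2 voxel grid into a mesh before rendering; the conversion is pytorch3d.ops.cubify. A minimal sketch of that step alone (0.5 is an assumed occupancy threshold):

from pytorch3d.ops import cubify

# voxels: an occupancy grid of shape (N, D, D, D); returns a Meshes object
# with one cube-built mesh per grid element.
meshes = cubify(r2n2_model["voxels"], thresh=0.5)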
    def load_cube_mesh_with_texture(self,
                                    device="cpu",
                                    with_grad: bool = False):
        verts = torch.tensor(
            [
                [-1, 1, 1],
                [1, 1, 1],
                [1, -1, 1],
                [-1, -1, 1],
                [-1, 1, -1],
                [1, 1, -1],
                [1, -1, -1],
                [-1, -1, -1],
            ],
            device=device,
            dtype=torch.float32,
            requires_grad=with_grad,
        )

        # all faces correctly wound
        faces = torch.tensor(
            [
                [0, 1, 4],
                [4, 1, 5],
                [1, 2, 5],
                [5, 2, 6],
                [2, 7, 6],
                [2, 3, 7],
                [3, 4, 7],
                [0, 4, 3],
                [4, 5, 6],
                [4, 6, 7],
            ],
            device=device,
            dtype=torch.int64,
        )

        verts_uvs = torch.tensor(
            [[
                [0, 1],
                [1, 1],
                [1, 0],
                [0, 0],
                [0.204, 0.743],
                [0.781, 0.743],
                [0.781, 0.154],
                [0.204, 0.154],
            ]],
            device=device,
            dtype=torch.float,
        )
        texture_map = load_rgb_image("room.jpg", DATA_DIR).to(device)
        textures = TexturesUV(maps=[texture_map],
                              faces_uvs=faces.unsqueeze(0),
                              verts_uvs=verts_uvs)
        mesh = Meshes([verts], [faces], textures=textures)
        if with_grad:
            return mesh, verts
        return mesh
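
A hedged usage sketch for this helper (assuming it lives on the same test class): with_grad=True returns the vertex tensor so that gradients flowing back from a render can be inspected.

mesh = self.load_cube_mesh_with_texture(device="cuda:0")
mesh_grad, verts = self.load_cube_mesh_with_texture(device="cuda:0", with_grad=True)
# After rendering mesh_grad and calling backward() on a scalar loss,
# verts.grad is populated.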
Example #7
    def test_join_verts(self):
        """Meshes with TexturesVertex joined into a scene"""
        # Test the result of rendering two tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        torch.manual_seed(1)
        device = torch.device("cuda:0")
        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 0.5
        verts_shifted1[:, 1] += 7

        faces = plain_torus.faces_list()
        textures1 = TexturesVertex(verts_features=[torch.rand_like(verts)])
        textures2 = TexturesVertex(verts_features=[torch.rand_like(verts)])
        mesh1 = Meshes(verts=[verts], faces=faces, textures=textures1)
        mesh2 = Meshes(verts=[verts_shifted1], faces=faces, textures=textures2)
        mesh = join_meshes_as_scene([mesh1, mesh2])

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )

        lights = AmbientLights(device=device)
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=HardPhongShader(
                device=device, blend_params=blend_params, cameras=cameras, lights=lights
            ),
        )

        output = renderer(mesh)

        image_ref = load_rgb_image("test_joinverts_final.png", DATA_DIR)

        if DEBUG:
            debugging_outputs = []
            for mesh_ in [mesh1, mesh2]:
                debugging_outputs.append(renderer(mesh_))
            Image.fromarray(
                (output[0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
            ).save(DATA_DIR / "test_joinverts_final_.png")
            Image.fromarray(
                (debugging_outputs[0][0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
            ).save(DATA_DIR / "test_joinverts_1.png")
            Image.fromarray(
                (debugging_outputs[1][0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
            ).save(DATA_DIR / "test_joinverts_2.png")

        result = output[0, ..., :3].cpu()
        self.assertClose(result, image_ref, atol=0.05)
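
join_meshes_as_scene collapses the input list into a single mesh, so the joined tori render in one pass; all inputs must carry the same texture class (here TexturesVertex). A quick sketch of the contract:

scene = join_meshes_as_scene([mesh1, mesh2])
assert len(scene) == 1  # one scene-level mesh, not a batch of two
# Mixing texture classes (e.g. TexturesVertex with TexturesUV) raises an error.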
Example #8
    def test_simple_sphere_outside_zfar(self):
        """
        Test output when rendering a sphere that is beyond zfar with a SoftPhongShader.
        This renders a sphere of radius 500, with the camera 1500 units from the
        origin, for different settings of zfar. This is intended to check that
        1) setting cameras.zfar propagates to the blending function, so the rendered
        sphere is (softly) clipped when it is beyond zfar, and 2) there are no
        numerical precision/overflow errors associated with larger world coordinates.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded() * 500
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )
        for zfar in (10000.0, 100.0):
            cameras = FoVPerspectiveCameras(
                device=device, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=zfar
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 1.0))

            shader = SoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            filename = "test_simple_sphere_outside_zfar_%d.png" % int(zfar)

            # Load reference image
            image_ref = load_rgb_image(filename, DATA_DIR)

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / ("DEBUG_" + filename)
                )

            self.assertClose(rgb, image_ref, atol=0.05)
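
The propagation being tested: SoftPhongShader reads znear/zfar off the camera and forwards them to the softmax blending function, so geometry past zfar fades toward the background color. A sketch of the two settings compared above:

# zfar well past the sphere: the sphere is visible.
cameras_far = FoVPerspectiveCameras(device=device, R=R, T=T, zfar=10000.0)
# zfar in front of the sphere (the camera is 1500 units out, sphere radius 500):
# the sphere is softly clipped and the image is essentially background.
cameras_near = FoVPerspectiveCameras(device=device, R=R, T=T, zfar=100.0)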
Example #9
    def test_simple_sphere_screen(self):
        """
        Test output when rendering with PerspectiveCameras & OrthographicCameras
        in NDC vs screen space.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        R, T = look_at_view_transform(2.7, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        half_half = (512.0 / 2.0, 512.0 / 2.0)
        for cam_type in (PerspectiveCameras, OrthographicCameras):
            cameras = cam_type(
                device=device,
                R=R,
                T=T,
                principal_point=(half_half,),
                focal_length=(half_half,),
                image_size=((512, 512),),
                in_ndc=False,
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()
            filename = "test_simple_sphere_light_phong_%s.png" % cam_type.__name__
            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"{filename}_.png"
                )

            image_ref = load_rgb_image(filename, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
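
The screen-space cameras above use pixel-unit intrinsics (in_ndc=False plus an explicit image_size). For a square 512 image, dividing by half the image size gives the equivalent NDC camera, so the intrinsics above correspond to focal_length=1.0 and principal_point=(0, 0) in NDC. A side-by-side sketch:

# Screen space: intrinsics in pixels, image_size required.
cam_screen = PerspectiveCameras(
    focal_length=((256.0, 256.0),),
    principal_point=((256.0, 256.0),),
    image_size=((512, 512),),
    in_ndc=False,
)
# Equivalent NDC camera for a 512x512 image.
cam_ndc = PerspectiveCameras(focal_length=1.0, principal_point=((0.0, 0.0),))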
Example #10
    def test_cameras_kwarg(self):
        """
        Test that when cameras are passed in as a kwarg the rendering
        works as expected
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        # No elevation or azimuth rotation
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        for cam_type in (
            FoVPerspectiveCameras,
            FoVOrthographicCameras,
            PerspectiveCameras,
            OrthographicCameras,
        ):
            cameras = cam_type(device=device, R=R, T=T)

            # Init shader settings
            materials = Materials(device=device)
            lights = PointLights(device=device)
            lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

            raster_settings = RasterizationSettings(
                image_size=512, blur_radius=0.0, faces_per_pixel=1
            )
            rasterizer = MeshRasterizer(raster_settings=raster_settings)
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            shader = HardPhongShader(
                lights=lights,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

            # Cameras can be passed into the renderer in the forward pass
            images = renderer(sphere_mesh, cameras=cameras)
            rgb = images.squeeze()[..., :3].cpu().numpy()
            image_ref = load_rgb_image(
                "test_simple_sphere_light_phong_%s.png" % cam_type.__name__, DATA_DIR
            )
            self.assertClose(rgb, image_ref, atol=0.05)
Example #11
    def test_simple_sphere_pulsar(self):
        for device in [torch.device("cpu"), torch.device("cuda")]:
            sphere_mesh = ico_sphere(1, device)
            verts_padded = sphere_mesh.verts_padded()
            # Shift vertices to check coordinate frames are correct.
            verts_padded[..., 1] += 0.2
            verts_padded[..., 0] += 0.2
            pointclouds = Pointclouds(
                points=verts_padded, features=torch.ones_like(verts_padded)
            )
            for azimuth in [0.0, 90.0]:
                R, T = look_at_view_transform(2.7, 0.0, azimuth)
                for camera_name, cameras in [
                    ("fovperspective", FoVPerspectiveCameras(device=device, R=R, T=T)),
                    (
                        "fovorthographic",
                        FoVOrthographicCameras(device=device, R=R, T=T),
                    ),
                    ("perspective", PerspectiveCameras(device=device, R=R, T=T)),
                    ("orthographic", OrthographicCameras(device=device, R=R, T=T)),
                ]:
                    raster_settings = PointsRasterizationSettings(
                        image_size=256, radius=5e-2, points_per_pixel=1
                    )
                    rasterizer = PointsRasterizer(
                        cameras=cameras, raster_settings=raster_settings
                    )
                    renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
                    # Load reference image
                    filename = (
                        "pulsar_simple_pointcloud_sphere_"
                        f"azimuth{azimuth}_{camera_name}.png"
                    )
                    image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                    images = renderer(
                        pointclouds, gamma=(1e-3,), znear=(1.0,), zfar=(100.0,)
                    )
                    rgb = images[0, ..., :3].squeeze().cpu()
                    if DEBUG:
                        filename = "DEBUG_%s" % filename
                        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                            DATA_DIR / filename
                        )
                    self.assertClose(rgb, image_ref, rtol=7e-3, atol=5e-3)
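
Two points worth noting in the pulsar test: PulsarPointsRenderer is a torch module and must be moved to the target device, and its forward pass requires blending/clipping parameters with one entry per batch element, passed as tuples:

renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
# One gamma / znear / zfar entry per element of the point cloud batch.
images = renderer(pointclouds, gamma=(1e-3,), znear=(1.0,), zfar=(100.0,))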
Example #12
    def test_render_by_r2n2_calibration(self):
        """
        Test rendering R2N2 models with calibration matrices from R2N2's own Blender
        in batches.
        """
        # Set up device and seed for random selections.
        device = torch.device("cuda:0")
        torch.manual_seed(39)

        # Load dataset in the train split.
        r2n2_dataset = R2N2("train", SHAPENET_PATH, R2N2_PATH, SPLITS_PATH)
        model_idxs = torch.randint(1000, (2, )).tolist()
        view_idxs = torch.randint(24, (2, )).tolist()
        raster_settings = RasterizationSettings(image_size=512)
        lights = PointLights(
            location=torch.tensor([0.0, 1.0, -2.0], device=device)[None],
            # TODO(nikhilar): debug the source of the discrepancy in two images when
            # rendering on GPU.
            diffuse_color=((0, 0, 0), ),
            specular_color=((0, 0, 0), ),
            device=device,
        )
        r2n2_batch = r2n2_dataset.render(
            idxs=model_idxs,
            view_idxs=view_idxs,
            device=device,
            raster_settings=raster_settings,
            lights=lights,
        )
        for idx in range(4):
            r2n2_batch_rgb = r2n2_batch[idx, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray(
                    (r2n2_batch_rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR /
                        ("DEBUG_r2n2_render_with_blender_calibrations_%s.png" %
                         idx))
            image_ref = load_rgb_image(
                "test_r2n2_render_with_blender_calibrations_%s.png" % idx,
                DATA_DIR)
            self.assertClose(r2n2_batch_rgb, image_ref, atol=0.05)
Example #13
    def test_texture_map_atlas(self):
        """
        Test a mesh with a texture map as a per face atlas is loaded and rendered correctly.
        Also check that the backward pass for texture atlas rendering is differentiable.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh and texture as a per face texture atlas.
        verts, faces, aux = load_obj(
            obj_filename,
            device=device,
            load_textures=True,
            create_texture_atlas=True,
            texture_atlas_size=8,
            texture_wrap=None,
        )
        atlas = aux.texture_atlas
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[atlas]),
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
            cull_backfaces=True,
            perspective_correct=False,
        )

        # Init shader settings
        materials = Materials(device=device, specular_color=((0, 0, 0),), shininess=0.0)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # The HardPhongShader can be used directly with atlas textures.
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
        )

        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze()

        # Load reference image
        image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.detach().cpu().numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_atlas_8x8_back.png"
            )

        self.assertClose(rgb.cpu(), image_ref, atol=0.05)

        # Check gradients are propagated
        # correctly back to the texture atlas.
        # Because of how texture sampling is implemented
        # for the texture atlas it is not possible to get
        # gradients back to the vertices.
        atlas.requires_grad = True
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[atlas]),
        )
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0001,
            faces_per_pixel=5,
            cull_backfaces=True,
            clip_barycentric_coords=True,
        )
        images = renderer(mesh, raster_settings=raster_settings)
        images[0, ...].sum().backward()

        fragments = rasterizer(mesh, raster_settings=raster_settings)
        # Some of the bary coordinates are outside the
        # [0, 1] range, as expected, because the blur radius is > 0
        self.assertTrue(fragments.bary_coords.ge(1.0).any())
        self.assertIsNotNone(atlas.grad)
        self.assertTrue(atlas.grad.sum().abs() > 0.0)
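
The atlas produced by load_obj with create_texture_atlas=True holds one small texture per face. A shape sketch for building a TexturesAtlas by hand (random values, sizes matching the test's texture_atlas_size=8):

F = faces.verts_idx.shape[0]
# One (R, R, 3) texture per face, with R = texture_atlas_size.
atlas = torch.rand(F, 8, 8, 3, device=device)
textures = TexturesAtlas(atlas=[atlas])  # list with one (F, R, R, 3) tensor per mesh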
Example #14
    def test_joined_spheres(self):
        """
        Test a list of Meshes can be joined as a single mesh and
        the single mesh is rendered correctly with Phong, Gouraud
        and Flat Shaders.
        """
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        # Initialize a list containing two ico spheres of different sizes.
        sphere_list = [ico_sphere(3, device), ico_sphere(4, device)]
        # [(642 verts, 1280 faces), (2562 verts, 5120 faces)]
        # Scale factors applied to the vertices to resize the spheres.
        scales = [0.25, 1]
        # Horizontal offsets applied to the spheres to prevent overlap.
        offsets = [1.2, -0.3]
        # Initialize a list containing the adjusted sphere meshes.
        sphere_mesh_list = []
        for i in range(len(sphere_list)):
            verts = sphere_list[i].verts_padded() * scales[i]
            verts[0, :, 0] += offsets[i]
            sphere_mesh_list.append(
                Meshes(verts=verts, faces=sphere_list[i].faces_padded())
            )
        joined_sphere_mesh = join_meshes_as_scene(sphere_mesh_list)
        joined_sphere_mesh.textures = TexturesVertex(
            verts_features=torch.ones_like(joined_sphere_mesh.verts_padded())
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
            perspective_correct=False,
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            image = renderer(joined_sphere_mesh)
            rgb = image[..., :3].squeeze().cpu()
            if DEBUG:
                file_name = "DEBUG_joined_spheres_%s.png" % name
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / file_name
                )
            image_ref = load_rgb_image("test_joined_spheres_%s.png" % name, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
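
The companion to join_meshes_as_scene is join_meshes_as_batch, which keeps the inputs as separate batch elements instead of merging them into one mesh. A contrast sketch (mesh_a and mesh_b are placeholders):

from pytorch3d.structures import join_meshes_as_batch, join_meshes_as_scene

batch = join_meshes_as_batch([mesh_a, mesh_b])  # len == 2, rendered independently
scene = join_meshes_as_scene([mesh_a, mesh_b])  # len == 1, rendered as one scene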
Example #15
    def test_join_atlas(self):
        """Meshes with TexturesAtlas joined into a scene"""
        # Test the result of rendering two tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        torch.manual_seed(1)
        device = torch.device("cuda:0")
        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 1.2
        verts_shifted1[:, 0] += 4
        verts_shifted1[:, 1] += 5
        verts[:, 0] -= 4
        verts[:, 1] -= 4

        [faces] = plain_torus.faces_list()
        map_size = 3
        # Two random atlases.
        # The averaging of the random numbers here is not consistent with the
        # meaning of the atlases, but makes each face a bit smoother than
        # if everything had a random color.
        atlas1 = torch.rand(size=(faces.shape[0], map_size, map_size, 3), device=device)
        atlas1[:, 1] = 0.5 * atlas1[:, 0] + 0.5 * atlas1[:, 2]
        atlas1[:, :, 1] = 0.5 * atlas1[:, :, 0] + 0.5 * atlas1[:, :, 2]
        atlas2 = torch.rand(size=(faces.shape[0], map_size, map_size, 3), device=device)
        atlas2[:, 1] = 0.5 * atlas2[:, 0] + 0.5 * atlas2[:, 2]
        atlas2[:, :, 1] = 0.5 * atlas2[:, :, 0] + 0.5 * atlas2[:, :, 2]

        textures1 = TexturesAtlas(atlas=[atlas1])
        textures2 = TexturesAtlas(atlas=[atlas2])
        mesh1 = Meshes(verts=[verts], faces=[faces], textures=textures1)
        mesh2 = Meshes(verts=[verts_shifted1], faces=[faces], textures=textures2)
        mesh_joined = join_meshes_as_scene([mesh1, mesh2])

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
            perspective_correct=False,
        )

        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0),),
            diffuse_color=((0.0, 0.0, 0.0),),
            specular_color=((0.0, 0.0, 0.0),),
        )
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=HardPhongShader(
                device=device, blend_params=blend_params, cameras=cameras, lights=lights
            ),
        )

        output = renderer(mesh_joined)

        image_ref = load_rgb_image("test_joinatlas_final.png", DATA_DIR)

        if DEBUG:
            debugging_outputs = []
            for mesh_ in [mesh1, mesh2]:
                debugging_outputs.append(renderer(mesh_))
            Image.fromarray(
                (output[0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
            ).save(DATA_DIR / "test_joinatlas_final_.png")
            Image.fromarray(
                (debugging_outputs[0][0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
            ).save(DATA_DIR / "test_joinatlas_1.png")
            Image.fromarray(
                (debugging_outputs[1][0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
            ).save(DATA_DIR / "test_joinatlas_2.png")

        result = output[0, ..., :3].cpu()
        self.assertClose(result, image_ref, atol=0.05)
Example #16
    def test_join_uvs(self):
        """Meshes with TexturesUV joined into a scene"""
        # Test the result of rendering three tori with separate textures.
        # The expected result is consistent with rendering them each alone.
        # This tests TexturesUV.join_scene with rectangle flipping,
        # and we check the form of the merged map as well.
        torch.manual_seed(1)
        device = torch.device("cuda:0")

        R, T = look_at_view_transform(18, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )

        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0),),
            diffuse_color=((0.0, 0.0, 0.0),),
            specular_color=((0.0, 0.0, 0.0),),
        )
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=HardPhongShader(
                device=device, blend_params=blend_params, cameras=cameras, lights=lights
            ),
        )

        plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
        [verts] = plain_torus.verts_list()
        verts_shifted1 = verts.clone()
        verts_shifted1 *= 0.5
        verts_shifted1[:, 1] += 7
        verts_shifted2 = verts.clone()
        verts_shifted2 *= 0.5
        verts_shifted2[:, 1] -= 7

        [faces] = plain_torus.faces_list()
        nocolor = torch.zeros((100, 100), device=device)
        color_gradient = torch.linspace(0, 1, steps=100, device=device)
        color_gradient1 = color_gradient[None].expand_as(nocolor)
        color_gradient2 = color_gradient[:, None].expand_as(nocolor)
        colors1 = torch.stack([nocolor, color_gradient1, color_gradient2], dim=2)
        colors2 = torch.stack([color_gradient1, color_gradient2, nocolor], dim=2)
        verts_uvs1 = torch.rand(size=(verts.shape[0], 2), device=device)
        verts_uvs2 = torch.rand(size=(verts.shape[0], 2), device=device)

        for i, align_corners, padding_mode in [
            (0, True, "border"),
            (1, False, "border"),
            (2, False, "zeros"),
        ]:
            textures1 = TexturesUV(
                maps=[colors1],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs1],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )

            # These downsamplings of colors2 are chosen to ensure a flip and a non flip
            # when the maps are merged.
            # We have maps of size (100, 100), (50, 99) and (99, 50).
            textures2 = TexturesUV(
                maps=[colors2[::2, :-1]],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs2],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )
            offset = torch.tensor([0, 0, 0.5], device=device)
            textures3 = TexturesUV(
                maps=[colors2[:-1, ::2] + offset],
                faces_uvs=[faces],
                verts_uvs=[verts_uvs2],
                align_corners=align_corners,
                padding_mode=padding_mode,
            )
            mesh1 = Meshes(verts=[verts], faces=[faces], textures=textures1)
            mesh2 = Meshes(verts=[verts_shifted1], faces=[faces], textures=textures2)
            mesh3 = Meshes(verts=[verts_shifted2], faces=[faces], textures=textures3)
            mesh = join_meshes_as_scene([mesh1, mesh2, mesh3])

            output = renderer(mesh)[0, ..., :3].cpu()
            output1 = renderer(mesh1)[0, ..., :3].cpu()
            output2 = renderer(mesh2)[0, ..., :3].cpu()
            output3 = renderer(mesh3)[0, ..., :3].cpu()
            # The background color is white and the objects do not overlap, so we can
            # predict the merged image by taking the minimum over every channel
            merged = torch.min(torch.min(output1, output2), output3)

            image_ref = load_rgb_image(f"test_joinuvs{i}_final.png", DATA_DIR)
            map_ref = load_rgb_image(f"test_joinuvs{i}_map.png", DATA_DIR)

            if DEBUG:
                Image.fromarray((output.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_final_.png"
                )
                Image.fromarray((merged.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_merged.png"
                )

                Image.fromarray((output1.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_1.png"
                )
                Image.fromarray((output2.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_2.png"
                )
                Image.fromarray((output3.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"test_joinuvs{i}_3.png"
                )
                Image.fromarray(
                    (mesh.textures.maps_padded()[0].cpu().numpy() * 255).astype(
                        np.uint8
                    )
                ).save(DATA_DIR / f"test_joinuvs{i}_map_.png")
                Image.fromarray(
                    (mesh2.textures.maps_padded()[0].cpu().numpy() * 255).astype(
                        np.uint8
                    )
                ).save(DATA_DIR / f"test_joinuvs{i}_map2.png")
                Image.fromarray(
                    (mesh3.textures.maps_padded()[0].cpu().numpy() * 255).astype(
                        np.uint8
                    )
                ).save(DATA_DIR / f"test_joinuvs{i}_map3.png")

            self.assertClose(output, merged, atol=0.015)
            self.assertClose(output, image_ref, atol=0.05)
            self.assertClose(mesh.textures.maps_padded()[0].cpu(), map_ref, atol=0.05)
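
When TexturesUV meshes are joined into a scene, their maps are packed into one merged map (flipping rectangles where that packs tighter) and the verts_uvs are remapped into it; that is what the map_ref comparison above checks. A quick inspection sketch:

packed_map = mesh.textures.maps_padded()  # (1, H, W, 3): one merged map per scene
print(packed_map.shape)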
Example #17
    def test_simple_sphere(self, elevated_camera=False, check_depth=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                have an elevation of 45 degrees.
            check_depth: If True, render with MeshRendererWithFragments and check
                that the returned fragments' zbuf matches the rasterizer's output.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_"
            # If y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = "_"
        for cam_type in (
            FoVPerspectiveCameras,
            FoVOrthographicCameras,
            PerspectiveCameras,
            OrthographicCameras,
        ):
            cameras = cam_type(device=device, R=R, T=T)

            # Init shader settings
            materials = Materials(device=device)
            lights = PointLights(device=device)
            lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

            raster_settings = RasterizationSettings(
                image_size=512, blur_radius=0.0, faces_per_pixel=1
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            # Test several shaders
            shaders = {
                "phong": HardPhongShader,
                "gouraud": HardGouraudShader,
                "flat": HardFlatShader,
            }
            for (name, shader_init) in shaders.items():
                shader = shader_init(
                    lights=lights,
                    cameras=cameras,
                    materials=materials,
                    blend_params=blend_params,
                )
                if check_depth:
                    renderer = MeshRendererWithFragments(
                        rasterizer=rasterizer, shader=shader
                    )
                    images, fragments = renderer(sphere_mesh)
                    self.assertClose(fragments.zbuf, rasterizer(sphere_mesh).zbuf)
                else:
                    renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
                    images = renderer(sphere_mesh)

                rgb = images[0, ..., :3].squeeze().cpu()
                filename = "simple_sphere_light_%s%s%s.png" % (
                    name,
                    postfix,
                    cam_type.__name__,
                )

                image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)

                if DEBUG:
                    filename = "DEBUG_%s" % filename
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )

            ########################################################
            # Move the light to the -z axis in world space so it is
            # behind the sphere (the camera is on the +z side). Note
            # that in camera space +Z is in, +Y up, +X left.
            ########################################################
            lights.location[..., 2] = -2.0
            phong_shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            if check_depth:
                phong_renderer = MeshRendererWithFragments(
                    rasterizer=rasterizer, shader=phong_shader
                )
                images, fragments = phong_renderer(sphere_mesh, lights=lights)
                self.assertClose(
                    fragments.zbuf, rasterizer(sphere_mesh, lights=lights).zbuf
                )
            else:
                phong_renderer = MeshRenderer(
                    rasterizer=rasterizer, shader=phong_shader
                )
                images = phong_renderer(sphere_mesh, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_simple_sphere_dark%s%s.png" % (
                    postfix,
                    cam_type.__name__,
                )
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )

            image_ref_phong_dark = load_rgb_image(
                "test_simple_sphere_dark%s%s.png" % (postfix, cam_type.__name__),
                DATA_DIR,
            )
            self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
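
MeshRendererWithFragments, used in the check_depth branch, returns the rasterizer fragments alongside the image; this is how the depth comparison above obtains a z-buffer:

renderer = MeshRendererWithFragments(rasterizer=rasterizer, shader=shader)
images, fragments = renderer(sphere_mesh)
depth = fragments.zbuf  # (N, H, W, faces_per_pixel); -1 where no face was hit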
Example #18
    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly.
        The pupils in the eyes of the cow should always be looking to the left.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        verts, faces, aux = load_obj(
            obj_filename, device=device, load_textures=True, texture_wrap=None
        )
        tex_map = list(aux.texture_images.values())[0]
        tex_map = tex_map[None, ...].to(faces.textures_idx.device)
        textures = TexturesUV(
            maps=tex_map, faces_uvs=[faces.textures_idx], verts_uvs=[aux.verts_uvs]
        )
        mesh = Meshes(verts=[verts], faces=[faces.verts_idx], textures=textures)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            ),
        )

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_back.png"
                )

            # NOTE some pixels can be flaky and will not lead to
            # `cond1` being true. Add `cond2` and check `cond1 or cond2`
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        ##########################################
        # Check rendering of the front of the cow
        ##########################################

        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        # Move light to the front of the cow in world space
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_front.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(mesh, cameras=cameras, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_front.png"
                )

            # NOTE some pixels can be flaky and will not lead to
            # `cond1` being true. Add `cond2` and check `cond1 or cond2`
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        #################################
        # Add blurring to rasterization
        #################################
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
            clip_barycentric_coords=True,
            perspective_correct=False,
        )

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(
                mesh.clone(),
                cameras=cameras,
                raster_settings=raster_settings,
                blend_params=blend_params,
            )
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_blurry_textured_rendering.png"
                )

            self.assertClose(rgb, image_ref, atol=0.05)
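
The blur radius in the final block follows the SoftRas-style relation between sigma and the support of the sigmoid coverage: a pixel at squared distance d from a face contributes sigmoid(-d / sigma), so choosing blur_radius = log(1/eps - 1) * sigma makes the contribution drop to eps at the edge of the blur region. As a worked check:

import numpy as np

sigma = 5e-4
eps = 1e-4
blur_radius = np.log(1.0 / eps - 1.0) * sigma  # ~4.6e-3
# At squared distance blur_radius, sigmoid(-blur_radius / sigma) == eps,
# i.e. the face's soft coverage is effectively zero there.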
Example #19
    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders with batched
        lighting and hard and soft blending.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_meshes = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=4
        )

        # Init shader settings
        materials = Materials(device=device)
        lights_location = torch.tensor([0.0, 0.0, +2.0], device=device)
        lights_location = lights_location[None].expand(batch_size, -1)
        lights = PointLights(device=device, location=lights_location)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        # `ShaderTest` is a module-level helper in the original test file
        # (see the stand-in sketch after this example).
        shader_tests = [
            ShaderTest(HardPhongShader, "phong", "hard_phong"),
            ShaderTest(SoftPhongShader, "phong", "soft_phong"),
            ShaderTest(HardGouraudShader, "gouraud", "hard_gouraud"),
            ShaderTest(HardFlatShader, "flat", "hard_flat"),
        ]
        for test in shader_tests:
            reference_name = test.reference_name
            debug_name = test.debug_name
            shader = test.shader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s_%s.png"
                % (reference_name, type(cameras).__name__),
                DATA_DIR,
            )
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                if i == 0 and DEBUG:
                    filename = "DEBUG_simple_sphere_batched_%s_%s.png" % (
                        debug_name,
                        type(cameras).__name__,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )
                self.assertClose(rgb, image_ref, atol=0.05)
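
`ShaderTest` above is a module-level helper from the original test file and is not defined in this snippet; a plausible stand-in with the field order implied by its use:

from collections import namedtuple

ShaderTest = namedtuple("ShaderTest", ["shader", "reference_name", "debug_name"])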

    def test_render_shapenet_core(self):
        """
        Test rendering objects from ShapeNetCore.
        """
        # Setup device and seed for random selections.
        device = torch.device("cuda:0")
        torch.manual_seed(39)

        # Load category piano from ShapeNetCore.
        piano_dataset = ShapeNetCore(SHAPENET_PATH, synsets=["piano"])

        # Rendering settings.
        R, T = look_at_view_transform(1.0, 1.0, 90)
        cameras = FoVPerspectiveCameras(R=R, T=T, device=device)
        raster_settings = RasterizationSettings(image_size=512)
        lights = PointLights(
            location=torch.tensor([0.0, 1.0, -2.0], device=device)[None],
            # TODO: debug the source of the discrepancy in two images when rendering on GPU.
            diffuse_color=((0, 0, 0), ),
            specular_color=((0, 0, 0), ),
            device=device,
        )

        # Render first three models in the piano category.
        pianos = piano_dataset.render(
            idxs=list(range(3)),
            device=device,
            cameras=cameras,
            raster_settings=raster_settings,
            lights=lights,
        )
        # Check that there are three images in the batch.
        self.assertEqual(pianos.shape[0], 3)

        # Compare the rendered models to the reference images.
        for idx in range(3):
            piano_rgb = pianos[idx, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray(
                    (piano_rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR /
                        ("DEBUG_shapenet_core_render_piano_by_idxs_%s.png" %
                         idx))
            image_ref = load_rgb_image(
                "test_shapenet_core_render_piano_%s.png" % idx, DATA_DIR)
            self.assertClose(piano_rgb, image_ref, atol=0.05)

        # Render the same piano models but by model_ids this time.
        pianos_2 = piano_dataset.render(
            model_ids=[
                "13394ca47c89f91525a3aaf903a41c90",
                "14755c2ee8e693aba508f621166382b0",
                "156c4207af6d2c8f1fdc97905708b8ea",
            ],
            device=device,
            cameras=cameras,
            raster_settings=raster_settings,
            lights=lights,
        )

        # Compare the rendered models to the reference images.
        for idx in range(3):
            piano_rgb_2 = pianos_2[idx, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray(
                    (piano_rgb_2.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR /
                        ("DEBUG_shapenet_core_render_piano_by_ids_%s.png" %
                         idx))
            image_ref = load_rgb_image(
                "test_shapenet_core_render_piano_%s.png" % idx, DATA_DIR)
            self.assertClose(piano_rgb_2, image_ref, atol=0.05)

        #######################
        # Render by categories
        #######################

        # Load ShapeNetCore.
        shapenet_dataset = ShapeNetCore(SHAPENET_PATH)

        # Render a mixture of categories and specify the number of models to be
        # randomly sampled from each category.
        mixed_objs = shapenet_dataset.render(
            categories=["faucet", "chair"],
            sample_nums=[2, 1],
            device=device,
            cameras=cameras,
            raster_settings=raster_settings,
            lights=lights,
        )
        # Compare the rendered models to the reference images.
        for idx in range(3):
            mixed_rgb = mixed_objs[idx, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray((mixed_rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR
                    / ("DEBUG_shapenet_core_render_mixed_by_categories_%s.png" % idx)
                )
            image_ref = load_rgb_image(
                "test_shapenet_core_render_mixed_by_categories_%s.png" % idx, DATA_DIR
            )
            self.assertClose(mixed_rgb, image_ref, atol=0.05)

        # Render a mixture of categories without specifying sample_nums.
        mixed_objs_2 = shapenet_dataset.render(
            categories=["faucet", "chair"],
            device=device,
            cameras=cameras,
            raster_settings=raster_settings,
            lights=lights,
        )
        # Compare the rendered models to the reference images.
        for idx in range(2):
            mixed_rgb_2 = mixed_objs_2[idx, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray((mixed_rgb_2.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR
                    / ("DEBUG_shapenet_core_render_without_sample_nums_%s.png" % idx)
                )
            image_ref = load_rgb_image(
                "test_shapenet_core_render_without_sample_nums_%s.png" % idx, DATA_DIR
            )
            self.assertClose(mixed_rgb_2, image_ref, atol=0.05)

    def test_render_cow(self):
        """
        Test a larger textured mesh is rendered correctly in a non square image.
        """
        device = torch.device("cuda:0")
        obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        verts, faces, aux = load_obj(obj_filename,
                                     device=device,
                                     load_textures=True,
                                     texture_wrap=None)
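        # TexturesUV expects a single batched texture map of shape (N, H, W, 3).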
        tex_map = list(aux.texture_images.values())[0]
        tex_map = tex_map[None, ...].to(faces.textures_idx.device)
        textures = TexturesUV(maps=tex_map,
                              faces_uvs=[faces.textures_idx],
                              verts_uvs=[aux.verts_uvs])
        mesh = Meshes(verts=[verts],
                      faces=[faces.verts_idx],
                      textures=textures)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=(512, 1024),
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=SoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            ),
        )

        # Load reference image
        image_ref = load_rgb_image("test_cow_image_rectangle.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_cow_image_rectangle.png")

            # NOTE some pixels can be flaky
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            self.assertTrue(cond1)

    def test_pointcloud_with_features(self):
        device = torch.device("cuda:0")
        file_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        pointcloud_filename = file_dir / "PittsburghBridge/pointcloud.npz"

        # Note, this file is too large to check in to the repo.
        # Download the file to run the test locally.
        if not path.exists(pointcloud_filename):
            url = "https://dl.fbaipublicfiles.com/pytorch3d/data/PittsburghBridge/pointcloud.npz"
            msg = (
                "pointcloud.npz not found, download from %s, save it at the path %s, and rerun"
                % (url, pointcloud_filename)
            )
            warnings.warn(msg)
            # Return None, not True: returning a value from a unittest test
            # method is deprecated in newer Python versions.
            return

        # Load point cloud
        pointcloud = np.load(pointcloud_filename)
        verts = torch.Tensor(pointcloud["verts"]).to(device)
        rgb_feats = torch.Tensor(pointcloud["rgb"]).to(device)

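        # Track gradients so the test can confirm below that rendering is
        # differentiable w.r.t. both the point positions and their features.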
        verts.requires_grad = True
        rgb_feats.requires_grad = True
        point_cloud = Pointclouds(points=[verts], features=[rgb_feats])

        R, T = look_at_view_transform(20, 10, 0)
        cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)

        raster_settings = PointsRasterizationSettings(
            # Set image_size so it is not a multiple of 16 (min bin_size)
            # in order to confirm that there are no errors in coarse rasterization.
            image_size=500,
            radius=0.003,
            points_per_pixel=10,
        )

        renderer = PointsRenderer(
            rasterizer=PointsRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            compositor=AlphaCompositor(),
        )

        # Load reference image
        filename = "bridge_pointcloud.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(point_cloud)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.detach().numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref, atol=0.015)

        # Check grad exists.
        grad_images = torch.randn_like(images)
        images.backward(grad_images)
        self.assertIsNotNone(verts.grad)
        self.assertIsNotNone(rgb_feats.grad)
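
        # Hedged sketch (an addition, not part of the original test): the
        # gradients verified above can drive a short fitting loop. A zero
        # image stands in here for a real optimization target.
        target_images = torch.zeros_like(images)
        optimizer = torch.optim.Adam([verts, rgb_feats], lr=1e-3)
        for _ in range(5):
            optimizer.zero_grad()
            imgs = renderer(Pointclouds(points=[verts], features=[rgb_feats]))
            loss = ((imgs[..., :3] - target_images[..., :3]) ** 2).mean()
            loss.backward()
            optimizer.step()
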
Example #23
    def test_save_obj_with_texture(self):
        verts = torch.tensor(
            [[0.01, 0.2, 0.301], [0.2, 0.03, 0.408], [0.3, 0.4, 0.05],
             [0.6, 0.7, 0.8]],
            dtype=torch.float32,
        )
        faces = torch.tensor([[0, 2, 1], [0, 1, 2], [3, 2, 1], [3, 1, 0]],
                             dtype=torch.int64)
        verts_uvs = torch.tensor(
            [[0.02, 0.5], [0.3, 0.03], [0.32, 0.12], [0.36, 0.17]],
            dtype=torch.float32,
        )
        faces_uvs = faces
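        # Random 2x2 RGB texture map with values in [0, 1).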
        texture_map = torch.randint(size=(2, 2, 3), high=255) / 255.0

        with TemporaryDirectory() as temp_dir:
            obj_file = os.path.join(temp_dir, "mesh.obj")
            save_obj(
                obj_file,
                verts,
                faces,
                decimal_places=2,
                verts_uvs=verts_uvs,
                faces_uvs=faces_uvs,
                texture_map=texture_map,
            )

            expected_obj_file = "\n".join([
                "",
                "mtllib mesh.mtl",
                "usemtl mesh",
                "",
                "v 0.01 0.20 0.30",
                "v 0.20 0.03 0.41",
                "v 0.30 0.40 0.05",
                "v 0.60 0.70 0.80",
                "vt 0.02 0.50",
                "vt 0.30 0.03",
                "vt 0.32 0.12",
                "vt 0.36 0.17",
                "f 1/1 3/3 2/2",
                "f 1/1 2/2 3/3",
                "f 4/4 3/3 2/2",
                "f 4/4 2/2 1/1",
            ])
            expected_mtl_file = "\n".join(
                ["newmtl mesh", "map_Kd mesh.png", ""])

            # Check there are only 3 files in the temp dir
            tempfiles = ["mesh.obj", "mesh.png", "mesh.mtl"]
            tempfiles_dir = os.listdir(temp_dir)
            self.assertEqual(Counter(tempfiles), Counter(tempfiles_dir))

            # Check the obj file is saved correctly
            with open(obj_file, "r") as actual_file:
                self.assertEqual(actual_file.read(), expected_obj_file)

            # Check the mtl file is saved correctly
            mtl_file_name = os.path.join(temp_dir, "mesh.mtl")
            with open(mtl_file_name, "r") as mtl_file:
                self.assertEqual(mtl_file.read(), expected_mtl_file)

            # Check the texture image file is saved correctly
            texture_image = load_rgb_image("mesh.png", temp_dir)
            self.assertClose(texture_image, texture_map)
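
            # Hedged round-trip check (an addition, not in the original test;
            # it assumes load_obj is importable from pytorch3d.io): reading
            # the file back should reproduce the saved geometry up to the
            # decimal_places=2 rounding used above.
            verts_read, faces_read, _ = load_obj(obj_file, load_textures=False)
            self.assertClose(verts_read, verts, atol=6e-3)
            self.assertTrue(torch.equal(faces_read.verts_idx, faces))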

    def test_texture_map_atlas(self):
        """
        Test a mesh with a texture map as a per face atlas is loaded and rendered correctly.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh and texture as a per face texture atlas.
        verts, faces, aux = load_obj(
            obj_filename,
            device=device,
            load_textures=True,
            create_texture_atlas=True,
            texture_atlas_size=8,
            texture_wrap=None,
        )
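        # aux.texture_atlas packs one 8x8 RGB texture per face: shape (F, 8, 8, 3).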
        mesh = Meshes(
            verts=[verts],
            faces=[faces.verts_idx],
            textures=TexturesAtlas(atlas=[aux.texture_atlas]),
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                cull_backfaces=True)

        # Init shader settings
        materials = Materials(device=device,
                              specular_color=((0, 0, 0), ),
                              shininess=0.0)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # The HardPhongShader can be used directly with atlas textures.
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(lights=lights,
                                   cameras=cameras,
                                   materials=materials),
        )

        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_atlas_8x8_back.png")

        self.assertClose(rgb, image_ref, atol=0.05)

    def test_cube_mesh_render(self):
        """
        End-End test of rendering a cube mesh with texture
        from decreasing camera distances. The camera starts
        outside the cube and enters the inside of the cube.
        """
        device = torch.device("cuda:0")
        mesh = self.load_cube_mesh_with_texture(device)
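        # z_clip_value clips triangles that cross z = 0.01 in view space, so
        # faces straddling or behind the camera plane are cut rather than
        # projected incorrectly once the camera moves inside the cube.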
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=1e-8,
            faces_per_pixel=5,
            z_clip_value=1e-2,
            perspective_correct=True,
            bin_size=0,
        )

        # Only ambient, no diffuse or specular
        lights = PointLights(
            device=device,
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
            location=[[0.0, 0.0, -3.0]],
        )

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(raster_settings=raster_settings),
            shader=SoftPhongShader(device=device, lights=lights),
        )

        # Render the cube by decreasing the distance from the camera until
        # the camera enters the cube. Check the output looks correct.
        images_list = []
        # 25 steps give a step size of 0.1, so the sweep includes d = 0.5,
        # which is checked against a reference image below. (With 20 steps the
        # sweep never hits 0.5 and the check would silently be skipped.)
        dists = np.linspace(0.1, 2.5, 25)[::-1]
        for d in dists:
            R, T = look_at_view_transform(d, 0, 0)
            T[0, 1] -= 0.1  # move down in the y axis
            cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)
            images = renderer(mesh, cameras=cameras)
            rgb = images[0, ..., :3].cpu().detach()
            filename = "DEBUG_cube_dist=%.1f.jpg" % d
            im = (rgb.numpy() * 255).astype(np.uint8)
            images_list.append(im)

            # Check one of the images where the camera is inside the mesh
            if np.isclose(d, 0.5):
                filename = "test_render_mesh_clipped_cam_dist=0.5.jpg"
                image_ref = load_rgb_image(filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)

        # Save a gif of the output - this should show
        # the camera moving inside the cube.
        if DEBUG:
            gif_filename = ("room_original.gif"
                            if raster_settings.z_clip_value is None else
                            "room_clipped.gif")
            imageio.mimsave(DATA_DIR / gif_filename, images_list, fps=2)
            save_obj(
                f=DATA_DIR / "cube.obj",
                verts=mesh.verts_packed().cpu(),
                faces=mesh.faces_packed().cpu(),
            )
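
# A hedged sketch (an addition, not part of the test above): swapping these
# settings into the loop in test_cube_mesh_render disables near-plane
# clipping, which lets faces cross the camera plane and makes the artifacts
# that z_clip_value prevents easy to inspect side by side.
raster_settings_noclip = RasterizationSettings(
    image_size=512,
    blur_radius=1e-8,
    faces_per_pixel=5,
    z_clip_value=None,  # disable near-plane clipping for comparison
    perspective_correct=True,
    bin_size=0,
)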
Example #26
    def test_render_r2n2(self):
        """
        Test rendering objects from R2N2 selected both by indices and model_ids.
        """
        # Set up device and seed for random selections.
        device = torch.device("cuda:0")
        torch.manual_seed(39)

        # Load dataset in the train split.
        r2n2_dataset = R2N2("train", SHAPENET_PATH, R2N2_PATH, SPLITS_PATH)

        # Rendering settings.
        R, T = look_at_view_transform(1.0, 1.0, 90)
        cameras = FoVPerspectiveCameras(R=R, T=T, device=device)
        raster_settings = RasterizationSettings(image_size=512)
        lights = PointLights(
            location=torch.tensor([0.0, 1.0, -2.0], device=device)[None],
            # TODO: debug the source of the discrepancy in two images when rendering on GPU.
            diffuse_color=((0, 0, 0),),
            specular_color=((0, 0, 0),),
            device=device,
        )

        # Render the first three models in the dataset.
        r2n2_by_idxs = r2n2_dataset.render(
            idxs=list(range(3)),
            device=device,
            cameras=cameras,
            raster_settings=raster_settings,
            lights=lights,
        )
        # Check that there are three images in the batch.
        self.assertEqual(r2n2_by_idxs.shape[0], 3)

        # Compare the rendered models to the reference images.
        for idx in range(3):
            r2n2_by_idxs_rgb = r2n2_by_idxs[idx, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray((r2n2_by_idxs_rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / ("DEBUG_r2n2_render_by_idxs_%s.png" % idx)
                )
            image_ref = load_rgb_image(
                "test_r2n2_render_by_idxs_and_ids_%s.png" % idx, DATA_DIR
            )
            self.assertClose(r2n2_by_idxs_rgb, image_ref, atol=0.05)

        # Render the same models but by model_ids this time.
        r2n2_by_model_ids = r2n2_dataset.render(
            model_ids=[
                "1a4a8592046253ab5ff61a3a2a0e2484",
                "1a04dcce7027357ab540cc4083acfa57",
                "1a9d0480b74d782698f5bccb3529a48d",
            ],
            device=device,
            cameras=cameras,
            raster_settings=raster_settings,
            lights=lights,
        )

        # Compare the rendered models to the reference images.
        for idx in range(3):
            r2n2_by_model_ids_rgb = r2n2_by_model_ids[idx, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray(
                    (r2n2_by_model_ids_rgb.numpy() * 255).astype(np.uint8)
                ).save(DATA_DIR / ("DEBUG_r2n2_render_by_model_ids_%s.png" % idx))
            image_ref = load_rgb_image(
                "test_r2n2_render_by_idxs_and_ids_%s.png" % idx, DATA_DIR
            )
            self.assertClose(r2n2_by_model_ids_rgb, image_ref, atol=0.05)

        ###############################
        # Test rendering by categories
        ###############################

        # Render a mixture of categories.
        categories = ["chair", "lamp"]
        mixed_objs = r2n2_dataset.render(
            categories=categories,
            sample_nums=[1, 2],
            device=device,
            cameras=cameras,
            raster_settings=raster_settings,
            lights=lights,
        )
        # Compare the rendered models to the reference images.
        for idx in range(3):
            mixed_rgb = mixed_objs[idx, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray((mixed_rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / ("DEBUG_r2n2_render_by_categories_%s.png" % idx)
                )
            image_ref = load_rgb_image(
                "test_r2n2_render_by_categories_%s.png" % idx, DATA_DIR
            )
            self.assertClose(mixed_rgb, image_ref, atol=0.05)
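
# A minimal usage sketch of the dataset render API exercised above (an
# addition; it assumes SHAPENET_PATH, R2N2_PATH and SPLITS_PATH point at
# local copies of the datasets).
dataset = R2N2("val", SHAPENET_PATH, R2N2_PATH, SPLITS_PATH)
images = dataset.render(idxs=[0], device=torch.device("cuda:0"))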