Example #1
    def test_simple_sphere(self):
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        # Shift vertices to check coordinate frames are correct.
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(
            points=verts_padded, features=torch.ones_like(verts_padded)
        )
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=256, radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = NormWeightedCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        filename = "simple_pointcloud_sphere.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(pointclouds)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                # Use a separate name so the DEBUG_ prefix is not
                # applied twice across the two bin_size iterations.
                debug_filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / debug_filename
                )
            self.assertClose(rgb, image_ref)
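These snippets reference module-level helpers that are defined elsewhere in the test suite. A minimal sketch of the imports and helpers Example #1 relies on; DEBUG, DATA_DIR, and load_rgb_image below are illustrative stand-ins, not the suite's actual definitions:

from pathlib import Path

import numpy as np
import torch
from PIL import Image
from pytorch3d.renderer import (
    FoVPerspectiveCameras,
    NormWeightedCompositor,
    PointsRasterizationSettings,
    PointsRasterizer,
    PointsRenderer,
    look_at_view_transform,
)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils import ico_sphere

DEBUG = False  # set True to save rendered images for inspection
DATA_DIR = Path(__file__).parent / "data"  # assumed location of reference images


def load_rgb_image(filename, data_dir=DATA_DIR):
    # Stand-in for the test helper: load a reference PNG and rescale
    # it to the [0, 1] float range used by the rendered images.
    with Image.open(data_dir / filename) as image:
        return torch.from_numpy(np.asarray(image, dtype=np.float32)) / 255.0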
Example #2
def baryclip_cuda(
    num_meshes: int = 8,
    ico_level: int = 5,
    image_size: int = 64,
    faces_per_pixel: int = 50,
    device="cuda",
):
    # Init meshes
    sphere_meshes = ico_sphere(ico_level, device).extend(num_meshes)
    # Init transform
    R, T = look_at_view_transform(1.0, 0.0, 0.0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    # Init rasterizer
    raster_settings = RasterizationSettings(
        image_size=image_size,
        blur_radius=1e-4,
        faces_per_pixel=faces_per_pixel,
        clip_barycentric_coords=True,
    )
    rasterizer = MeshRasterizer(cameras=cameras,
                                raster_settings=raster_settings)

    torch.cuda.synchronize()

    def raster_fn():
        rasterizer(sphere_meshes)
        torch.cuda.synchronize()

    return raster_fn
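baryclip_cuda only builds and returns a closure; the timing driver is not shown. A minimal harness that could exercise the returned raster_fn (the driver below is an assumption, not the project's actual benchmark runner):

import time


def time_fn(fn, warmup=3, iters=10):
    # raster_fn calls torch.cuda.synchronize() itself, so wall-clock
    # time here includes the GPU work each call launches.
    for _ in range(warmup):
        fn()
    start = time.perf_counter()
    for _ in range(iters):
        fn()
    return (time.perf_counter() - start) / iters


raster_fn = baryclip_cuda(num_meshes=4, ico_level=3, image_size=64)
print("mean seconds per rasterization: %.4f" % time_fn(raster_fn))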
Example #3
    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_meshes = Meshes(verts=verts_padded,
                               faces=faces_padded,
                               textures=textures)

        # Init rasterizer settings
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s.png" % name, DATA_DIR)
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                if i == 0 and DEBUG:
                    filename = "DEBUG_simple_sphere_batched_%s.png" % name
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename)
                self.assertClose(rgb, image_ref, atol=0.05)
Example #4
    def test_simple_sphere_batched(self):
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(
            points=verts_padded, features=torch.ones_like(verts_padded)
        )
        batch_size = 20
        pointclouds = pointclouds.extend(batch_size)
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=256, radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = NormWeightedCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        filename = "simple_pointcloud_sphere.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        images = renderer(pointclouds)
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref)
Example #5
    def test_simple_sphere_outside_zfar(self):
        """
        Test output when rendering a sphere that is beyond zfar with a SoftPhongShader.
        This renders a sphere of radius 500, with the camera at a distance of 1500,
        for different settings of zfar. It is intended to check 1) that setting
        cameras.zfar propagates to the blender, so the rendered sphere is (soft)
        clipped if it is beyond zfar, and 2) that there are no numerical
        precision/overflow errors associated with larger world coordinates.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded() * 500
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )
        for zfar in (10000.0, 100.0):
            cameras = FoVPerspectiveCameras(
                device=device, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=zfar
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 1.0))

            shader = SoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            filename = "test_simple_sphere_outside_zfar_%d.png" % int(zfar)

            # Load reference image
            image_ref = load_rgb_image(filename, DATA_DIR)

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / ("DEBUG_" + filename)
                )

            self.assertClose(rgb, image_ref, atol=0.05)
Example #6
    def test_simple_sphere_screen(self):
        """
        Test output when rendering with PerspectiveCameras & OrthographicCameras
        in NDC vs screen space.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        R, T = look_at_view_transform(2.7, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        half_half = (512.0 / 2.0, 512.0 / 2.0)
        for cam_type in (PerspectiveCameras, OrthographicCameras):
            cameras = cam_type(
                device=device,
                R=R,
                T=T,
                principal_point=(half_half,),
                focal_length=(half_half,),
                image_size=((512, 512),),
                in_ndc=False,
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()
            filename = "test_simple_sphere_light_phong_%s.png" % cam_type.__name__
            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"{filename}_.png"
                )

            image_ref = load_rgb_image(filename, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
Example #7
    def test_silhouette_with_grad(self):
        """
        Test silhouette blending. Also check that gradient calculation works.
        """
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(5, device)
        verts, faces = sphere_mesh.get_mesh_verts_faces(0)
        sphere_mesh = Meshes(verts=[verts], faces=[faces])

        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=80,
            clip_barycentric_coords=True,
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        for cam_type in (
            FoVPerspectiveCameras,
            FoVOrthographicCameras,
            PerspectiveCameras,
            OrthographicCameras,
        ):
            cameras = cam_type(device=device, R=R, T=T)

            # Init renderer
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(
                    cameras=cameras, raster_settings=raster_settings
                ),
                shader=SoftSilhouetteShader(blend_params=blend_params),
            )
            images = renderer(sphere_mesh)
            alpha = images[0, ..., 3].squeeze().cpu()
            if DEBUG:
                filename = os.path.join(
                    DATA_DIR, "DEBUG_%s_silhouette.png" % (cam_type.__name__)
                )
                Image.fromarray((alpha.detach().numpy() * 255).astype(np.uint8)).save(
                    filename
                )

            ref_filename = "test_%s_silhouette.png" % (cam_type.__name__)
            image_ref_filename = DATA_DIR / ref_filename
            with Image.open(image_ref_filename) as raw_image_ref:
                image_ref = torch.from_numpy(np.array(raw_image_ref))

            image_ref = image_ref.to(dtype=torch.float32) / 255.0
            self.assertClose(alpha, image_ref, atol=0.055)

            # Check grad exist
            verts.requires_grad = True
            sphere_mesh = Meshes(verts=[verts], faces=[faces])
            images = renderer(sphere_mesh)
            images[0, ...].sum().backward()
            self.assertIsNotNone(verts.grad)
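The gradient check above is the building block for silhouette-based mesh fitting. A hedged sketch of how the same renderer could drive an optimization loop, reusing renderer, verts, faces, and image_ref from the test; the learning rate and iteration count are illustrative:

        deform = torch.zeros_like(verts, requires_grad=True)
        optimizer = torch.optim.Adam([deform], lr=1e-2)
        for _ in range(50):
            optimizer.zero_grad()
            mesh = Meshes(verts=[verts.detach() + deform], faces=[faces])
            alpha = renderer(mesh)[0, ..., 3]
            # Match the rendered silhouette to the reference silhouette.
            loss = ((alpha - image_ref.to(alpha.device)) ** 2).mean()
            loss.backward()
            optimizer.step()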
Example #8
    def test_mesh_renderer_to(self):
        """
        Test moving all the tensors in the mesh renderer to a new device.
        """

        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device1)
        lights = PointLights(device=device1)
        lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device1)[None]

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )
        cameras = FoVPerspectiveCameras(
            device=device1, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=100
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

        blend_params = BlendParams(
            1e-4,
            1e-4,
            background_color=torch.zeros(3, dtype=torch.float32, device=device1),
        )

        shader = SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        textures = TexturesVertex(
            verts_features=torch.ones_like(verts_padded, device=device1)
        )
        mesh.textures = textures
        self._check_mesh_renderer_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device1)

        # Move renderer and mesh to another device and re render
        # This also tests that background_color is correctly moved to
        # the new device
        device2 = torch.device("cuda:0")
        renderer = renderer.to(device2)
        mesh = mesh.to(device2)
        self._check_mesh_renderer_props_on_device(renderer, device2)
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device2)
Example #9
    def test_simple_sphere_batched(self):
        """
        Test output of phong shading matches a reference image using
        the default values for the light sources.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_meshes = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            shader=HardPhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_meshes)

        # Load ref image
        image_ref = load_rgb_image("test_simple_sphere_light.png")

        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / f"DEBUG_simple_sphere_{i}.png"
                )
            self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
Example #10
    def test_render_meshes(self):
        test = self

        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                mesh = ico_sphere(3)
                self.register_buffer("faces", mesh.faces_padded())
                self.renderer = self.init_render()

            def init_render(self):

                cameras = FoVPerspectiveCameras()
                raster_settings = RasterizationSettings(
                    image_size=128, blur_radius=0.0, faces_per_pixel=1
                )
                lights = PointLights(
                    ambient_color=((1.0, 1.0, 1.0),),
                    diffuse_color=((0, 0.0, 0),),
                    specular_color=((0.0, 0, 0),),
                    location=((0.0, 0.0, 1e5),),
                )
                renderer = MeshRenderer(
                    rasterizer=MeshRasterizer(
                        cameras=cameras, raster_settings=raster_settings
                    ),
                    shader=HardGouraudShader(cameras=cameras, lights=lights),
                )
                return renderer

            def forward(self, verts, texs):
                batch_size = verts.size(0)
                self.renderer = self.renderer.to(verts.device)
                tex = TexturesVertex(verts_features=texs)
                faces = self.faces.expand(batch_size, -1, -1).to(verts.device)
                mesh = Meshes(verts, faces, tex).to(verts.device)

                test._check_mesh_renderer_props_on_device(self.renderer, verts.device)
                img_render = self.renderer(mesh)
                return img_render[:, :, :, :3]

        # DataParallel requires every input tensor be provided
        # on the first device in its device_ids list.
        verts = ico_sphere(3).verts_padded()
        texs = verts.new_ones(verts.shape)
        model = Model()
        model.to(GPU_LIST[0])
        model = nn.DataParallel(model, device_ids=GPU_LIST)

        # Test a few iterations
        for _ in range(100):
            model(verts, texs)
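GPU_LIST above is defined elsewhere in the test module; a plausible stand-in (an assumption, not the suite's actual definition) would be:

GPU_LIST = [torch.device("cuda:%d" % i) for i in range(torch.cuda.device_count())]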
Example #11
    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders.
        """
        batch_size = 20
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_meshes = Meshes(verts=verts_padded,
                               faces=faces_padded,
                               textures=textures)

        # Init rasterizer settings
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        shaders = {
            "phong": HardGouraudShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(lights=lights,
                                 cameras=cameras,
                                 materials=materials)
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image("test_simple_sphere_light_%s.png" %
                                       name)
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
Example #12
    def test_cameras_kwarg(self):
        """
        Test that when cameras are passed in as a kwarg the rendering
        works as expected
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        # No elevation or azimuth rotation
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        for cam_type in (
            FoVPerspectiveCameras,
            FoVOrthographicCameras,
            PerspectiveCameras,
            OrthographicCameras,
        ):
            cameras = cam_type(device=device, R=R, T=T)

            # Init shader settings
            materials = Materials(device=device)
            lights = PointLights(device=device)
            lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

            raster_settings = RasterizationSettings(
                image_size=512, blur_radius=0.0, faces_per_pixel=1
            )
            rasterizer = MeshRasterizer(raster_settings=raster_settings)
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            shader = HardPhongShader(
                lights=lights,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

            # Cameras can be passed into the renderer in the forward pass
            images = renderer(sphere_mesh, cameras=cameras)
            rgb = images.squeeze()[..., :3].cpu().numpy()
            image_ref = load_rgb_image(
                "test_simple_sphere_light_phong_%s.png" % cam_type.__name__, DATA_DIR
            )
            self.assertClose(rgb, image_ref, atol=0.05)
Example #13
    def subdivide_meshes_with_init(num_meshes: int = 10,
                                   same_topo: bool = False):
        device = torch.device("cuda:0")
        meshes = ico_sphere(0, device=device)
        if num_meshes > 1:
            meshes = meshes.extend(num_meshes)
        meshes_init = meshes.clone() if same_topo else None
        torch.cuda.synchronize()

        def subdivide_meshes():
            subdivide = SubdivideMeshes(meshes=meshes_init)
            subdivide(meshes=meshes.clone())
            torch.cuda.synchronize()

        return subdivide_meshes
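For reference on what the benchmarked operator does: SubdivideMeshes splits every triangle into four, so face counts quadruple per application. A small standalone check (a sketch, not part of the benchmark):

from pytorch3d.ops import SubdivideMeshes
from pytorch3d.utils import ico_sphere

mesh = ico_sphere(0)  # icosahedron: 12 verts, 20 faces
subdivided = SubdivideMeshes()(mesh)
assert subdivided.faces_packed().shape[0] == 4 * mesh.faces_packed().shape[0]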
Example #14
    def test_silhouette_with_grad(self):
        """
        Test silhouette blending. Also check that gradient calculation works.
        """
        device = torch.device("cuda:0")
        ref_filename = "test_silhouette.png"
        image_ref_filename = DATA_DIR / ref_filename
        sphere_mesh = ico_sphere(5, device)
        verts, faces = sphere_mesh.get_mesh_verts_faces(0)
        sphere_mesh = Meshes(verts=[verts], faces=[faces])

        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=80,
            bin_size=0,
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 10, 20)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            shader=SilhouetteShader(blend_params=blend_params),
        )
        images = renderer(sphere_mesh)
        alpha = images[0, ..., 3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((alpha.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_silhouette_grad.png"
            )

        with Image.open(image_ref_filename) as raw_image_ref:
            image_ref = torch.from_numpy(np.array(raw_image_ref))
        image_ref = image_ref.to(dtype=torch.float32) / 255.0
        self.assertTrue(torch.allclose(alpha, image_ref, atol=0.055))

        # Check grad exist
        verts.requires_grad = True
        sphere_mesh = Meshes(verts=[verts], faces=[faces])
        images = renderer(sphere_mesh)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)
Example #15
    def mesh_normal_consistency_with_ico(num_meshes: int,
                                         level: int = 3,
                                         device: str = "cpu"):
        device = torch.device(device)
        mesh = ico_sphere(level, device)
        verts, faces = mesh.get_mesh_verts_faces(0)
        verts_list = [verts.clone() for _ in range(num_meshes)]
        faces_list = [faces.clone() for _ in range(num_meshes)]
        meshes = Meshes(verts_list, faces_list)
        torch.cuda.synchronize()

        def loss():
            mesh_normal_consistency(meshes)
            torch.cuda.synchronize()

        return loss
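The benchmark above only times the forward pass; mesh_normal_consistency is also differentiable with respect to the vertices. A minimal sketch of the gradient path (illustrative, not part of the benchmark):

from pytorch3d.loss import mesh_normal_consistency
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere

mesh = ico_sphere(2)
verts, faces = mesh.get_mesh_verts_faces(0)
verts = verts.clone().requires_grad_(True)
loss = mesh_normal_consistency(Meshes(verts=[verts], faces=[faces]))
loss.backward()
assert verts.grad is not None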
Example #16
def rasterize_transform_with_init(num_meshes: int, ico_level: int = 5, device="cuda"):
    # Init meshes
    sphere_meshes = ico_sphere(ico_level, device).extend(num_meshes)
    # Init transform
    R, T = look_at_view_transform(1.0, 0.0, 0.0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    # Init rasterizer
    rasterizer = MeshRasterizer(cameras=cameras)

    torch.cuda.synchronize()

    def raster_fn():
        rasterizer.transform(sphere_meshes)
        torch.cuda.synchronize()

    return raster_fn
Example #17
    def test_simple_sphere_pulsar(self):
        for device in [torch.device("cpu"), torch.device("cuda")]:
            sphere_mesh = ico_sphere(1, device)
            verts_padded = sphere_mesh.verts_padded()
            # Shift vertices to check coordinate frames are correct.
            verts_padded[..., 1] += 0.2
            verts_padded[..., 0] += 0.2
            pointclouds = Pointclouds(
                points=verts_padded, features=torch.ones_like(verts_padded)
            )
            for azimuth in [0.0, 90.0]:
                R, T = look_at_view_transform(2.7, 0.0, azimuth)
                for camera_name, cameras in [
                    ("fovperspective", FoVPerspectiveCameras(device=device, R=R, T=T)),
                    (
                        "fovorthographic",
                        FoVOrthographicCameras(device=device, R=R, T=T),
                    ),
                    ("perspective", PerspectiveCameras(device=device, R=R, T=T)),
                    ("orthographic", OrthographicCameras(device=device, R=R, T=T)),
                ]:
                    raster_settings = PointsRasterizationSettings(
                        image_size=256, radius=5e-2, points_per_pixel=1
                    )
                    rasterizer = PointsRasterizer(
                        cameras=cameras, raster_settings=raster_settings
                    )
                    renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
                    # Load reference image
                    filename = (
                        "pulsar_simple_pointcloud_sphere_"
                        f"azimuth{azimuth}_{camera_name}.png"
                    )
                    image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                    images = renderer(
                        pointclouds, gamma=(1e-3,), znear=(1.0,), zfar=(100.0,)
                    )
                    rgb = images[0, ..., :3].squeeze().cpu()
                    if DEBUG:
                        filename = "DEBUG_%s" % filename
                        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                            DATA_DIR / filename
                        )
                    self.assertClose(rgb, image_ref, rtol=7e-3, atol=5e-3)
Example #18
    def test_subdivide_features(self):
        device = torch.device("cuda:0")
        mesh = ico_sphere(0, device)
        N = 10
        mesh = mesh.extend(N)
        edges = mesh.edges_packed()
        V = mesh.num_verts_per_mesh()[0]
        D = 256
        feats = torch.rand(
            (N * V, D), dtype=torch.float32, device=device, requires_grad=True
        )  # packed features
        app_feats = feats[edges].mean(1)
        subdivide = SubdivideMeshes()
        new_mesh, new_feats = subdivide(mesh, feats)
        gt_feats = torch.cat(
            (feats.view(N, V, D), app_feats.view(N, -1, D)), dim=1
        ).view(-1, D)
        self.assertClose(new_feats, gt_feats)
        self.assertTrue(new_feats.requires_grad == gt_feats.requires_grad)
Example #19
def baryclip_pytorch(
    num_meshes: int = 8,
    ico_level: int = 5,
    image_size: int = 64,
    faces_per_pixel: int = 50,
    device="cuda",
):
    # Init meshes
    sphere_meshes = ico_sphere(ico_level, device).extend(num_meshes)
    # Init transform
    R, T = look_at_view_transform(1.0, 0.0, 0.0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    # Init rasterizer
    raster_settings = RasterizationSettings(
        image_size=image_size,
        blur_radius=1e-4,
        faces_per_pixel=faces_per_pixel,
        clip_barycentric_coords=False,
    )
    rasterizer = MeshRasterizer(cameras=cameras,
                                raster_settings=raster_settings)

    torch.cuda.synchronize()

    def raster_fn():
        fragments = rasterizer(sphere_meshes)

        # Clip bary and reinterpolate
        clipped_bary_coords = _clip_barycentric_coordinates(
            fragments.bary_coords)
        clipped_zbuf = _interpolate_zbuf(fragments.pix_to_face,
                                         clipped_bary_coords, sphere_meshes)
        fragments = Fragments(
            bary_coords=clipped_bary_coords,
            zbuf=clipped_zbuf,
            dists=fragments.dists,
            pix_to_face=fragments.pix_to_face,
        )
        torch.cuda.synchronize()

    return raster_fn
Example #20
    def test_points_renderer_to(self):
        """
        Test moving all the tensors in the points renderer to a new device.
        """

        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        raster_settings = PointsRasterizationSettings(image_size=256,
                                                      radius=0.001,
                                                      points_per_pixel=1)
        cameras = FoVPerspectiveCameras(device=device1,
                                        R=R,
                                        T=T,
                                        aspect_ratio=1.0,
                                        fov=60.0,
                                        zfar=100)
        rasterizer = PointsRasterizer(cameras=cameras,
                                      raster_settings=raster_settings)

        renderer = PointsRenderer(rasterizer=rasterizer,
                                  compositor=AlphaCompositor())

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        pointclouds = Pointclouds(points=verts_padded,
                                  features=torch.randn_like(verts_padded))
        self._check_points_renderer_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(pointclouds)
        self.assertEqual(output_images.device, device1)

        # Move renderer and pointclouds to another device and re render
        device2 = torch.device("cuda:0")
        renderer = renderer.to(device2)
        pointclouds = pointclouds.to(device2)
        self._check_points_renderer_props_on_device(renderer, device2)
        output_images = renderer(pointclouds)
        self.assertEqual(output_images.device, device2)
Example #21
    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_mesh = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_camera"
            # If y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
        )

        # Init renderer
        rasterizer = MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        )
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardPhongShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_light%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_phong = load_rgb_image(
            "test_simple_sphere_light%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

        ########################################################
        # Move the light to the -z axis in world space so it is
        # behind the sphere (the camera looks at the origin from
        # +z), and check that the image renders dark.
        ########################################################
        lights.location[..., 2] = -2.0
        images = renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_dark%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

        ######################################
        # Change the shader to a GouraudShader
        ######################################
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardGouraudShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_light_gouraud%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_gouraud = load_rgb_image(
            "test_simple_sphere_light_gouraud%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_gouraud, atol=0.005))

        ######################################
        # Change the shader to a HardFlatShader
        ######################################
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardFlatShader(
                lights=lights, cameras=cameras, materials=materials
            ),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_light_flat%s.png" % postfix
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        # Load reference image
        image_ref_flat = load_rgb_image(
            "test_simple_sphere_light_flat%s.png" % postfix
        )
        self.assertTrue(torch.allclose(rgb, image_ref_flat, atol=0.005))
Example #22
    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders with batched
        lighting and hard and soft blending.
        """
        batch_size = 5
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_meshes = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )

        # Init rasterizer settings
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=4
        )

        # Init shader settings
        materials = Materials(device=device)
        lights_location = torch.tensor([0.0, 0.0, +2.0], device=device)
        lights_location = lights_location[None].expand(batch_size, -1)
        lights = PointLights(device=device, location=lights_location)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        shader_tests = [
            ShaderTest(HardPhongShader, "phong", "hard_phong"),
            ShaderTest(SoftPhongShader, "phong", "soft_phong"),
            ShaderTest(HardGouraudShader, "gouraud", "hard_gouraud"),
            ShaderTest(HardFlatShader, "flat", "hard_flat"),
        ]
        for test in shader_tests:
            reference_name = test.reference_name
            debug_name = test.debug_name
            shader = test.shader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s_%s.png"
                % (reference_name, type(cameras).__name__),
                DATA_DIR,
            )
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                if i == 0 and DEBUG:
                    filename = "DEBUG_simple_sphere_batched_%s_%s.png" % (
                        debug_name,
                        type(cameras).__name__,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )
                self.assertClose(rgb, image_ref, atol=0.05)
Example #23
    def test_sampling_output(self):
        """
        Check outputs of sampling are correct for different meshes.
        For an ico_sphere, the sampled vertices should lie on a unit sphere.
        For an empty mesh, the samples and normals should be 0.
        """
        device = get_random_cuda_device()

        # Unit simplex.
        verts_pyramid = torch.tensor(
            [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
             [0.0, 0.0, 1.0]],
            dtype=torch.float32,
            device=device,
        )
        faces_pyramid = torch.tensor(
            [[0, 1, 2], [0, 2, 3], [0, 1, 3], [1, 2, 3]],
            dtype=torch.int64,
            device=device,
        )
        sphere_mesh = ico_sphere(9, device)
        verts_sphere, faces_sphere = sphere_mesh.get_mesh_verts_faces(0)
        verts_empty = torch.tensor([], dtype=torch.float32, device=device)
        faces_empty = torch.tensor([], dtype=torch.int64, device=device)
        num_samples = 10
        meshes = Meshes(
            verts=[verts_empty, verts_sphere, verts_pyramid],
            faces=[faces_empty, faces_sphere, faces_pyramid],
        )
        samples, normals = sample_points_from_meshes(meshes,
                                                     num_samples=num_samples,
                                                     return_normals=True)
        samples = samples.cpu()
        normals = normals.cpu()

        self.assertEqual(samples.shape, (3, num_samples, 3))
        self.assertEqual(normals.shape, (3, num_samples, 3))

        # Empty meshes: should have all zeros for samples and normals.
        self.assertClose(samples[0, :], torch.zeros((num_samples, 3)))
        self.assertClose(normals[0, :], torch.zeros((num_samples, 3)))

        # Sphere: points should have radius 1.
        x, y, z = samples[1, :].unbind(1)
        radius = torch.sqrt(x**2 + y**2 + z**2)

        self.assertClose(radius, torch.ones(num_samples))

        # Pyramid: points should lie on one of the faces.
        pyramid_verts = samples[2, :]
        pyramid_normals = normals[2, :]

        self.assertClose(
            pyramid_verts.lt(1).float(), torch.ones_like(pyramid_verts))
        self.assertClose((pyramid_verts >= 0).float(),
                         torch.ones_like(pyramid_verts))

        # Face 1: z = 0,  x + y <= 1, normals = (0, 0, 1).
        face_1_idxs = pyramid_verts[:, 2] == 0
        face_1_verts, face_1_normals = (
            pyramid_verts[face_1_idxs, :],
            pyramid_normals[face_1_idxs, :],
        )
        self.assertTrue(
            torch.all((face_1_verts[:, 0] + face_1_verts[:, 1]) <= 1))
        self.assertClose(
            face_1_normals,
            torch.tensor([0, 0, 1],
                         dtype=torch.float32).expand(face_1_normals.size()),
        )

        # Face 2: x = 0,  z + y <= 1, normals = (1, 0, 0).
        face_2_idxs = pyramid_verts[:, 0] == 0
        face_2_verts, face_2_normals = (
            pyramid_verts[face_2_idxs, :],
            pyramid_normals[face_2_idxs, :],
        )
        self.assertTrue(
            torch.all((face_2_verts[:, 1] + face_2_verts[:, 2]) <= 1))
        self.assertClose(
            face_2_normals,
            torch.tensor([1, 0, 0],
                         dtype=torch.float32).expand(face_2_normals.size()),
        )

        # Face 3: y = 0, x + z <= 1, normals = (0, -1, 0).
        face_3_idxs = pyramid_verts[:, 1] == 0
        face_3_verts, face_3_normals = (
            pyramid_verts[face_3_idxs, :],
            pyramid_normals[face_3_idxs, :],
        )
        self.assertTrue(
            torch.all((face_3_verts[:, 0] + face_3_verts[:, 2]) <= 1))
        self.assertClose(
            face_3_normals,
            torch.tensor([0, -1, 0],
                         dtype=torch.float32).expand(face_3_normals.size()),
        )

        # Face 4: x + y + z = 1, normals = (1, 1, 1)/sqrt(3).
        face_4_idxs = pyramid_verts.gt(0).all(1)
        face_4_verts, face_4_normals = (
            pyramid_verts[face_4_idxs, :],
            pyramid_normals[face_4_idxs, :],
        )
        self.assertClose(face_4_verts.sum(1), torch.ones(face_4_verts.size(0)))
        self.assertClose(
            face_4_normals,
            (torch.tensor([1, 1, 1], dtype=torch.float32) /
             torch.sqrt(torch.tensor(3, dtype=torch.float32))).expand(
                 face_4_normals.size()),
        )
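A condensed, standalone version of the sphere check above (assumptions: CPU is fine here, and a level-4 ico_sphere is fine-grained enough that sampled radii stay within 1e-2 of 1):

import torch
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.utils import ico_sphere

mesh = ico_sphere(4)
samples = sample_points_from_meshes(mesh, num_samples=100)  # (1, 100, 3)
radii = samples.norm(dim=-1)
assert torch.allclose(radii, torch.ones_like(radii), atol=1e-2)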
Example #24
    def test_simple_sphere(self):
        device = torch.device("cuda:0")
        ref_filename = "test_rasterized_sphere.png"
        image_ref_filename = DATA_DIR / ref_filename

        # Rescale image_ref to the 0 - 1 range and convert to a binary mask.
        image_ref = convert_image_to_binary_mask(image_ref_filename)

        # Init mesh
        sphere_mesh = ico_sphere(5, device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init rasterizer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)

        ####################################
        # 1. Test rasterizing a single mesh
        ####################################

        fragments = rasterizer(sphere_mesh)
        image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
        # Convert pix_to_face to a binary mask
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere.png")

        self.assertTrue(torch.allclose(image, image_ref))

        ##################################
        #  2. Test with a batch of meshes
        ##################################

        batch_size = 10
        sphere_meshes = sphere_mesh.extend(batch_size)
        fragments = rasterizer(sphere_meshes)
        for i in range(batch_size):
            image = fragments.pix_to_face[i, ..., 0].squeeze().cpu()
            image[image >= 0] = 1.0
            image[image < 0] = 0.0
            self.assertTrue(torch.allclose(image, image_ref))

        ####################################################
        #  3. Test that passing kwargs to rasterizer works.
        ####################################################

        #  Change the view transform to zoom in.
        R, T = look_at_view_transform(2.0, 0, 0, device=device)
        fragments = rasterizer(sphere_mesh, R=R, T=T)
        image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        ref_filename = "test_rasterized_sphere_zoom.png"
        image_ref_filename = DATA_DIR / ref_filename
        image_ref = convert_image_to_binary_mask(image_ref_filename)

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere_zoom.png")
        self.assertTrue(torch.allclose(image, image_ref))

        #################################
        #  4. Test init without cameras.
        ##################################

        # Create a new empty rasterizer:
        rasterizer = MeshRasterizer()

        # Check that omitting the cameras in both initialization
        # and the forward pass throws an error:
        with self.assertRaisesRegex(ValueError, "Cameras must be specified"):
            rasterizer(sphere_mesh)

        # Now pass in the cameras as a kwarg
        fragments = rasterizer(sphere_mesh,
                               cameras=cameras,
                               raster_settings=raster_settings)
        image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
        # Convert pix_to_face to a binary mask
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere.png")

        self.assertTrue(torch.allclose(image, image_ref))
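The in-place thresholding of pix_to_face above recurs throughout these tests; a small helper expressing the same conversion (a refactoring sketch, not part of the test suite):

def pix_to_face_to_mask(fragments):
    # pix_to_face holds the index of the face covering each pixel,
    # or -1 for background, so foreground is simply `>= 0`.
    return (fragments.pix_to_face[..., 0] >= 0).float().cpu()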
Example #25
    def test_simple_sphere(self):
        device = torch.device("cuda:0")

        # Load reference image
        ref_filename = "test_simple_pointcloud_sphere.png"
        image_ref_filename = DATA_DIR / ref_filename

        # Rescale image_ref to the 0 - 1 range and convert to a binary mask.
        image_ref = convert_image_to_binary_mask(image_ref_filename).to(
            torch.int32)

        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(points=verts_padded)
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(image_size=256,
                                                      radius=5e-2,
                                                      points_per_pixel=1)

        #################################
        #  1. Test init without cameras.
        ##################################

        # Initialize without passing in the cameras
        rasterizer = PointsRasterizer()

        # Check that omitting the cameras in both initialization
        # and the forward pass throws an error:
        with self.assertRaisesRegex(ValueError, "Cameras must be specified"):
            rasterizer(pointclouds)

        ##########################################
        # 2. Test rasterizing a single pointcloud
        ##########################################

        fragments = rasterizer(pointclouds,
                               cameras=cameras,
                               raster_settings=raster_settings)

        # Convert idx to a binary mask
        image = fragments.idx[0, ..., 0].squeeze().cpu()
        image[image >= 0] = 1.0
        image[image < 0] = 0.0

        if DEBUG:
            Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_test_rasterized_sphere_points.png")

        self.assertTrue(torch.allclose(image, image_ref[..., 0]))

        ########################################
        #  3. Test with a batch of pointclouds
        ########################################

        batch_size = 10
        pointclouds = pointclouds.extend(batch_size)
        fragments = rasterizer(pointclouds,
                               cameras=cameras,
                               raster_settings=raster_settings)
        for i in range(batch_size):
            image = fragments.idx[i, ..., 0].squeeze().cpu()
            image[image >= 0] = 1.0
            image[image < 0] = 0.0
            self.assertTrue(torch.allclose(image, image_ref[..., 0]))
Example #26
    def test_simple_sphere(self, elevated_camera=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                           have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        textures = Textures(verts_rgb=torch.ones_like(verts_padded))
        sphere_mesh = Meshes(verts=verts_padded,
                             faces=faces_padded,
                             textures=textures)

        # Init rasterizer settings
        if elevated_camera:
            R, T = look_at_view_transform(2.7, 45.0, 0.0)
            postfix = "_elevated_camera"
        else:
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = ""
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardPhongShader(lights=lights,
                                   cameras=cameras,
                                   materials=materials),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_light%s.png" % postfix)

        # Load reference image
        image_ref_phong = load_rgb_image(
            "test_simple_sphere_illuminated%s.png" % postfix)
        self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

        ###################################
        # Move the light behind the object
        ###################################
        # Check the image is dark
        lights.location[..., 2] = +2.0
        images = renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_dark%s.png" % postfix)

        # Load reference image
        image_ref_phong_dark = load_rgb_image("test_simple_sphere_dark%s.png" %
                                              postfix)
        self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

        ######################################
        # Change the shader to a GouraudShader
        ######################################
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=HardGouraudShader(lights=lights,
                                     cameras=cameras,
                                     materials=materials),
        )
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_simple_sphere_light_gouraud%s.png" % postfix)

        # Load reference image
        image_ref_gouraud = load_rgb_image(
            "test_simple_sphere_light_gouraud%s.png" % postfix)
        self.assertTrue(torch.allclose(rgb, image_ref_gouraud, atol=0.005))
        self.assertFalse(torch.allclose(rgb, image_ref_phong, atol=0.005))
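The pattern this test relies on, building the renderer once and overriding the lights per call, also works outside a test harness. Below is a minimal, self-contained sketch of the front/behind light check, assuming the current PyTorch3D convention in which look_at_view_transform(2.7, 0, 0) places the camera at (0, 0, 2.7); the example above uses the older OpenGL-convention cameras, which is why its signs are flipped. The image size and the brightness comparison are illustrative choices, not values from the original test.

import torch
from pytorch3d.renderer import (
    FoVPerspectiveCameras,
    HardPhongShader,
    MeshRasterizer,
    MeshRenderer,
    PointLights,
    RasterizationSettings,
    TexturesVertex,
    look_at_view_transform,
)
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere

device = torch.device("cuda:0")
sphere = ico_sphere(4, device)
mesh = Meshes(
    verts=sphere.verts_padded(),
    faces=sphere.faces_padded(),
    textures=TexturesVertex(verts_features=torch.ones_like(sphere.verts_padded())),
)

R, T = look_at_view_transform(2.7, 0.0, 0.0)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
renderer = MeshRenderer(
    rasterizer=MeshRasterizer(
        cameras=cameras,
        raster_settings=RasterizationSettings(image_size=256),
    ),
    shader=HardPhongShader(device=device, cameras=cameras),
)

# The camera sits at (0, 0, 2.7), so a light at z=+2 is in front of the
# sphere and a light at z=-2 is hidden behind it.
front = PointLights(device=device, location=[[0.0, 0.0, 2.0]])
behind = PointLights(device=device, location=[[0.0, 0.0, -2.0]])
lit = renderer(mesh, lights=front)
dark = renderer(mesh, lights=behind)
assert lit[..., :3].mean() > dark[..., :3].mean()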
Example #27
 def __init__(self):
     super(Model, self).__init__()
     mesh = ico_sphere(3)
     self.register_buffer("faces", mesh.faces_padded())
     self.renderer = self.init_render()
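The fragment above elides init_render. A hypothetical completion is sketched below, assuming a soft-silhouette setup like those used elsewhere on this page; the camera distance, image size and blur settings are illustrative guesses, not the original values.

import torch.nn as nn
from pytorch3d.renderer import (
    FoVPerspectiveCameras,
    MeshRasterizer,
    MeshRenderer,
    RasterizationSettings,
    SoftSilhouetteShader,
    look_at_view_transform,
)
from pytorch3d.utils import ico_sphere


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        mesh = ico_sphere(3)
        # A buffer moves with the module on .to(device) but is not trained.
        self.register_buffer("faces", mesh.faces_padded())
        self.renderer = self.init_render()

    def init_render(self):
        # Hypothetical: the original snippet does not show this method.
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=128, blur_radius=1e-4, faces_per_pixel=50
        )
        return MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
            shader=SoftSilhouetteShader(),
        )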
Example #28
    def test_simple_sphere(self, elevated_camera=False, check_depth=False):
        """
        Test that the output of Phong, Gouraud and flat shading matches
        reference images using the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                have an elevation of 45 degrees.
        """
        device = torch.device("cuda:0")

        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_"
            # If the y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = "_"
        for cam_type in (
            FoVPerspectiveCameras,
            FoVOrthographicCameras,
            PerspectiveCameras,
            OrthographicCameras,
        ):
            cameras = cam_type(device=device, R=R, T=T)

            # Init shader settings
            materials = Materials(device=device)
            lights = PointLights(device=device)
            lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

            raster_settings = RasterizationSettings(
                image_size=512, blur_radius=0.0, faces_per_pixel=1
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

            # Test several shaders
            shaders = {
                "phong": HardPhongShader,
                "gouraud": HardGouraudShader,
                "flat": HardFlatShader,
            }
            for (name, shader_init) in shaders.items():
                shader = shader_init(
                    lights=lights,
                    cameras=cameras,
                    materials=materials,
                    blend_params=blend_params,
                )
                if check_depth:
                    renderer = MeshRendererWithFragments(
                        rasterizer=rasterizer, shader=shader
                    )
                    images, fragments = renderer(sphere_mesh)
                    self.assertClose(fragments.zbuf, rasterizer(sphere_mesh).zbuf)
                else:
                    renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
                    images = renderer(sphere_mesh)

                rgb = images[0, ..., :3].squeeze().cpu()
                filename = "simple_sphere_light_%s%s%s.png" % (
                    name,
                    postfix,
                    cam_type.__name__,
                )

                image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)

                if DEBUG:
                    filename = "DEBUG_%s" % filename
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )

            ########################################################
            # Move the light to the -z axis in world space so it is
            # behind the sphere. Note that +Z is in, +Y up,
            # +X left for both world and camera space.
            ########################################################
            lights.location[..., 2] = -2.0
            phong_shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            if check_depth:
                phong_renderer = MeshRendererWithFragments(
                    rasterizer=rasterizer, shader=phong_shader
                )
                images, fragments = phong_renderer(sphere_mesh, lights=lights)
                self.assertClose(
                    fragments.zbuf, rasterizer(sphere_mesh, lights=lights).zbuf
                )
            else:
                phong_renderer = MeshRenderer(
                    rasterizer=rasterizer, shader=phong_shader
                )
                images = phong_renderer(sphere_mesh, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_simple_sphere_dark%s%s.png" % (
                    postfix,
                    cam_type.__name__,
                )
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )

            image_ref_phong_dark = load_rgb_image(
                "test_simple_sphere_dark%s%s.png" % (postfix, cam_type.__name__),
                DATA_DIR,
            )
            self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
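The loop above drives all four camera types with the same R and T. For reference, here is a short sketch of how they differ at construction time; the parameter values shown are the library defaults or illustrative choices, not values taken from the test.

from pytorch3d.renderer import (
    FoVOrthographicCameras,
    FoVPerspectiveCameras,
    OrthographicCameras,
    PerspectiveCameras,
    look_at_view_transform,
)

R, T = look_at_view_transform(dist=2.7, elev=0.0, azim=0.0)

# The FoV cameras define a clipped view frustum, so the depth bounds are
# part of the camera itself.
fov_persp = FoVPerspectiveCameras(R=R, T=T, znear=1.0, zfar=100.0, fov=60.0)
fov_ortho = FoVOrthographicCameras(R=R, T=T, znear=1.0, zfar=100.0)
# The non-FoV cameras are parametrized by focal length and principal point
# and carry no depth bounds of their own.
persp = PerspectiveCameras(R=R, T=T, focal_length=1.0, principal_point=((0.0, 0.0),))
ortho = OrthographicCameras(R=R, T=T, focal_length=1.0)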
Example #29
    def test_joined_spheres(self):
        """
        Test that a list of Meshes can be joined into a single mesh and
        that the joined mesh is rendered correctly with Phong, Gouraud
        and flat shaders.
        """
        device = torch.device("cuda:0")

        # Init mesh with vertex textures.
        # Initialize a list containing two ico spheres of different sizes.
        sphere_list = [ico_sphere(3, device), ico_sphere(4, device)]
        # [(642 verts, 1280 faces), (2562 verts, 5120 faces)]
        # Scale factors used to resize the spheres.
        scales = [0.25, 1]
        # Horizontal offsets applied to the spheres to prevent overlap.
        offsets = [1.2, -0.3]
        # Initialize a list containing the adjusted sphere meshes.
        sphere_mesh_list = []
        for i in range(len(sphere_list)):
            verts = sphere_list[i].verts_padded() * scales[i]
            verts[0, :, 0] += offsets[i]
            sphere_mesh_list.append(
                Meshes(verts=verts, faces=sphere_list[i].faces_padded())
            )
        joined_sphere_mesh = join_meshes_as_scene(sphere_mesh_list)
        joined_sphere_mesh.textures = TexturesVertex(
            verts_features=torch.ones_like(joined_sphere_mesh.verts_padded())
        )

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
            perspective_correct=False,
        )

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            image = renderer(joined_sphere_mesh)
            rgb = image[..., :3].squeeze().cpu()
            if DEBUG:
                file_name = "DEBUG_joined_spheres_%s.png" % name
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / file_name
                )
            image_ref = load_rgb_image("test_joined_spheres_%s.png" % name, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
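A minimal standalone sketch of the join-then-texture pattern used above; the scales and offsets mirror the test, and everything else is trimmed to the essentials.

import torch
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes, join_meshes_as_scene
from pytorch3d.utils import ico_sphere

device = torch.device("cuda:0")
spheres = [ico_sphere(3, device), ico_sphere(4, device)]
scales = [0.25, 1.0]
offsets = [1.2, -0.3]

parts = []
for sphere, scale, offset in zip(spheres, scales, offsets):
    verts = sphere.verts_padded() * scale
    verts[..., 0] += offset  # shift along x so the spheres do not overlap
    parts.append(Meshes(verts=verts, faces=sphere.faces_padded()))

# Join the list into a single mesh containing both spheres, then texture it.
scene = join_meshes_as_scene(parts)
scene.textures = TexturesVertex(
    verts_features=torch.ones_like(scene.verts_padded())
)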
Example #30
 def test_unified_inputs_pulsar(self):
     # Test data on different devices.
     for device in [torch.device("cpu"), torch.device("cuda")]:
         sphere_mesh = ico_sphere(1, device)
         verts_padded = sphere_mesh.verts_padded()
         pointclouds = Pointclouds(
             points=verts_padded, features=torch.ones_like(verts_padded)
         )
         R, T = look_at_view_transform(2.7, 0.0, 0.0)
         # Test the different camera types.
         for _, cameras in [
             ("fovperspective", FoVPerspectiveCameras(device=device, R=R, T=T)),
             (
                 "fovorthographic",
                 FoVOrthographicCameras(device=device, R=R, T=T),
             ),
             ("perspective", PerspectiveCameras(device=device, R=R, T=T)),
             ("orthographic", OrthographicCameras(device=device, R=R, T=T)),
         ]:
              # Test different ways of specifying the image size.
             for image_size in (256, (256, 256)):
                 raster_settings = PointsRasterizationSettings(
                     image_size=image_size, radius=5e-2, points_per_pixel=1
                 )
                 rasterizer = PointsRasterizer(
                     cameras=cameras, raster_settings=raster_settings
                 )
                  # Test that a compositor can be provided. Its value is
                  # ignored, so use a dummy.
                 _ = PulsarPointsRenderer(rasterizer=rasterizer, compositor=1).to(
                     device
                 )
                 # Constructor without compositor.
                 _ = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
                 # Constructor with n_channels.
                 _ = PulsarPointsRenderer(rasterizer=rasterizer, n_channels=3).to(
                     device
                 )
                 # Constructor with max_num_spheres.
                 renderer = PulsarPointsRenderer(
                     rasterizer=rasterizer, max_num_spheres=1000
                 ).to(device)
                 # Test the forward function.
                 if isinstance(cameras, (PerspectiveCameras, OrthographicCameras)):
                      # znear and zfar are required in this case.
                     self.assertRaises(
                         ValueError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds, gamma=(1e-4,)
                         ),
                     )
                     renderer.forward(
                         point_clouds=pointclouds,
                         gamma=(1e-4,),
                         znear=(1.0,),
                         zfar=(2.0,),
                     )
                     # znear and zfar must be batched.
                     self.assertRaises(
                         TypeError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds,
                             gamma=(1e-4,),
                             znear=1.0,
                             zfar=(2.0,),
                         ),
                     )
                     self.assertRaises(
                         TypeError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds,
                             gamma=(1e-4,),
                             znear=(1.0,),
                             zfar=2.0,
                         ),
                     )
                 else:
                     # gamma must be batched.
                     self.assertRaises(
                         TypeError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds, gamma=1e-4
                         ),
                     )
                     renderer.forward(point_clouds=pointclouds, gamma=(1e-4,))
                      # Changing the rasterizer image size to an invalid
                      # value should raise.
                     renderer.rasterizer.raster_settings.image_size = 0
                     self.assertRaises(
                         ValueError,
                         lambda: renderer.forward(
                             point_clouds=pointclouds, gamma=(1e-4,)
                         ),
                     )
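Finally, a minimal sketch of an actual render call with PulsarPointsRenderer, based on the constructor and forward arguments exercised above; the gamma, znear and zfar values are illustrative. As the test checks, the non-FoV cameras require batched znear and zfar at call time.

import torch
from pytorch3d.renderer import (
    PerspectiveCameras,
    PointsRasterizationSettings,
    PointsRasterizer,
    PulsarPointsRenderer,
    look_at_view_transform,
)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils import ico_sphere

device = torch.device("cuda:0")
verts = ico_sphere(1, device).verts_padded()
pointclouds = Pointclouds(points=verts, features=torch.ones_like(verts))

R, T = look_at_view_transform(2.7, 0.0, 0.0)
cameras = PerspectiveCameras(device=device, R=R, T=T)
raster_settings = PointsRasterizationSettings(
    image_size=256, radius=5e-2, points_per_pixel=1
)
rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)

# gamma controls the softness of the blending; znear/zfar give the depth
# range, one value per point cloud in the batch.
images = renderer(pointclouds, gamma=(1e-4,), znear=(1.0,), zfar=(5.0,))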