    def test_simple_sphere_batched(self):
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
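        # Shift vertices to check coordinate frames are correct.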
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(
            points=verts_padded, features=torch.ones_like(verts_padded)
        )
        batch_size = 20
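        # Replicate the single cloud so every element of the batch contains the same sphere.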
        pointclouds = pointclouds.extend(batch_size)
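        # Position the camera 2.7 units from the origin (elev=0, azim=0), looking at the origin.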
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=256, radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
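        # NormWeightedCompositor normalizes the blending weights; with points_per_pixel=1
        # each pixel simply takes the features of the closest point.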
        compositor = NormWeightedCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        filename = "simple_pointcloud_sphere.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        images = renderer(pointclouds)
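        # All batch elements were built from the same sphere, so each render should match the reference.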
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                debug_filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / debug_filename
                )
            self.assertClose(rgb, image_ref)

    def test_simple_sphere(self):
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        # Shift vertices to check coordinate frames are correct.
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(
            points=verts_padded, features=torch.ones_like(verts_padded)
        )
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=256, radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = NormWeightedCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        filename = "simple_pointcloud_sphere.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
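            # bin_size=0 forces the naive rasterization kernel; None lets a heuristic
            # pick a bin size for the coarse-to-fine path.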
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(pointclouds)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                debug_filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / debug_filename
                )
            self.assertClose(rgb, image_ref)

    def test_texture_sampling_cow(self):
        # Test texture sampling for the cow example by converting the cow mesh
        # and its UV (or atlas) texture into a textured point cloud.

        device = torch.device("cuda:0")
        obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        for text_type in ("uv", "atlas"):
            # Load mesh + texture
            if text_type == "uv":
                mesh = load_objs_as_meshes(
                    [obj_filename], device=device, load_textures=True, texture_wrap=None
                )
            elif text_type == "atlas":
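                # Build a per-face texture atlas (8x8 texels per face) instead of
                # sampling colors through the UV map.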
                mesh = load_objs_as_meshes(
                    [obj_filename],
                    device=device,
                    load_textures=True,
                    create_texture_atlas=True,
                    texture_atlas_size=8,
                    texture_wrap=None,
                )

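            # Sample 50k points from the mesh surface, carrying normals and
            # interpolated texture colors to use as point-cloud features.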
            points, normals, textures = sample_points_from_meshes(
                mesh, num_samples=50000, return_normals=True, return_textures=True
            )
            pointclouds = Pointclouds(points, normals=normals, features=textures)

            for pos in ("front", "back"):
                # Init camera and rasterizer settings
                if pos == "back":
                    azim = 0.0
                elif pos == "front":
                    azim = 180
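                # azim=0 views the cow from behind; azim=180 views it from the front.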
                R, T = look_at_view_transform(2.7, 0, azim)
                cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

                raster_settings = PointsRasterizationSettings(
                    image_size=512, radius=1e-2, points_per_pixel=1
                )

                rasterizer = PointsRasterizer(
                    cameras=cameras, raster_settings=raster_settings
                )
                compositor = NormWeightedCompositor()
                renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)
                images = renderer(pointclouds)

                rgb = images[0, ..., :3].squeeze().cpu()
                if DEBUG:
                    filename = "DEBUG_cow_mesh_to_pointcloud_%s_%s.png" % (
                        text_type,
                        pos,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )