# Imports assumed for this standalone example (module-level context).
import logging
from os import path

import imageio
import torch
from pytorch3d.renderer import (
    PerspectiveCameras,
    PointsRasterizationSettings,
    PointsRasterizer,
    PulsarPointsRenderer,
)
from pytorch3d.structures import Pointclouds

LOGGER = logging.getLogger(__name__)


def cli():
    """
    Basic example for the pulsar sphere renderer using the PyTorch3D interface.

    Writes to `basic-pt3d.png`.
    """
    LOGGER.info("Rendering on GPU...")
    torch.manual_seed(1)
    n_points = 10
    width = 1_000
    height = 1_000
    device = torch.device("cuda")
    # Generate sample data.
    vert_pos = torch.rand(n_points, 3, dtype=torch.float32,
                          device=device) * 10.0
    vert_pos[:, 2] += 25.0
    vert_pos[:, :2] -= 5.0
    vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
    pcl = Pointclouds(points=vert_pos[None, ...], features=vert_col[None, ...])
    # Alternatively, you can also use the look_at_view_transform to get R and T:
    # R, T = look_at_view_transform(
    #     dist=30.0, elev=0.0, azim=180.0, at=((0.0, 0.0, 30.0),), up=((0, 1, 0),),
    # )
    cameras = PerspectiveCameras(
        # The focal length must be double the size for PyTorch3D because of the NDC
        # coordinates spanning a range of two - and they must be normalized by the
        # sensor width (see the pulsar example). This means we need here
        # 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
        focal_length=(5.0 * 2.0 / 2.0, ),
        R=torch.eye(3, dtype=torch.float32, device=device)[None, ...],
        T=torch.zeros((1, 3), dtype=torch.float32, device=device),
        image_size=((width, height), ),
        device=device,
    )
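    # With R = identity and T = 0 the camera sits at the world origin looking
    # along +Z (the PyTorch3D convention), so the points at z ~ 25-35 are in
    # front of it.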
    vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
    raster_settings = PointsRasterizationSettings(
        image_size=(width, height),
        radius=vert_rad,
    )
    rasterizer = PointsRasterizer(cameras=cameras,
                                  raster_settings=raster_settings)
    renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
    # Render.
    image = renderer(
        pcl,
        gamma=(1.0e-1, ),  # Renderer blending parameter gamma, in [1., 1e-5].
        znear=(1.0, ),
        zfar=(45.0, ),
        radius_world=True,
        bg_col=torch.ones((3, ), dtype=torch.float32, device=device),
    )[0]
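    # The renderer returns images of shape (N, H, W, 3); indexing with [0]
    # selects the single image in the batch.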
    LOGGER.info("Writing image to `%s`.", path.abspath("basic-pt3d.png"))
    imageio.imsave("basic-pt3d.png",
                   (image.cpu().detach() * 255.0).to(torch.uint8).numpy())
    LOGGER.info("Done.")
Example #2
def get_visible_points(point_clouds, cameras, depth_merge_threshold=0.01):
    """ Returns packed visibility """
    from pytorch3d.renderer import PointsRasterizationSettings, PointsRasterizer
    img_size = 256
    raster = PointsRasterizer(raster_settings=PointsRasterizationSettings(
        image_size=img_size, points_per_pixel=20, radius=3 * 2.0 / img_size))
    frag = raster(point_clouds, cameras=cameras)
    depth_occ_mask = (frag.zbuf[..., 1:] -
                      frag.zbuf[..., :1]) < depth_merge_threshold
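    # Points whose depth is within `depth_merge_threshold` of the front-most
    # point in a pixel are kept; the remaining entries are treated as occluded
    # and their indices are removed from the visibility mask below.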
    occ_idx = frag.idx[..., 1:][~depth_occ_mask]
    frag.idx[..., 1:][~depth_occ_mask] = -1
    mask = get_per_point_visibility_mask(point_clouds, frag)
    occ_idx = occ_idx[occ_idx != -1]
    occ_idx = occ_idx.unique()
    mask[occ_idx.long()] = False
    return mask
Example #3
    def setup(self, device):
        if self.renderer is not None:
            return
        R, T = look_at_view_transform(self.opt.viewpoint_distance,
                                      self.opt.viewpoint_elevation,
                                      self.opt.viewpoint_azimuth,
                                      device=device)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=self.opt.raster_image_size,
            radius=self.opt.raster_radius,
            points_per_pixel=self.opt.raster_points_per_pixel,
        )
        rasterizer = PointsRasterizer(cameras=cameras,
                                      raster_settings=raster_settings)
        lights = PointLights(device=device,
                             location=[self.opt.lights_location])
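        # Note: `lights` is created here but not passed to the Pulsar point
        # renderer below; presumably it is consumed elsewhere in this class.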
        self.renderer = PulsarPointsRenderer(rasterizer=rasterizer,
                                             n_channels=3).to(device)
Example #4
    def _setup_render(self):
        # Unpack options ...
        opts = self.opts

        # Initialize a camera.
        # TODO(ycho): Alternatively, specify the intrinsic matrix `K` instead.
        cameras = FoVPerspectiveCameras(znear=opts.znear,
                                        zfar=opts.zfar,
                                        aspect_ratio=opts.aspect,
                                        fov=opts.fov,
                                        degrees=True,
                                        device=self.device)

        # Define the settings for rasterization and shading.
        # As we are rendering images for visualization purposes only we will set faces_per_pixel=1
        # and blur_radius=0.0. Refer to rasterize_points.py for explanations of
        # these parameters.
        # points_per_pixel (Optional): We will keep track of this many points per
        # pixel, returning the nearest points_per_pixel points along the z-axis

        # Create a points renderer by compositing points using an alpha compositor (nearer points
        # are weighted more heavily). See [1] for an explanation.
        if self.opts.use_mesh:
            raster_settings = RasterizationSettings(
                image_size=opts.image_size,
                blur_radius=0.0,  # hmm...
                faces_per_pixel=1)
            rasterizer = MeshRasterizer(cameras=cameras,
                                        raster_settings=raster_settings)
            lights = PointLights(device=self.device,
                                 location=[[0.0, 0.0, -3.0]])

            renderer = MeshRenderer(rasterizer=rasterizer,
                                    shader=SoftPhongShader(device=self.device,
                                                           cameras=cameras,
                                                           lights=lights))
        else:
            raster_settings = PointsRasterizationSettings(
                image_size=opts.image_size, radius=0.1, points_per_pixel=8)
            rasterizer = PointsRasterizer(cameras=cameras,
                                          raster_settings=raster_settings)
            renderer = PointsRenderer(rasterizer=rasterizer,
                                      compositor=AlphaCompositor())
        return renderer
Example #5
    def __init__(self, cameras=None, raster_settings=None, frnn_radius=0.2):
        """
        cameras: A cameras object which has a `transform_points` method
            that returns the transformed points after applying the
            world-to-view and view-to-screen transformations.
        compositor: The SurfaceSplatting compositor is used by default unless
            the user overrides it, in which case a warning is issued.
        raster_settings: the parameters for rasterization. This should be a
            named tuple.

        All these initial settings can be overridden by passing keyword
        arguments to the forward function.
        """
        if raster_settings is None:
            raster_settings = PointsRasterizationSettings()
        super().__init__(cameras=cameras, raster_settings=raster_settings)

        self.raster_settings = raster_settings
        self.frnn_radius = frnn_radius
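        # frnn_radius: assumed to be the search radius for fixed-radius
        # nearest-neighbor (FRNN) queries used later in the pipeline.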
Example #6
    def test_points_renderer_to(self):
        """
        Test moving all the tensors in the points renderer to a new device.
        """

        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        raster_settings = PointsRasterizationSettings(image_size=256,
                                                      radius=0.001,
                                                      points_per_pixel=1)
        cameras = FoVPerspectiveCameras(device=device1,
                                        R=R,
                                        T=T,
                                        aspect_ratio=1.0,
                                        fov=60.0,
                                        zfar=100)
        rasterizer = PointsRasterizer(cameras=cameras,
                                      raster_settings=raster_settings)

        renderer = PointsRenderer(rasterizer=rasterizer,
                                  compositor=AlphaCompositor())

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        pointclouds = Pointclouds(points=verts_padded,
                                  features=torch.randn_like(verts_padded))
        self._check_points_renderer_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(pointclouds)
        self.assertEqual(output_images.device, device1)

        # Move renderer and pointclouds to another device and re-render
        device2 = torch.device("cuda:0")
        renderer = renderer.to(device2)
        pointclouds = pointclouds.to(device2)
        self._check_points_renderer_props_on_device(renderer, device2)
        output_images = renderer(pointclouds)
        self.assertEqual(output_images.device, device2)
Example #7
    def test_pointcloud(self):
        data = _CommonData()
        clouds = Pointclouds(points=torch.tensor([[data.point]])).extend(2)
        colorful_cloud = Pointclouds(points=torch.tensor([[data.point]]),
                                     features=torch.ones(1, 1, 3)).extend(2)
        points_per_pixel = 2
        # for camera in [data.camera_screen]:
        for camera in (data.camera_ndc, data.camera_screen):
            rasterizer = PointsRasterizer(
                cameras=camera,
                raster_settings=PointsRasterizationSettings(
                    image_size=data.image_size,
                    radius=0.0001,
                    points_per_pixel=points_per_pixel,
                ),
            )
            # when rasterizing we expect only one pixel to be occupied
            rasterizer_output = rasterizer(clouds).idx
            self.assertTupleEqual(rasterizer_output.shape, (2, ) +
                                  data.image_size + (points_per_pixel, ))
            found = torch.nonzero(rasterizer_output != -1)
            self.assertTupleEqual(found.shape, (2, 4))
            self.assertListEqual(found[0].tolist(), [0, data.y, data.x, 0])
            self.assertListEqual(found[1].tolist(), [1, data.y, data.x, 0])

            if camera.in_ndc():
                # Pulsar not currently working in screen space.
                pulsar_renderer = PulsarPointsRenderer(rasterizer=rasterizer)
                pulsar_output = pulsar_renderer(colorful_cloud,
                                                gamma=(0.1, 0.1),
                                                znear=(0.1, 0.1),
                                                zfar=(70, 70))
                self.assertTupleEqual(pulsar_output.shape,
                                      (2, ) + data.image_size + (3, ))
                # Look for points rendered in the red channel only, expecting our one.
                # Check the first batch element only.
                # TODO: Something is odd with the second.
                found = torch.nonzero(pulsar_output[0, :, :, 0])
                self.assertTupleEqual(found.shape, (1, 2))
                self.assertListEqual(found[0].tolist(), [data.y, data.x])
Example #8
    def __init__(self, cfgs):
        super(Renderer, self).__init__()

        self.device = cfgs.get('device', 'cuda:0')
        self.image_size = cfgs.get('image_size', 64)
        self.min_depth = cfgs.get('min_depth', 0.9)
        self.max_depth = cfgs.get('max_depth', 1.1)
        self.rot_center_depth = cfgs.get('rot_center_depth',
                                         (self.min_depth + self.max_depth) / 2)
        # todo: FoV (field of view) was set to a fixed value of 10 degrees (according to the paper).
        self.fov = cfgs.get('fov', 10)
        self.tex_cube_size = cfgs.get('tex_cube_size', 2)
        self.renderer_min_depth = cfgs.get('renderer_min_depth', 0.1)
        self.renderer_max_depth = cfgs.get('renderer_max_depth', 10.)

        #### camera intrinsics
        #             (u)   (x)
        #    d * K^-1 (v) = (y)
        #             (1)   (z)

        ## renderer for visualization
        R = [[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]]
        R = torch.FloatTensor(R).to(self.device)
        t = torch.zeros(1, 3, dtype=torch.float32).to(self.device)

        ## todo: K is the camera intrinsic matrix.
        fx = (self.image_size - 1) / 2 / (math.tan(
            self.fov / 2 * math.pi / 180))
        fy = (self.image_size - 1) / 2 / (math.tan(
            self.fov / 2 * math.pi / 180))
        cx = (self.image_size - 1) / 2
        cy = (self.image_size - 1) / 2
        K = [[fx, 0., cx], [0., fy, cy], [0., 0., 1.]]
        K = torch.FloatTensor(K).to(self.device)
        self.inv_K = torch.inverse(K).unsqueeze(0)
        self.K = K.unsqueeze(0)
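        # Example: a pixel (u, v) at depth d back-projects to camera coordinates
        # as d * inv_K @ [u, v, 1]^T, matching the sketch above.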

        ## todo: define Renderer.
        ## use renderer from pytorch3d.
        # fixme: znear and zfar are equivalent to the neural renderer default settings.
        cameras = OpenGLOrthographicCameras(device=self.device,
                                            R=R,
                                            T=t,
                                            znear=0.01,
                                            zfar=100)
        # cameras = OpenGLPerspectiveCameras(device=self.device, R=R, T=t,
        #                                   znear=self.renderer_min_depth,
        #                                   zfar=self.renderer_max_depth,
        #                                   fov=self.fov)

        raster_settings = PointsRasterizationSettings(
            image_size=self.image_size,
            radius=0.003,
            points_per_pixel=10,
            bin_size=None,
            max_points_per_bin=None)
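        # With bin_size=None and max_points_per_bin=None the rasterizer chooses
        # its coarse-to-fine binning parameters heuristically.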

        self.renderer = PointsRenderer(
            rasterizer=PointsRasterizer(cameras=cameras,
                                        raster_settings=raster_settings),
            compositor=AlphaCompositor(composite_params=None))
Example #9
        "/home/dari/Projects/pointrendering2/Code/scenes/tt_train_colmap_undis/"
    )

    cameras = scene.GetCamera(1)

    # cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)

    # print("Num Cameras", len(cameras))
    # assert len(cameras) == 1

    # Define the settings for rasterization and shading. The output image size is
    # taken from the scene. Refer to rasterize_points.py for explanations of these
    # parameters.
    raster_settings = PointsRasterizationSettings(
        image_size=scene.image_size,
        # radius=0.003,
        radius=0.005,
        points_per_pixel=10)

    # Create a points renderer by compositing points using an alpha compositor (nearer points
    # are weighted more heavily). See [1] for an explanation.
    rasterizer = PointsRasterizer(cameras=cameras,
                                  raster_settings=raster_settings)
    renderer = PointsRenderer(rasterizer=rasterizer,
                              compositor=AlphaCompositor())

    images = renderer(scene.point_cloud)
    PrintTensorInfo(images)

    save_image(images[0].permute((2, 0, 1)), "debug/img0.jpg")
Example #10
    def test_compare_with_pointclouds_renderer(self,
                                               batch_size=11,
                                               volume_size=(30, 30, 30),
                                               image_size=(200, 250)):
        """
        Generate a volume and its corresponding point cloud and check whether
        PointsRenderer returns the same images as the corresponding VolumeRenderer.
        """

        # generate NDC camera extrinsics and intrinsics
        cameras = init_cameras(batch_size, image_size=image_size, ndc=True)

        # init the boundary volume
        for shape in ("sphere", "cube"):

            if not DEBUG and shape == "cube":
                # do not run numeric checks for the cube as the
                # differences in rendering equations make the renders incomparable
                continue

            # get rand offset of the volume
            volume_translation = torch.randn(batch_size, 3) * 0.1
            # volume_translation[2] = 0.1
            volumes = init_boundary_volume(
                volume_size=volume_size,
                batch_size=batch_size,
                shape=shape,
                volume_translation=volume_translation,
            )[0]

            # convert the volumes to a pointcloud
            points = []
            points_features = []
            for densities_one, features_one, grid_one in zip(
                    volumes.densities(),
                    volumes.features(),
                    volumes.get_coord_grid(world_coordinates=True),
            ):
                opaque = densities_one.view(-1) > 1e-4
                points.append(grid_one.view(-1, 3)[opaque])
                points_features.append(features_one.reshape(3, -1).t()[opaque])
            pointclouds = Pointclouds(points, features=points_features)
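            # Each cloud now holds the world coordinates of every voxel with
            # non-negligible density, colored by the corresponding voxel features.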

            # init the grid raysampler with the ndc grid
            coord_range = 1.0
            half_pix_size = coord_range / max(*image_size)
            raysampler = NDCMultinomialRaysampler(
                image_width=image_size[1],
                image_height=image_size[0],
                n_pts_per_ray=256,
                min_depth=0.1,
                max_depth=2.0,
            )

            # get the EA raymarcher
            raymarcher = EmissionAbsorptionRaymarcher()

            # jitter the camera intrinsics a bit for each render
            cameras_randomized = cameras.clone()
            cameras_randomized.principal_point = (
                torch.randn_like(cameras.principal_point) * 0.3)
            cameras_randomized.focal_length = (
                cameras.focal_length +
                torch.randn_like(cameras.focal_length) * 0.2)

            # get the volumetric render
            images = VolumeRenderer(raysampler=raysampler,
                                    raymarcher=raymarcher,
                                    sample_mode="bilinear")(
                                        cameras=cameras_randomized,
                                        volumes=volumes)[0][..., :3]

            # instantiate the points renderer
            point_radius = 6 * half_pix_size
            points_renderer = PointsRenderer(
                rasterizer=PointsRasterizer(
                    cameras=cameras_randomized,
                    raster_settings=PointsRasterizationSettings(
                        image_size=image_size,
                        radius=point_radius,
                        points_per_pixel=10),
                ),
                compositor=AlphaCompositor(),
            )

            # get the point render
            images_pts = points_renderer(pointclouds)

            if shape == "sphere":
                diff = (images - images_pts).abs().mean(dim=-1)
                mu_diff = diff.mean(dim=(1, 2))
                std_diff = diff.std(dim=(1, 2))
                self.assertClose(mu_diff, torch.zeros_like(mu_diff), atol=3e-2)
                self.assertClose(std_diff,
                                 torch.zeros_like(std_diff),
                                 atol=6e-2)

            if DEBUG:
                outdir = tempfile.gettempdir() + "/test_volume_vs_pts_renderer"
                os.makedirs(outdir, exist_ok=True)

                frames = []
                for (image, image_pts) in zip(images, images_pts):
                    diff_image = (((image - image_pts) * 0.5 + 0.5).mean(
                        dim=2, keepdim=True).repeat(1, 1, 3))
                    image_pil = Image.fromarray(
                        (torch.cat((image, image_pts, diff_image),
                                   dim=1).detach().cpu().numpy() *
                         255.0).astype(np.uint8))
                    frames.append(image_pil)

                # export gif
                outfile = os.path.join(outdir,
                                       f"volume_vs_pts_render_{shape}.gif")
                frames[0].save(
                    outfile,
                    save_all=True,
                    append_images=frames[1:],
                    duration=batch_size // 15,
                    loop=0,
                )
                print(f"exported {outfile}")

                # export concatenated frames
                outfile_cat = os.path.join(
                    outdir, f"volume_vs_pts_render_{shape}.png")
                Image.fromarray(
                    np.concatenate([np.array(f) for f in frames],
                                   axis=0)).save(outfile_cat)
                print(f"exported {outfile_cat}")
Example #11
    # perspective projection: x = f * X / Z (assuming px = py = 0), plus normalization of Z
    verts[:, :, 1] = verts[:, :, 1].clone() * proj_cam[:, :1] / verts[:, :, 2].clone()
    verts[:, :, 0] = verts[:, :, 0].clone() * proj_cam[:, :1] / verts[:, :, 2].clone()
    verts[:, :, 2] = ((verts[:, :, 2] - verts[:, :, 2].min()) /
                      (verts[:, :, 2].max() - verts[:, :, 2].min()) - 0.5).detach()
    verts[:, :, 2] += 10

    features = torch.ones_like(verts)
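    # Note: `features` appears unused in this excerpt; the point cloud below
    # takes its colors from mesh.visual.vertex_colors instead.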
    point_cloud = Pointclouds(points=verts[:, :, :3],
                              features=torch.Tensor(
                                  mesh.visual.vertex_colors[None]).cuda())

    cameras = OrthographicCameras(device=device)
    raster_settings = PointsRasterizationSettings(image_size=img_size,
                                                  radius=0.005,
                                                  points_per_pixel=10)
    renderer = PointsRenderer(
        rasterizer=PointsRasterizer(cameras=cameras,
                                    raster_settings=raster_settings),
        compositor=AlphaCompositor(background_color=(33, 33, 33)))
    img_pred = renderer(point_cloud)
    frames.append(img_pred[0, :, :, :3].cpu())
    #cv2.imwrite('%s/points%04d.png'%(args.outdir,i), np.asarray(img_pred[0,:,:,:3].cpu())[:,:,::-1])
imageio.mimsave('./output-depth.gif', frames, duration=5. / len(frames))
Example #12
for epoch in range(opt.epoch, opt.n_epochs):
    for i, batch in enumerate(train_loader):

        # Model inputs
        real_RGB = batch[0].float()
        real_RGB = real_RGB.to(device)
        point = batch[1].float()
        point = point.to(device)
        geom = batch[2]
        geom = geom.to(device)
        # Initialize a camera.
        R, T = look_at_view_transform(1, 0, 0)  # directly above
        cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0)
        raster_settings = PointsRasterizationSettings(
            image_size=512,
            radius=0.01,
            points_per_pixel=10
        )


        # Create a points renderer by compositing points using an alpha compositor (nearer points
        # are weighted more heavily). See [1] for an explanation.
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        renderer = PointsRenderer(
            rasterizer=rasterizer,
            compositor=AlphaCompositor()
        )
        verts = Pointclouds(points=[point[0,:,:]])
        real_A = rasterizer(verts)
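        # The rasterizer returns a PointFragments namedtuple (idx, zbuf, dists),
        # so index 1 below selects the per-pixel depth buffer (zbuf).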
        real_A = real_A[1]
        # print(real_A.size())
Example #13
    def test_ndc_grid_sample_rendering(self):
        """
        Use PyTorch3D point renderer to render a colored point cloud, then
        sample the image at the locations of the point projections with
        `ndc_grid_sample`. Finally, assert that the sampled colors are equal to the
        original point cloud colors.

        Note that, in order to ensure correctness, we use a nearest-neighbor
        assignment point renderer (i.e. no soft splatting).
        """

        # generate a bunch of 3D points on a regular grid lying in the z-plane
        n_grid_pts = 10
        grid_scale = 0.9
        z_plane = 2.0
        image_size = [128, 128]
        point_radius = 0.015
        n_pts = n_grid_pts * n_grid_pts
        pts = torch.stack(
            meshgrid_ij([torch.linspace(-grid_scale, grid_scale, n_grid_pts)] * 2),
            dim=-1,
        )
        pts = torch.cat([pts, z_plane * torch.ones_like(pts[..., :1])], dim=-1)
        pts = pts.reshape(1, n_pts, 3)

        # color the points randomly
        pts_colors = torch.rand(1, n_pts, 3)

        # make trivial rendering cameras
        cameras = PerspectiveCameras(
            R=eyes(dim=3, N=1),
            device=pts.device,
            T=torch.zeros(1, 3, dtype=torch.float32, device=pts.device),
        )

        # render the point cloud
        pcl = Pointclouds(points=pts, features=pts_colors)
        renderer = NearestNeighborPointsRenderer(
            rasterizer=PointsRasterizer(
                cameras=cameras,
                raster_settings=PointsRasterizationSettings(
                    image_size=image_size,
                    radius=point_radius,
                    points_per_pixel=1,
                ),
            ),
            compositor=AlphaCompositor(),
        )
        im_render = renderer(pcl)

        # sample the render at projected pts
        pts_proj = cameras.transform_points(pcl.points_padded())[..., :2]
        pts_colors_sampled = ndc_grid_sample(
            im_render,
            pts_proj,
            mode="nearest",
            align_corners=False,
        ).permute(0, 2, 1)

        # assert that the samples are the same as original points
        self.assertClose(pts_colors, pts_colors_sampled, atol=1e-4)
Example #14
def render_point_cloud_pytorch3d(
    camera,
    point_cloud,
    render_size: Tuple[int, int],
    point_radius: float = 0.03,
    topk: int = 10,
    eps: float = 1e-2,
    bg_color=None,
    bin_size: Optional[int] = None,
    **kwargs
):

    # feature dimension
    featdim = point_cloud.features_packed().shape[-1]

    # move to the camera coordinates; using identity cameras in the renderer
    point_cloud = _transform_points(camera, point_cloud, eps, **kwargs)
    camera_trivial = camera.clone()
    camera_trivial.R[:] = torch.eye(3)
    camera_trivial.T *= 0.0
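    # With identity rotation and zero translation, the "trivial" camera applies
    # only the intrinsic projection; the extrinsics were already folded into the
    # points by _transform_points above.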

    bin_size = (
        bin_size
        if bin_size is not None
        else (64 if int(max(render_size)) > 1024 else None)
    )
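    # A bin_size of 64 is used for renders larger than 1024 px; bin_size=None
    # lets the rasterizer pick a heuristic value.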
    rasterizer = PointsRasterizer(
        cameras=camera_trivial,
        raster_settings=PointsRasterizationSettings(
            image_size=render_size,
            radius=point_radius,
            points_per_pixel=topk,
            bin_size=bin_size,
        ),
    )

    fragments = rasterizer(point_cloud, **kwargs)

    # Construct weights based on the distance of a point to the true point.
    # However, this could be done differently: e.g. predicted as opposed
    # to a function of the weights.
    r = rasterizer.raster_settings.radius

    # set up the blending weights
    dists2 = fragments.dists
    weights = 1 - dists2 / (r * r)
    ok = cast(torch.BoolTensor, (fragments.idx >= 0)).float()

    weights = weights * ok

    fragments_prm = fragments.idx.long().permute(0, 3, 1, 2)
    weights_prm = weights.permute(0, 3, 1, 2)
    images = AlphaCompositor()(
        fragments_prm,
        weights_prm,
        point_cloud.features_packed().permute(1, 0),
        background_color=bg_color if bg_color is not None else [0.0] * featdim,
        **kwargs,
    )

    # get the depths ...
    # weighted_fs[b,c,i,j] = sum_k cum_alpha_k * features[c,pointsidx[b,k,i,j]]
    # cum_alpha_k = alphas[b,k,i,j] * prod_l=0..k-1 (1 - alphas[b,l,i,j])
    cumprod = torch.cumprod(1 - weights, dim=-1)
    cumprod = torch.cat((torch.ones_like(cumprod[..., :1]), cumprod[..., :-1]), dim=-1)
    depths = (weights * cumprod * fragments.zbuf).sum(dim=-1)
    # add the rendering mask
    render_mask = -torch.prod(1.0 - weights, dim=-1) + 1.0
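    # render_mask = 1 - prod_k(1 - w_k): the accumulated opacity of all points
    # that hit the pixel.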

    # cat depths and render mask
    rendered_blob = torch.cat((images, depths[:, None], render_mask[:, None]), dim=1)

    # reshape back
    rendered_blob = Fu.interpolate(
        rendered_blob,
        # pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got `Tuple[int,
        #  ...]`.
        size=tuple(render_size),
        mode="bilinear",
    )

    data_rendered, depth_rendered, render_mask = rendered_blob.split(
        [rendered_blob.shape[1] - 2, 1, 1],
        dim=1,
    )

    return data_rendered, render_mask, depth_rendered