import logging
from os import path

import imageio
import torch
from pytorch3d.renderer import (
    PerspectiveCameras,
    PointsRasterizationSettings,
    PointsRasterizer,
    PulsarPointsRenderer,
)
from pytorch3d.structures import Pointclouds

LOGGER = logging.getLogger(__name__)


def cli():
    """
    Basic example for the pulsar sphere renderer using the PyTorch3D interface.

    Writes to `basic-pt3d.png`.
    """
    LOGGER.info("Rendering on GPU...")
    torch.manual_seed(1)
    n_points = 10
    width = 1_000
    height = 1_000
    device = torch.device("cuda")
    # Generate sample data.
    vert_pos = torch.rand(n_points, 3, dtype=torch.float32,
                          device=device) * 10.0
    vert_pos[:, 2] += 25.0
    vert_pos[:, :2] -= 5.0
    vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
    pcl = Pointclouds(points=vert_pos[None, ...], features=vert_col[None, ...])
    # Alternatively, you can also use the look_at_view_transform to get R and T:
    # R, T = look_at_view_transform(
    #     dist=30.0, elev=0.0, azim=180.0, at=((0.0, 0.0, 30.0),), up=((0, 1, 0),),
    # )
    cameras = PerspectiveCameras(
        # The focal length must be double the size for PyTorch3D because of the NDC
        # coordinates spanning a range of two - and they must be normalized by the
        # sensor width (see the pulsar example). This means we need here
        # 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
        focal_length=(5.0 * 2.0 / 2.0, ),
        R=torch.eye(3, dtype=torch.float32, device=device)[None, ...],
        T=torch.zeros((1, 3), dtype=torch.float32, device=device),
        image_size=((width, height), ),
        device=device,
    )
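    # Worked numbers for the focal-length comment above: with the pulsar
    # focal length of 5.0 and a sensor width of 2.0 (the values used in the
    # pulsar example), the PyTorch3D focal length is
    # 5.0 * 2.0 / sensor_width = 5.0 * 2.0 / 2.0 = 5.0.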
    vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
    raster_settings = PointsRasterizationSettings(
        image_size=(width, height),
        radius=vert_rad,
    )
    rasterizer = PointsRasterizer(cameras=cameras,
                                  raster_settings=raster_settings)
    renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
    # Render.
    image = renderer(
        pcl,
        gamma=(1.0e-1, ),  # Renderer blending parameter gamma, in [1., 1e-5].
        znear=(1.0, ),
        zfar=(45.0, ),
        radius_world=True,
        bg_col=torch.ones((3, ), dtype=torch.float32, device=device),
    )[0]
    LOGGER.info("Writing image to `%s`.", path.abspath("basic-pt3d.png"))
    imageio.imsave("basic-pt3d.png",
                   (image.cpu().detach() * 255.0).to(torch.uint8).numpy())
    LOGGER.info("Done.")
Example #2
    def setup(self, device):
        if self.renderer is not None:
            return
        R, T = look_at_view_transform(
            self.opt.viewpoint_distance,
            self.opt.viewpoint_elevation,
            self.opt.viewpoint_azimuth,
            device=device,
        )
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=self.opt.raster_image_size,
            radius=self.opt.raster_radius,
            points_per_pixel=self.opt.raster_points_per_pixel,
        )
        rasterizer = PointsRasterizer(cameras=cameras,
                                      raster_settings=raster_settings)
        # Note: `lights` is created here but never used below; the pulsar
        # renderer composites sphere colors directly and takes no lights.
        lights = PointLights(device=device,
                             location=[self.opt.lights_location])
        self.renderer = PulsarPointsRenderer(rasterizer=rasterizer,
                                             n_channels=3).to(device)
Example #3
    def test_pointcloud(self):
        data = _CommonData()
        clouds = Pointclouds(points=torch.tensor([[data.point]])).extend(2)
        colorful_cloud = Pointclouds(points=torch.tensor([[data.point]]),
                                     features=torch.ones(1, 1, 3)).extend(2)
        points_per_pixel = 2
        # for camera in [data.camera_screen]:
        for camera in (data.camera_ndc, data.camera_screen):
            rasterizer = PointsRasterizer(
                cameras=camera,
                raster_settings=PointsRasterizationSettings(
                    image_size=data.image_size,
                    radius=0.0001,
                    points_per_pixel=points_per_pixel,
                ),
            )
            # when rasterizing we expect only one pixel to be occupied
            rasterizer_output = rasterizer(clouds).idx
            self.assertTupleEqual(rasterizer_output.shape, (2, ) +
                                  data.image_size + (points_per_pixel, ))
            found = torch.nonzero(rasterizer_output != -1)
            self.assertTupleEqual(found.shape, (2, 4))
            self.assertListEqual(found[0].tolist(), [0, data.y, data.x, 0])
            self.assertListEqual(found[1].tolist(), [1, data.y, data.x, 0])

            if camera.in_ndc():
                # Pulsar not currently working in screen space.
                pulsar_renderer = PulsarPointsRenderer(rasterizer=rasterizer)
                pulsar_output = pulsar_renderer(colorful_cloud,
                                                gamma=(0.1, 0.1),
                                                znear=(0.1, 0.1),
                                                zfar=(70, 70))
                self.assertTupleEqual(pulsar_output.shape,
                                      (2, ) + data.image_size + (3, ))
                # Look for points rendered in the red channel only, expecting our one.
                # Check the first batch element only.
                # TODO: Something is odd with the second.
                found = torch.nonzero(pulsar_output[0, :, :, 0])
                self.assertTupleEqual(found.shape, (1, 2))
                self.assertListEqual(found[0].tolist(), [data.y, data.x])
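
For reference, the loop above distinguishes NDC- from screen-space cameras via `camera.in_ndc()`. The sketch below shows how two such `PerspectiveCameras` can be constructed; the concrete values in `_CommonData` are not shown in this snippet, so the numbers here are placeholders.

from pytorch3d.renderer import PerspectiveCameras

camera_ndc = PerspectiveCameras(
    focal_length=1.0,                  # NDC units (in_ndc=True is the default)
)
camera_screen = PerspectiveCameras(
    focal_length=64.0,                 # pixels, because in_ndc=False
    principal_point=((64.0, 64.0),),   # pixels
    in_ndc=False,
    image_size=((128, 128),),          # (H, W), required for screen-space cameras
)
assert camera_ndc.in_ndc() and not camera_screen.in_ndc()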