Example #1
def _bm_rasterize_points_with_init(N,
                                   P,
                                   img_size=32,
                                   radius=0.1,
                                   pts_per_pxl=3,
                                   device="cpu",
                                   expand_radius=False):
    torch.manual_seed(231)
    device = torch.device(device)
    points = torch.randn(N, P, 3, device=device)
    pointclouds = Pointclouds(points=points)

    if expand_radius:
        points_padded = pointclouds.points_padded()
        radius = torch.full((N, P), fill_value=radius).type_as(points_padded)

    args = (pointclouds, img_size, radius, pts_per_pxl)
    if device.type == "cuda":
        torch.cuda.synchronize(device)

    def fn():
        rasterize_points(*args)
        if device.type == "cuda":
            torch.cuda.synchronize(device)

    return fn
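A minimal driver sketch for the init function above (not part of the original snippet; it assumes the same torch / pytorch3d imports the function itself relies on):

import timeit

# Build the setup once, then time only the rasterization closure it returns.
fn = _bm_rasterize_points_with_init(N=4, P=10000, img_size=64, device="cpu")
elapsed = timeit.timeit(fn, number=10)
print("rasterize_points: %.2f ms per call" % (elapsed / 10 * 1e3))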
Example #2
    def test_cpu_vs_cuda_naive(self):
        torch.manual_seed(231)
        image_size = 64
        radius = 0.1
        points_per_pixel = 5

        # Test homogeneous point cloud batch.
        N = 2
        P = 1000
        bin_size = 0
        points_cpu = torch.rand(N, P, 3, requires_grad=True)
        points_cuda = points_cpu.cuda().detach().requires_grad_(True)
        pointclouds_cpu = Pointclouds(points=points_cpu)
        pointclouds_cuda = Pointclouds(points=points_cuda)
        args_cpu = (pointclouds_cpu, image_size, radius, points_per_pixel,
                    bin_size)
        args_cuda = (pointclouds_cuda, image_size, radius, points_per_pixel,
                     bin_size)
        self._compare_impls(
            rasterize_points,
            rasterize_points,
            args_cpu,
            args_cuda,
            points_cpu,
            points_cuda,
            compare_grads=True,
        )
    def test_simple_sphere_batched(self):
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(
            points=verts_padded, features=torch.ones_like(verts_padded)
        )
        batch_size = 20
        pointclouds = pointclouds.extend(batch_size)
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=256, radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = NormWeightedCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        filename = "simple_pointcloud_sphere.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        images = renderer(pointclouds)
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref)
Example #4
    def init_point_cloud(
        batch_size=10,
        n_points=1000,
        dim=3,
        device=None,
        use_pointclouds=False,
        random_pcl_size=True,
        fix_seed=None,
    ):
        """
        Generate a batch of normally distributed point clouds.
        """

        if fix_seed is not None:
            # make sure we always generate the same pointcloud
            seed = torch.random.get_rng_state()
            torch.manual_seed(fix_seed)

        if use_pointclouds:
            assert dim == 3, "Pointclouds support only 3-dim points."
            # generate a `batch_size` point clouds with number of points
            # between 4 and `n_points`
            if random_pcl_size:
                n_points_per_batch = torch.randint(
                    low=4,
                    high=n_points,
                    size=(batch_size, ),
                    device=device,
                    dtype=torch.int64,
                )
                X_list = [
                    torch.randn(int(n_pt),
                                dim,
                                device=device,
                                dtype=torch.float32)
                    for n_pt in n_points_per_batch
                ]
                X = Pointclouds(X_list)
            else:
                X = torch.randn(batch_size,
                                n_points,
                                dim,
                                device=device,
                                dtype=torch.float32)
                X = Pointclouds(list(X))
        else:
            X = torch.randn(batch_size,
                            n_points,
                            dim,
                            device=device,
                            dtype=torch.float32)

        if fix_seed is not None:
            torch.random.set_rng_state(seed)

        return X
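Hypothetical usage of the helper above (a sketch; it assumes the helper is reachable as a plain function and that torch and pytorch3d.structures.Pointclouds are imported):

# Heterogeneous batch wrapped in a Pointclouds object, reproducible via fix_seed.
pcl = init_point_cloud(batch_size=4, n_points=500, use_pointclouds=True, fix_seed=0)
print(pcl.num_points_per_cloud())  # per-cloud sizes drawn from [4, 500)

# Plain (batch_size, n_points, 3) tensor of normally distributed points.
pts = init_point_cloud(batch_size=4, n_points=500)
print(pts.shape)  # torch.Size([4, 500, 3])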
    def test_allempty(self):
        clouds = Pointclouds([], [])
        self.assertEqual(len(clouds), 0)
        self.assertIsNone(clouds.normals_list())
        self.assertIsNone(clouds.features_list())
        self.assertEqual(clouds.points_padded().shape[0], 0)
        self.assertIsNone(clouds.normals_padded())
        self.assertIsNone(clouds.features_padded())
        self.assertEqual(clouds.points_packed().shape[0], 0)
        self.assertIsNone(clouds.normals_packed())
        self.assertIsNone(clouds.features_packed())
    def test_get_bounding_boxes(self):
        device = torch.device("cuda:0")
        points_list = []
        for size in [10]:
            points = torch.rand((size, 3), dtype=torch.float32, device=device)
            points_list.append(points)

        mins = torch.min(points, dim=0)[0]
        maxs = torch.max(points, dim=0)[0]
        bboxes_gt = torch.stack([mins, maxs], dim=1).unsqueeze(0)
        clouds = Pointclouds(points_list)
        bboxes = clouds.get_bounding_boxes()
        self.assertClose(bboxes_gt, bboxes)
Example #7
def init_volume_boundary_pointcloud(
    batch_size: int,
    volume_size: Tuple[int, int, int],
    n_points: int,
    interp_mode: str,
    device: str,
    require_grad: bool = False,
):
    """
    Initialize a point cloud that closely follows a boundary of
    a volume with a given size. The volume buffer is initialized as well.
    """

    # generate a 3D point cloud sampled from sides of a [0,1] cube
    xyz, rgb = init_cube_point_cloud(batch_size,
                                     n_points=n_points,
                                     device=device,
                                     rotate_y=True)

    # make volume_size tensor
    volume_size_t = torch.tensor(volume_size,
                                 dtype=xyz.dtype,
                                 device=xyz.device)

    if interp_mode == "trilinear":
        # make the xyz locations fall on the boundary of the
        # first/last two voxels along each spatial dimension of the
        # volume - this properly checks the correctness of the
        # trilinear interpolation scheme
        xyz = (xyz - 0.5) * ((volume_size_t - 2) /
                             (volume_size_t - 1))[[2, 1, 0]] + 0.5

    # rescale the cube pointcloud to overlap with the volume sides
    # of the volume
    rel_scale = volume_size_t / volume_size[0]
    xyz = xyz * rel_scale[[2, 1, 0]][None, None]

    # enable grad accumulation for the differentiability check
    xyz.requires_grad = require_grad
    rgb.requires_grad = require_grad

    # create the pointclouds structure
    pointclouds = Pointclouds(xyz, features=rgb)

    # set the volume translation so that the point cloud is centered
    # around 0
    volume_translation = -0.5 * rel_scale[[2, 1, 0]]

    # set the voxel size to 1 / (volume_size-1)
    volume_voxel_size = 1 / (volume_size[0] - 1.0)

    # instantiate the volumes
    initial_volumes = Volumes(
        features=xyz.new_zeros(batch_size, 3, *volume_size),
        densities=xyz.new_zeros(batch_size, 1, *volume_size),
        volume_translation=volume_translation,
        voxel_size=volume_voxel_size,
    )

    return pointclouds, initial_volumes
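An illustrative follow-up (a sketch, not part of the original code; it assumes the undefined helper init_cube_point_cloud is available and that pytorch3d.ops.add_pointclouds_to_volumes is importable): splat the boundary cloud into the zero-initialized volume it was built to match.

from pytorch3d.ops import add_pointclouds_to_volumes

pointclouds, initial_volumes = init_volume_boundary_pointcloud(
    batch_size=2,
    volume_size=(32, 64, 64),
    n_points=10000,
    interp_mode="trilinear",
    device="cuda:0",
)
volumes = add_pointclouds_to_volumes(pointclouds, initial_volumes, mode="trilinear")
print(volumes.densities().shape)  # (2, 1, 32, 64, 64)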
    def _test_behind_camera(self, rasterize_points_fn, device, bin_size=None):
        # Test case where all points are behind the camera -- nothing should
        # get rasterized
        N = 2
        P = 32
        xy = torch.randn(N, P, 2)
        z = torch.randn(N, P, 1).abs().mul(-1)  # Make them all negative
        points = torch.cat([xy, z], dim=2).to(device)
        image_size = 16
        points_per_pixel = 3
        radius = 0.2
        idx_expected = torch.full(
            (N, 16, 16, 3), fill_value=-1, dtype=torch.int32, device=device
        )
        zbuf_expected = torch.full(
            (N, 16, 16, 3), fill_value=-1, dtype=torch.float32, device=device
        )
        dists_expected = zbuf_expected.clone()
        pointclouds = Pointclouds(points=points)
        if bin_size == -1:
            # simple python case with no binning
            idx, zbuf, dists = rasterize_points_fn(
                pointclouds, image_size, radius, points_per_pixel
            )
        else:
            idx, zbuf, dists = rasterize_points_fn(
                pointclouds, image_size, radius, points_per_pixel, bin_size
            )
        idx_same = (idx == idx_expected).all().item() == 1
        zbuf_same = (zbuf == zbuf_expected).all().item() == 1

        self.assertTrue(idx_same)
        self.assertTrue(zbuf_same)
        self.assertTrue(torch.allclose(dists, dists_expected))
    def test_simple_sphere(self):
        device = torch.device("cuda:0")
        sphere_mesh = ico_sphere(1, device)
        verts_padded = sphere_mesh.verts_padded()
        # Shift vertices to check coordinate frames are correct.
        verts_padded[..., 1] += 0.2
        verts_padded[..., 0] += 0.2
        pointclouds = Pointclouds(
            points=verts_padded, features=torch.ones_like(verts_padded)
        )
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = PointsRasterizationSettings(
            image_size=256, radius=5e-2, points_per_pixel=1
        )
        rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
        compositor = NormWeightedCompositor()
        renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

        # Load reference image
        filename = "simple_pointcloud_sphere.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(pointclouds)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref)
Example #10
    def init_spherical_pcl(
        batch_size=3,
        num_points=3000,
        device=None,
        use_pointclouds=False
    ) -> Tuple[Union[torch.Tensor, Pointclouds], torch.Tensor]:
        # random spherical point cloud
        pcl = torch.randn((batch_size, num_points, 3),
                          device=device,
                          dtype=torch.float32)
        pcl = torch.nn.functional.normalize(pcl, dim=2)

        # GT normals are the same as
        # the location of each point on the 0-centered sphere
        normals = pcl.clone()

        # scale and offset the sphere randomly
        pcl *= torch.rand(batch_size, 1, 1).type_as(pcl) + 1.0
        pcl += torch.randn(batch_size, 1, 3).type_as(pcl)

        if use_pointclouds:
            num_points = torch.randint(size=(batch_size, ),
                                       low=int(num_points * 0.7),
                                       high=num_points)
            pcl, normals = [[x[:np] for x, np in zip(X, num_points)]
                            for X in (pcl, normals)]
            pcl = Pointclouds(pcl, normals=normals)

        return pcl, normals
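A possible consumer of this generator (a sketch; it assumes torch and pytorch3d.ops.estimate_pointcloud_normals are importable): normals estimated on the clean sphere should agree with the returned ground truth up to sign.

import torch
from pytorch3d.ops import estimate_pointcloud_normals

pcl, normals_gt = init_spherical_pcl(batch_size=2, num_points=2000)
normals_est = estimate_pointcloud_normals(pcl, neighborhood_size=50)
cos = torch.nn.functional.cosine_similarity(normals_est, normals_gt, dim=-1)
print(cos.abs().mean())  # expected to be close to 1.0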
Example #11
 def init_point_cloud(
     batch_size=10,
     n_points=1000,
     dim=3,
     device=None,
     use_pointclouds=False,
     random_pcl_size=True,
 ):
     """
     Generate a batch of normally distributed point clouds.
     """
     if use_pointclouds:
         assert dim == 3, "Pointclouds support only 3-dim points."
         # generate a `batch_size` point clouds with number of points
         # between 4 and `n_points`
         if random_pcl_size:
             n_points_per_batch = torch.randint(
                 low=4,
                 high=n_points,
                 size=(batch_size, ),
                 device=device,
                 dtype=torch.int64,
             )
             X_list = [
                 torch.randn(int(n_pt),
                             dim,
                             device=device,
                             dtype=torch.float32)
                 for n_pt in n_points_per_batch
             ]
             X = Pointclouds(X_list)
         else:
             X = torch.randn(
                 batch_size,
                 n_points,
                 dim,
                 device=device,
                 dtype=torch.float32,
             )
             X = Pointclouds(list(X))
     else:
         X = torch.randn(batch_size,
                         n_points,
                         dim,
                         device=device,
                         dtype=torch.float32)
     return X
Example #12
    def init_cloud(
        num_clouds: int = 3,
        max_points: int = 100,
        channels: int = 4,
        lists_to_tensors: bool = False,
        with_normals: bool = True,
        with_features: bool = True,
        min_points: int = 0,
    ):
        """
        Function to generate a Pointclouds object of `num_clouds` clouds
        with a random number of points per cloud.

        Args:
            num_clouds: Number of clouds to generate.
            channels: Number of features.
            max_points: Max number of points per cloud.
            lists_to_tensors: Determines whether the generated clouds should be
                              constructed from lists (=False) or
                              tensors (=True) of points/normals/features.
            with_normals: bool whether to include normals
            with_features: bool whether to include features
            min_points: Min number of points per cloud

        Returns:
            Pointclouds object.
        """
        device = torch.device('cuda:0')
        p = torch.randint(low=min_points, high=max_points, size=(num_clouds,))
        if lists_to_tensors:
            p.fill_(p[0])

        points_list = [
            torch.rand((i, 3), device=device, dtype=torch.float32) for i in p
        ]
        normals_list, features_list = None, None
        if with_normals:
            normals_list = [
                torch.rand((i, 3), device=device, dtype=torch.float32)
                for i in p
            ]
        if with_features:
            features_list = [
                torch.rand((i, channels), device=device, dtype=torch.float32)
                for i in p
            ]

        if lists_to_tensors:
            points_list = torch.stack(points_list)
            if with_normals:
                normals_list = torch.stack(normals_list)
            if with_features:
                features_list = torch.stack(features_list)

        return Pointclouds(
            points_list, normals=normals_list, features=features_list
        )
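Hypothetical call (assumes a CUDA device and that init_cloud is accessible as a standalone helper):

# Three heterogeneous clouds with normals and 4-channel features.
clouds = init_cloud(num_clouds=3, max_points=50, channels=4)
print(clouds.num_points_per_cloud())
print(clouds.features_padded().shape)  # (3, max points in the batch, 4)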
Example #13
def naive_scale(cloud, scale):
    if not torch.is_tensor(scale):
        scale = torch.full((len(cloud), ), scale, device=cloud.device)
    new_points_list = [
        scale[i] * points.clone()
        for (i, points) in enumerate(cloud.points_list())
    ]
    return Pointclouds(new_points_list, cloud.normals_list(),
                       cloud.features_list())
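A quick cross-check of the reference helper above (a sketch; it assumes torch and Pointclouds are imported and that Pointclouds provides a scale method, as in recent PyTorch3D releases):

import torch
from pytorch3d.structures import Pointclouds

clouds = Pointclouds([torch.rand(8, 3), torch.rand(5, 3)])
scaled_naive = naive_scale(clouds, 2.0)
scaled = clouds.scale(2.0)
for a, b in zip(scaled_naive.points_list(), scaled.points_list()):
    assert torch.allclose(a, b)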
    def test_padded_to_packed_idx(self):
        device = torch.device("cuda:0")
        points_list = []
        npoints = [10, 20, 30]
        for p in npoints:
            points = torch.rand((p, 3), dtype=torch.float32, device=device)
            points_list.append(points)

        clouds = Pointclouds(points_list)

        padded_to_packed_idx = clouds.padded_to_packed_idx()
        points_packed = clouds.points_packed()
        points_padded = clouds.points_padded()
        points_padded_flat = points_padded.view(-1, 3)

        self.assertClose(points_padded_flat[padded_to_packed_idx], points_packed)

        idx = padded_to_packed_idx.view(-1, 1).expand(-1, 3)
        self.assertClose(points_padded_flat.gather(0, idx), points_packed)
def naive_offset(clouds, offsets_packed):
    new_points_packed = clouds.points_packed() + offsets_packed
    new_points_list = list(
        new_points_packed.split(clouds.num_points_per_cloud().tolist(), 0)
    )
    return Pointclouds(
        points=new_points_list,
        normals=clouds.normals_list(),
        features=clouds.features_list(),
    )
Example #16
    def init_pointclouds(num_clouds: int = 10,
                         num_points: int = 1000) -> Pointclouds:
        device = torch.device("cuda:0")
        points_list = []
        for _ in range(num_clouds):
            # points in the space of [-1, 1]
            points = (
                torch.rand((num_points, 3), dtype=torch.float32, device=device)
                * 2.0 - 1.0
            )
            points_list.append(points)
        pointclouds = Pointclouds(points=points_list)

        return pointclouds
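Illustrative downstream use (a sketch; it assumes a CUDA device and that pytorch3d.ops.sample_farthest_points is available): because every generated cloud has the same number of points, the padded points tensor can be fed to batched ops directly.

from pytorch3d.ops import sample_farthest_points

pointclouds = init_pointclouds(num_clouds=2, num_points=4096)
sub_points, sub_idx = sample_farthest_points(pointclouds.points_padded(), K=256)
print(sub_points.shape)  # (2, 256, 3)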
Example #17
    def test_subsample(self):
        lengths = [4, 5, 13, 3]
        points = [torch.rand(length, 3) for length in lengths]
        features = [torch.rand(length, 5) for length in lengths]
        normals = [torch.rand(length, 3) for length in lengths]

        pcl1 = Pointclouds(points=points).cuda()
        self.assertIs(pcl1, pcl1.subsample(13))
        self.assertIs(pcl1, pcl1.subsample([6, 13, 13, 13]))

        lengths_max_4 = torch.tensor([4, 4, 4, 3]).cuda()
        for with_normals, with_features in itertools.product([True, False],
                                                             repeat=2):
            with self.subTest(f"{with_normals} {with_features}"):
                pcl = Pointclouds(
                    points=points,
                    normals=normals if with_normals else None,
                    features=features if with_features else None,
                )
                pcl_copy = pcl.subsample(max_points=4)
                for length, points_ in zip(lengths_max_4,
                                           pcl_copy.points_list()):
                    self.assertEqual(points_.shape, (length, 3))
                if with_normals:
                    for length, normals_ in zip(lengths_max_4,
                                                pcl_copy.normals_list()):
                        self.assertEqual(normals_.shape, (length, 3))
                else:
                    self.assertIsNone(pcl_copy.normals_list())
                if with_features:
                    for length, features_ in zip(lengths_max_4,
                                                 pcl_copy.features_list()):
                        self.assertEqual(features_.shape, (length, 5))
                else:
                    self.assertIsNone(pcl_copy.features_list())

        pcl2 = Pointclouds(points=points)
        pcl_copy2 = pcl2.subsample(lengths_max_4)
        for length, points_ in zip(lengths_max_4, pcl_copy2.points_list()):
            self.assertEqual(points_.shape, (length, 3))
Example #18
    def test_compare_coarse_cpu_vs_cuda(self):
        torch.manual_seed(231)
        N = 3
        max_P = 1000
        image_size = (64, 64)
        radius = 0.1
        bin_size = 16
        max_points_per_bin = 500

        # create heterogeneous point clouds
        points = []
        for _ in range(N):
            p = np.random.choice(max_P)
            points.append(torch.randn(p, 3))

        pointclouds = Pointclouds(points=points)
        points_packed = pointclouds.points_packed()
        cloud_to_packed_first_idx = pointclouds.cloud_to_packed_first_idx()
        num_points_per_cloud = pointclouds.num_points_per_cloud()

        radius = torch.full((points_packed.shape[0],), fill_value=radius)
        args = (
            points_packed,
            cloud_to_packed_first_idx,
            num_points_per_cloud,
            image_size,
            radius,
            bin_size,
            max_points_per_bin,
        )
        bp_cpu = _C._rasterize_points_coarse(*args)

        device = get_random_cuda_device()
        pointclouds_cuda = pointclouds.to(device)
        points_packed = pointclouds_cuda.points_packed()
        cloud_to_packed_first_idx = pointclouds_cuda.cloud_to_packed_first_idx()
        num_points_per_cloud = pointclouds_cuda.num_points_per_cloud()
        radius = radius.to(device)
        args = (
            points_packed,
            cloud_to_packed_first_idx,
            num_points_per_cloud,
            image_size,
            radius,
            bin_size,
            max_points_per_bin,
        )
        bp_cuda = _C._rasterize_points_coarse(*args)

        # Bin points might not be the same: CUDA version might write them in
        # any order. But if we sort the non-(-1) elements of the CUDA output
        # then they should be the same.
        for n in range(N):
            for by in range(bp_cpu.shape[1]):
                for bx in range(bp_cpu.shape[2]):
                    K = (bp_cpu[n, by, bx] != -1).sum().item()
                    idxs_cpu = bp_cpu[n, by, bx].tolist()
                    idxs_cuda = bp_cuda[n, by, bx].tolist()
                    idxs_cuda[:K] = sorted(idxs_cuda[:K])
                    self.assertEqual(idxs_cpu, idxs_cuda)
    def forward(self):
        # The Pointclouds object creates copies of its arguments - that's why
        # we have to create a new object in every forward step.
        pcl = Pointclouds(points=self.vert_pos[None, ...],
                          features=self.vert_col[None, ...])
        return self.renderer(
            pcl,
            gamma=(self.gamma, ),
            zfar=(45.0, ),
            znear=(1.0, ),
            radius_world=True,
            bg_col=torch.ones((3, ), dtype=torch.float32, device=DEVICE),
        )[0]
Example #20
    def test_init_error(self):
        # Check that the correct errors are raised when points/normals/features
        # are on different devices

        clouds = self.init_cloud(10, 100, 5)
        points_list = clouds.points_list()  # all tensors on cuda:0
        points_list = [
            p.to("cpu") if random.uniform(0, 1) > 0.5 else p
            for p in points_list
        ]
        features_list = clouds.features_list()
        normals_list = clouds.normals_list()

        with self.assertRaises(ValueError) as cm:
            Pointclouds(points=points_list,
                        features=features_list,
                        normals=normals_list)
        self.assertTrue("same device" in str(cm.exception))

        points_list = clouds.points_list()
        features_list = [
            f.to("cpu") if random.uniform(0, 1) > 0.2 else f
            for f in features_list
        ]
        with self.assertRaises(ValueError) as cm:
            Pointclouds(points=points_list,
                        features=features_list,
                        normals=normals_list)
        self.assertTrue("same device" in str(cm.exception))

        points_padded = clouds.points_padded()  # on cuda:0
        features_padded = clouds.features_padded().to("cpu")
        normals_padded = clouds.normals_padded()

        with self.assertRaises(ValueError) as cm:
            Pointclouds(points=points_padded,
                        features=features_padded,
                        normals=normals_padded)
        self.assertTrue("same device" in str(cm.exception))
    def test_empty(self):
        N, P, C = 10, 100, 2
        device = torch.device("cuda:0")
        points_list = []
        normals_list = []
        features_list = []
        valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)
        for n in range(N):
            if valid[n]:
                p = torch.randint(
                    3, high=P, size=(1,), dtype=torch.int32, device=device
                )[0]
                points = torch.rand((p, 3), dtype=torch.float32, device=device)
                normals = torch.rand((p, 3), dtype=torch.float32, device=device)
                features = torch.rand((p, C), dtype=torch.float32, device=device)
            else:
                points = torch.tensor([], dtype=torch.float32, device=device)
                normals = torch.tensor([], dtype=torch.float32, device=device)
                features = torch.tensor([], dtype=torch.float32, device=device)
            points_list.append(points)
            normals_list.append(normals)
            features_list.append(features)

        for with_normals in (False, True):
            for with_features in (False, True):
                this_features, this_normals = None, None
                if with_normals:
                    this_normals = normals_list
                if with_features:
                    this_features = features_list
                clouds = Pointclouds(
                    points=points_list, normals=this_normals, features=this_features
                )
                points_padded = clouds.points_padded()
                normals_padded = clouds.normals_padded()
                features_padded = clouds.features_padded()
                if not with_normals:
                    self.assertIsNone(normals_padded)
                if not with_features:
                    self.assertIsNone(features_padded)
                points_per_cloud = clouds.num_points_per_cloud()
                for n in range(N):
                    p = len(points_list[n])
                    if p > 0:
                        self.assertClose(points_padded[n, :p, :], points_list[n])
                        if with_normals:
                            self.assertClose(normals_padded[n, :p, :], normals_list[n])
                        if with_features:
                            self.assertClose(
                                features_padded[n, :p, :], features_list[n]
                            )
                        if points_padded.shape[1] > p:
                            self.assertTrue(points_padded[n, p:, :].eq(0).all())
                            if with_normals:
                                self.assertTrue(normals_padded[n, p:, :].eq(0).all())
                            if with_features:
                                self.assertTrue(features_padded[n, p:, :].eq(0).all())
                    self.assertTrue(points_per_cloud[n] == p)
    def test_python_vs_cpu_naive(self):
        torch.manual_seed(231)
        image_size = 32
        radius = 0.1
        points_per_pixel = 3

        # Test a batch of homogeneous point clouds.
        N = 2
        P = 17
        points = torch.randn(N, P, 3, requires_grad=True)
        pointclouds = Pointclouds(points=points)
        args = (pointclouds, image_size, radius, points_per_pixel)
        self._compare_impls(
            rasterize_points_python,
            rasterize_points,
            args,
            args,
            points,
            points,
            compare_grads=True,
        )

        # Test a batch of heterogeneous point clouds.
        P2 = 10
        points1 = torch.randn(P, 3, requires_grad=True)
        points2 = torch.randn(P2, 3)
        pointclouds = Pointclouds(points=[points1, points2])
        args = (pointclouds, image_size, radius, points_per_pixel)
        self._compare_impls(
            rasterize_points_python,
            rasterize_points,
            args,
            args,
            points1,  # check gradients for first element in batch
            points1,
            compare_grads=True,
        )
Example #23
    def test_radius_format_failure(self):
        N = 20
        P_max = 15
        points_list = []
        for _ in range(N):
            p = torch.randint(low=1, high=P_max, size=(1,))[0]
            points_list.append(torch.randn((p, 3)))

        points = Pointclouds(points=points_list)

        # Incorrect shape
        with self.assertRaisesRegex(ValueError, "radius must be of shape"):
            _format_radius([0, 1, 2], points)

        # Incorrect type
        with self.assertRaisesRegex(ValueError, "float, list, tuple or tensor"):
            _format_radius({0: [0, 1, 2]}, points)
    def forward(self):
        # The Pointclouds object creates copies of its arguments - that's why
        # we have to create a new object in every forward step.
        pcl = Pointclouds(points=self.vert_pos[None, ...],
                          features=self.vert_col[None, ...])
        return self.renderer(
            pcl,
            gamma=(self.gamma, ),
            zfar=(45.0, ),
            znear=(1.0, ),
            radius_world=True,
            bg_col=torch.ones((3, ), dtype=torch.float32, device=DEVICE),
            # As mentioned above: workaround for device placement of gradients
            # for camera parameters.
            focal_length=self.focal_length,
            R=self.cam_rot[None, ...],
            T=self.cam_pos[None, ...],
        )[0]
Example #25
 def test_simple_sphere_pulsar(self):
     for device in [torch.device("cpu"), torch.device("cuda")]:
         sphere_mesh = ico_sphere(1, device)
         verts_padded = sphere_mesh.verts_padded()
         # Shift vertices to check coordinate frames are correct.
         verts_padded[..., 1] += 0.2
         verts_padded[..., 0] += 0.2
         pointclouds = Pointclouds(
             points=verts_padded, features=torch.ones_like(verts_padded)
         )
         for azimuth in [0.0, 90.0]:
             R, T = look_at_view_transform(2.7, 0.0, azimuth)
             for camera_name, cameras in [
                 ("fovperspective", FoVPerspectiveCameras(device=device, R=R, T=T)),
                 (
                     "fovorthographic",
                     FoVOrthographicCameras(device=device, R=R, T=T),
                 ),
                 ("perspective", PerspectiveCameras(device=device, R=R, T=T)),
                 ("orthographic", OrthographicCameras(device=device, R=R, T=T)),
             ]:
                 raster_settings = PointsRasterizationSettings(
                     image_size=256, radius=5e-2, points_per_pixel=1
                 )
                 rasterizer = PointsRasterizer(
                     cameras=cameras, raster_settings=raster_settings
                 )
                 renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
                 # Load reference image
                 filename = (
                     "pulsar_simple_pointcloud_sphere_"
                     f"azimuth{azimuth}_{camera_name}.png"
                 )
                 image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                 images = renderer(
                     pointclouds, gamma=(1e-3,), znear=(1.0,), zfar=(100.0,)
                 )
                 rgb = images[0, ..., :3].squeeze().cpu()
                 if DEBUG:
                     filename = "DEBUG_%s" % filename
                     Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                         DATA_DIR / filename
                     )
                 self.assertClose(rgb, image_ref, rtol=7e-3, atol=5e-3)
Example #26
    def test_unscaled(self):
        D = 5
        P = 1000
        B, C, H, W = 2, 3, D, D
        densities = torch.zeros(B, 1, D, H, W)
        features = torch.zeros(B, C, D, H, W)
        volumes = Volumes(densities=densities, features=features)
        points = torch.rand(B, P, 3) * (D - 1) - ((D - 1) * 0.5)
        point_features = torch.rand(B, P, C)
        pointclouds = Pointclouds(points=points, features=point_features)

        volumes2 = add_pointclouds_to_volumes(pointclouds,
                                              volumes,
                                              rescale_features=False)
        self.assertConstant(volumes2.densities().sum([2, 3, 4]) / P,
                            1,
                            atol=1e-5)
        self.assertConstant(volumes2.features().sum([2, 3, 4]) / P,
                            0.5,
                            atol=0.03)
Example #27
def _apply_pcl_transformation(X, R, T, s=None):
    """
    Apply a batch of similarity/rigid transformations, parametrized with
    rotation `R`, translation `T` and scale `s`, to an input batch of
    point clouds `X`.
    """
    if isinstance(X, Pointclouds):
        num_points = X.num_points_per_cloud()
        X_t = X.points_padded()
    else:
        X_t = X

    if s is not None:
        X_t = s[:, None, None] * X_t

    X_t = torch.bmm(X_t, R) + T[:, None, :]

    if isinstance(X, Pointclouds):
        X_list = [x[:n_p] for x, n_p in zip(X_t, num_points)]
        X_t = Pointclouds(X_list)

    return X_t
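Illustrative call (a sketch; it assumes torch, pytorch3d.structures.Pointclouds and pytorch3d.transforms.random_rotations are importable): the helper accepts either a padded tensor or a Pointclouds batch and returns the same type.

import torch
from pytorch3d.transforms import random_rotations

X = torch.randn(2, 100, 3)
R = random_rotations(2)
T = torch.randn(2, 3)
X_t = _apply_pcl_transformation(X, R, T, s=torch.full((2,), 0.5))
X_t_pcl = _apply_pcl_transformation(Pointclouds(list(X)), R, T)
print(X_t.shape, X_t_pcl.num_points_per_cloud())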
Example #28
    def test_simple(self):
        device = torch.device('cuda:0')
        points = [
            torch.tensor(
                [[0.1, 0.3, 0.5], [0.5, 0.2, 0.1], [0.6, 0.8, 0.7]],
                dtype=torch.float32,
                device=device,
            ),
            torch.tensor(
                [
                    [0.1, 0.3, 0.3],
                    [0.6, 0.7, 0.8],
                    [0.2, 0.3, 0.4],
                    [0.1, 0.5, 0.3],
                ],
                dtype=torch.float32,
                device=device,
            ),
            torch.tensor(
                [
                    [0.7, 0.3, 0.6],
                    [0.2, 0.4, 0.8],
                    [0.9, 0.5, 0.2],
                    [0.2, 0.3, 0.4],
                    [0.9, 0.3, 0.8],
                ],
                dtype=torch.float32,
                device=device,
            ),
        ]
        clouds = Pointclouds(points)

        self.assertClose(
            (clouds.packed_to_cloud_idx()).cpu(),
            torch.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]),
        )
        self.assertClose(
            clouds.cloud_to_packed_first_idx().cpu(), torch.tensor([0, 3, 7])
        )
        self.assertClose(
            clouds.num_points_per_cloud().cpu(), torch.tensor([3, 4, 5])
        )
        self.assertClose(
            clouds.padded_to_packed_idx().cpu(),
            torch.tensor([0, 1, 2, 5, 6, 7, 8, 10, 11, 12, 13, 14]),
        )
    def test_all_constructions(self):
        public_getters = [
            "points_list",
            "points_packed",
            "packed_to_cloud_idx",
            "cloud_to_packed_first_idx",
            "num_points_per_cloud",
            "points_padded",
            "padded_to_packed_idx",
        ]
        public_normals_getters = ["normals_list", "normals_packed", "normals_padded"]
        public_features_getters = [
            "features_list",
            "features_packed",
            "features_padded",
        ]

        lengths = [3, 4, 2]
        max_len = max(lengths)
        C = 4

        points_data = [torch.zeros((max_len, 3)).uniform_() for i in lengths]
        normals_data = [torch.zeros((max_len, 3)).uniform_() for i in lengths]
        features_data = [torch.zeros((max_len, C)).uniform_() for i in lengths]
        for length, p, n, f in zip(lengths, points_data, normals_data, features_data):
            p[length:] = 0.0
            n[length:] = 0.0
            f[length:] = 0.0
        points_list = [d[:length] for length, d in zip(lengths, points_data)]
        normals_list = [d[:length] for length, d in zip(lengths, normals_data)]
        features_list = [d[:length] for length, d in zip(lengths, features_data)]
        points_packed = torch.cat(points_data)
        normals_packed = torch.cat(normals_data)
        features_packed = torch.cat(features_data)
        test_cases_inputs = [
            ("list_0_0", points_list, None, None),
            ("list_1_0", points_list, normals_list, None),
            ("list_0_1", points_list, None, features_list),
            ("list_1_1", points_list, normals_list, features_list),
            ("padded_0_0", points_data, None, None),
            ("padded_1_0", points_data, normals_data, None),
            ("padded_0_1", points_data, None, features_data),
            ("padded_1_1", points_data, normals_data, features_data),
            ("emptylist_emptylist_emptylist", [], [], []),
        ]
        false_cases_inputs = [
            ("list_packed", points_list, normals_packed, features_packed, ValueError),
            ("packed_0", points_packed, None, None, ValueError),
        ]

        for name, points, normals, features in test_cases_inputs:
            with self.subTest(name=name):
                p = Pointclouds(points, normals, features)
                for method in public_getters:
                    self.assertIsNotNone(getattr(p, method)())
                for method in public_normals_getters:
                    if normals is None or p.isempty():
                        self.assertIsNone(getattr(p, method)())
                for method in public_features_getters:
                    if features is None or p.isempty():
                        self.assertIsNone(getattr(p, method)())

        for name, points, normals, features, error in false_cases_inputs:
            with self.subTest(name=name):
                with self.assertRaises(error):
                    Pointclouds(points, normals, features)
    def test_pointcloud_with_features(self):
        device = torch.device("cuda:0")
        file_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
        pointcloud_filename = file_dir / "PittsburghBridge/pointcloud.npz"

        # Note, this file is too large to check in to the repo.
        # Download the file to run the test locally.
        if not path.exists(pointcloud_filename):
            url = "https://dl.fbaipublicfiles.com/pytorch3d/data/PittsburghBridge/pointcloud.npz"
            msg = (
                "pointcloud.npz not found, download from %s, save it at the path %s, and rerun"
                % (url, pointcloud_filename)
            )
            warnings.warn(msg)
            return True

        # Load point cloud
        pointcloud = np.load(pointcloud_filename)
        verts = torch.Tensor(pointcloud["verts"]).to(device)
        rgb_feats = torch.Tensor(pointcloud["rgb"]).to(device)

        verts.requires_grad = True
        rgb_feats.requires_grad = True
        point_cloud = Pointclouds(points=[verts], features=[rgb_feats])

        R, T = look_at_view_transform(20, 10, 0)
        cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)

        raster_settings = PointsRasterizationSettings(
            # Set image_size so it is not a multiple of 16 (min bin_size)
            # in order to confirm that there are no errors in coarse rasterization.
            image_size=500,
            radius=0.003,
            points_per_pixel=10,
        )

        renderer = PointsRenderer(
            rasterizer=PointsRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            compositor=AlphaCompositor(),
        )

        images = renderer(point_cloud)

        # Load reference image
        filename = "bridge_pointcloud.png"
        image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(point_cloud)
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.detach().numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref, atol=0.015)

        # Check grad exists.
        grad_images = torch.randn_like(images)
        images.backward(grad_images)
        self.assertIsNotNone(verts.grad)
        self.assertIsNotNone(rgb_feats.grad)