Example #1
    def _compute_variance_and_detMk(self, pointclouds, **kwargs):
        """
        Compute the projected kernel variance Vk' + I (Eq. (35) in [2]),
        i.e. J V_k^r J^T + I (Eq. (7) in [1]).
        Args:
            pointclouds (PointClouds3D): point clouds in object coordinates
        Returns:
            variance (tensor): (N, 2, 2)
            detMk (tensor): (N, 1) determinant of Mk
        """
        raster_settings = kwargs.get("raster_settings", self.raster_settings)

        WJk = self._compute_WJk(pointclouds, **kwargs)
        totalP = WJk.shape[0]

        if raster_settings.Vrk_invariant:
            Vrk, Sk = self._compute_global_Vrk(pointclouds, **kwargs)
        elif raster_settings.Vrk_isotropic:
            # (N, 3, 3)
            Vrk, Sk = self._compute_isotropic_Vrk(pointclouds, **kwargs)
        else:
            Vrk, Sk = self._compute_anisotropic_Vrk(pointclouds)

        Mk = Sk @ WJk
        Vk = WJk.transpose(1, 2) @ Vrk @ WJk

        # low-pass filter +sigma*I
        # NOTE: [2] is in pixel space, but we are in NDC space, so the variance should be
        # scaled by pixel_size
        pixel_size = 2.0 / raster_settings.image_size
        variance = Vk + raster_settings.antialiasing_sigma * \
            ops3d.eyes(2, totalP, device=Vk.device,
                       dtype=Vk.dtype) * (pixel_size**2)

        detMk = torch.det(Mk)

        return variance, detMk
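
Every example on this page relies on an `eyes` helper (called as `ops3d.eyes(2, totalP, ...)` above and as `eyes(dim=3, N=4)` below) that builds a batch of identity matrices. The helper itself is not shown here; the following is a minimal sketch of what such a function could look like, assuming it simply broadcasts `torch.eye` to shape (N, dim, dim):

import torch

def eyes(dim, N, device=None, dtype=torch.float32):
    """Return a writable batch of N identity matrices, shape (N, dim, dim)."""
    return torch.eye(dim, device=device, dtype=dtype).expand(N, dim, dim).clone()

# e.g. four 3x3 identities, later filled in with per-camera intrinsics
camera_matrix = eyes(dim=3, N=4)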
Example #2
            mask = fragments.pix_to_face[..., :1] >= 0
            mask_imgs = mask.to(dtype=torch.uint8) * 255

            # use hard alpha values
            images = torch.cat([images[..., :3], mask.float()], dim=-1)
            dense_depths = cams.zfar.view(-1, 1, 1,
                                          1).clone().expand_as(mask_imgs)
            dense_depths = torch.where(mask, fragments.zbuf[..., :1],
                                       dense_depths)

            # cameras
            camera_mat = cams.get_projection_transform().get_matrix().cpu()
            world_mat = cams.get_world_to_view_transform().get_matrix().cpu()
            id_mat = np.eye(4)
            # DVR scales x,y and does the projection step manually (/z)
            dvr_camera_mat = eyes(4, camera_mat.shape[0]).to(camera_mat.device)
            dvr_camera_mat[:, :2, :2] = camera_mat[:, :2, :2]
            # dense depth read from rasterizer
            for b in range(images.shape[0]):
                # save camera data
                data_dict['camera_mat'][idx, ...] = world_mat[b]
                data_dict['lights_%d' %
                          idx] = convert_tensor_property_to_value_dict(lights)

                # DVR camera data
                cameras_dict['world_mat_%d' % idx] = world_mat[b].transpose(
                    0, 1)
                cameras_dict['scale_mat_%d' % idx] = id_mat
                cameras_dict['camera_mat_%d' %
                             idx] = dvr_camera_mat[b].transpose(0, 1)
                # save dense depth
Example #3
    def test_opencv_conversion(self):
        """
        Tests that the cameras converted from opencv to pytorch3d convention
        return correct projections of random 3D points. The check is done
        against a set of results precomputed using the `cv2.projectPoints` function.
        """
        device = torch.device("cuda:0")
        image_size = [[480, 640]] * 4
        R = [
            [
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0],
            ],
            [
                [1.0, 0.0, 0.0],
                [0.0, 0.0, -1.0],
                [0.0, 1.0, 0.0],
            ],
            [
                [0.0, 0.0, 1.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
            ],
            [
                [0.0, 0.0, 1.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
            ],
        ]

        tvec = [
            [0.0, 0.0, 3.0],
            [0.3, -0.3, 3.0],
            [-0.15, 0.1, 4.0],
            [0.0, 0.0, 4.0],
        ]
        focal_length = [
            [100.0, 100.0],
            [115.0, 115.0],
            [105.0, 105.0],
            [120.0, 120.0],
        ]
        # These values are in y, x format, but they should be in x, y format.
        # The tests work like this because they only test for consistency,
        # but this format is misleading.
        principal_point = [
            [240, 320],
            [240.5, 320.3],
            [241, 318],
            [242, 322],
        ]

        principal_point, focal_length, R, tvec, image_size = [
            torch.tensor(x, device=device)
            for x in (principal_point, focal_length, R, tvec, image_size)
        ]
        camera_matrix = eyes(dim=3, N=4, device=device)
        camera_matrix[:, 0, 0], camera_matrix[:, 1, 1] = (
            focal_length[:, 0],
            focal_length[:, 1],
        )
        camera_matrix[:, :2, 2] = principal_point

        pts = torch.nn.functional.normalize(torch.randn(4,
                                                        1000,
                                                        3,
                                                        device=device),
                                            dim=-1)

        # project the 3D points with the opencv projection function
        rvec = so3_log_map(R)
        pts_proj_opencv = cv2_project_points(pts, rvec, tvec, camera_matrix)

        # make the pytorch3d cameras
        cameras_opencv_to_pytorch3d = cameras_from_opencv_projection(
            R, tvec, camera_matrix, image_size)
        self.assertEqual(cameras_opencv_to_pytorch3d.device, device)

        # project the 3D points with converted cameras to screen space.
        pts_proj_pytorch3d_screen = cameras_opencv_to_pytorch3d.transform_points_screen(
            pts)[..., :2]

        # compare to the cached projected points
        self.assertClose(pts_proj_opencv, pts_proj_pytorch3d_screen, atol=1e-5)

        # Check the inverse.
        R_i, tvec_i, camera_matrix_i = opencv_from_cameras_projection(
            cameras_opencv_to_pytorch3d, image_size)
        self.assertClose(R, R_i)
        self.assertClose(tvec, tvec_i)
        self.assertClose(camera_matrix, camera_matrix_i)
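
The comparison above relies on a `cv2_project_points` test helper that is not shown on this page. A plausible sketch, assuming it simply loops `cv2.projectPoints` over the batch dimension and stacks the results (the CPU/NumPy round trip is needed because OpenCV does not accept CUDA tensors):

import cv2
import numpy as np
import torch

def cv2_project_points(pts, rvec, tvec, camera_matrix):
    """Hypothetical batched wrapper around cv2.projectPoints (no distortion)."""
    projected = []
    for pts_i, rvec_i, tvec_i, K_i in zip(pts, rvec, tvec, camera_matrix):
        proj_i, _ = cv2.projectPoints(
            pts_i.detach().cpu().numpy(),
            rvec_i.detach().cpu().numpy(),
            tvec_i.detach().cpu().numpy(),
            K_i.detach().cpu().numpy(),
            np.zeros(5),  # zero lens-distortion coefficients
        )
        projected.append(torch.from_numpy(proj_i).squeeze(1))
    return torch.stack(projected).to(pts)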
Example #4
    def test_pulsar_conversion(self):
        """
        Tests that the cameras converted from opencv to pulsar convention
        return correct projections of random 3D points. The check is done
        against a set of results precomputed using the `cv2.projectPoints` function.
        """
        image_size = [[480, 640]]
        R = [
            [
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0],
            ],
            [
                [0.1968, -0.6663, -0.7192],
                [0.7138, -0.4055, 0.5710],
                [-0.6721, -0.6258, 0.3959],
            ],
        ]
        tvec = [
            [10.0, 10.0, 3.0],
            [-0.0, -0.0, 20.0],
        ]
        focal_length = [
            [100.0, 100.0],
            [10.0, 10.0],
        ]
        principal_point = [
            [320, 240],
            [320, 240],
        ]

        principal_point, focal_length, R, tvec, image_size = [
            torch.FloatTensor(x)
            for x in (principal_point, focal_length, R, tvec, image_size)
        ]
        camera_matrix = eyes(dim=3, N=2)
        camera_matrix[:, 0, 0] = focal_length[:, 0]
        camera_matrix[:, 1, 1] = focal_length[:, 1]
        camera_matrix[:, :2, 2] = principal_point
        rvec = so3_log_map(R)
        pts = torch.tensor([[[0.0, 0.0, 120.0]], [[0.0, 0.0, 120.0]]],
                           dtype=torch.float32)
        radii = torch.tensor([[1e-5], [1e-5]], dtype=torch.float32)
        col = torch.zeros((2, 1, 1), dtype=torch.float32)

        # project the 3D points with the opencv projection function
        pts_proj_opencv = cv2_project_points(pts, rvec, tvec, camera_matrix)
        pulsar_cam = pulsar_from_opencv_projection(R,
                                                   tvec,
                                                   camera_matrix,
                                                   image_size,
                                                   znear=100.0)
        pulsar_rend = PulsarRenderer(640,
                                     480,
                                     1,
                                     right_handed_system=False,
                                     n_channels=1)
        rendered = torch.flip(
            pulsar_rend(
                pts,
                col,
                radii,
                pulsar_cam,
                1e-5,
                max_depth=150.0,
                min_depth=100.0,
            ),
            dims=(1, ),
        )
        for batch_id in range(2):
            point_pos = torch.where(
                rendered[batch_id] == rendered[batch_id].min())
            point_pos = point_pos[1][0], point_pos[0][0]
            self.assertLess(
                torch.abs(point_pos[0] - pts_proj_opencv[batch_id, 0, 0]), 2)
            self.assertLess(
                torch.abs(point_pos[1] - pts_proj_opencv[batch_id, 0, 1]), 2)
Example #5
    def _check_raysampler_ray_directions(self, cameras, raysampler, ray_bundle):
        """
        Check the rays_directions_world output of raysamplers.
        """

        batch_size = cameras.R.shape[0]
        n_pts_per_ray = ray_bundle.lengths.shape[-1]
        spatial_size = ray_bundle.xys.shape[1:-1]
        n_rays_per_image = spatial_size.numel()

        # obtain the ray points in world coords
        rays_points_world = cameras.unproject_points(
            torch.cat(
                (
                    ray_bundle.xys.view(batch_size, n_rays_per_image, 1, 2).expand(
                        batch_size, n_rays_per_image, n_pts_per_ray, 2
                    ),
                    ray_bundle.lengths.view(
                        batch_size, n_rays_per_image, n_pts_per_ray, 1
                    ),
                ),
                dim=-1,
            ).view(batch_size, -1, 3)
        ).view(batch_size, -1, n_pts_per_ray, 3)

        # reshape to common testing size
        rays_directions_world_normed = torch.nn.functional.normalize(
            ray_bundle.directions.view(batch_size, -1, 3), dim=-1
        )

        # check that the normalized difference between the last plane of points
        # and each preceding plane in world coords matches ray_directions_world
        rays_directions_world_ = torch.nn.functional.normalize(
            rays_points_world[:, :, -1:] - rays_points_world[:, :, :-1], dim=-1
        )
        self.assertClose(
            rays_directions_world_normed[:, :, None].expand_as(rays_directions_world_),
            rays_directions_world_,
            atol=1e-4,
        )

        # check the ray directions rotated using camera rotation matrix
        # match the ray directions of a camera with trivial extrinsics
        cameras_trivial_extrinsic = cameras.clone()
        cameras_trivial_extrinsic.R = eyes(
            N=batch_size, dim=3, dtype=cameras.R.dtype, device=cameras.device
        )
        cameras_trivial_extrinsic.T = torch.zeros_like(cameras.T)

        # make sure we get the same random rays in case we call the
        # MonteCarloRaysampler twice below
        with torch.random.fork_rng(devices=range(torch.cuda.device_count())):
            torch.random.manual_seed(42)
            ray_bundle_world_fix_seed = raysampler(cameras=cameras)
            torch.random.manual_seed(42)
            ray_bundle_camera_fix_seed = raysampler(cameras=cameras_trivial_extrinsic)

        rays_directions_camera_fix_seed_ = Rotate(
            cameras.R, device=cameras.R.device
        ).transform_points(ray_bundle_world_fix_seed.directions.view(batch_size, -1, 3))

        self.assertClose(
            rays_directions_camera_fix_seed_,
            ray_bundle_camera_fix_seed.directions.view(batch_size, -1, 3),
            atol=1e-5,
        )
Example #6
    def test_ndc_grid_sample_rendering(self):
        """
        Use PyTorch3D point renderer to render a colored point cloud, then
        sample the image at the locations of the point projections with
        `ndc_grid_sample`. Finally, assert that the sampled colors are equal to the
        original point cloud colors.

        Note that, in order to ensure correctness, we use a nearest-neighbor
        assignment point renderer (i.e. no soft splatting).
        """

        # generate a bunch of 3D points on a regular grid lying in the z-plane
        n_grid_pts = 10
        grid_scale = 0.9
        z_plane = 2.0
        image_size = [128, 128]
        point_radius = 0.015
        n_pts = n_grid_pts * n_grid_pts
        pts = torch.stack(
            meshgrid_ij([torch.linspace(-grid_scale, grid_scale, n_grid_pts)] *
                        2, ),
            dim=-1,
        )
        pts = torch.cat([pts, z_plane * torch.ones_like(pts[..., :1])], dim=-1)
        pts = pts.reshape(1, n_pts, 3)

        # color the points randomly
        pts_colors = torch.rand(1, n_pts, 3)

        # make trivial rendering cameras
        cameras = PerspectiveCameras(
            R=eyes(dim=3, N=1),
            device=pts.device,
            T=torch.zeros(1, 3, dtype=torch.float32, device=pts.device),
        )

        # render the point cloud
        pcl = Pointclouds(points=pts, features=pts_colors)
        renderer = NearestNeighborPointsRenderer(
            rasterizer=PointsRasterizer(
                cameras=cameras,
                raster_settings=PointsRasterizationSettings(
                    image_size=image_size,
                    radius=point_radius,
                    points_per_pixel=1,
                ),
            ),
            compositor=AlphaCompositor(),
        )
        im_render = renderer(pcl)

        # sample the render at projected pts
        pts_proj = cameras.transform_points(pcl.points_padded())[..., :2]
        pts_colors_sampled = ndc_grid_sample(
            im_render,
            pts_proj,
            mode="nearest",
            align_corners=False,
        ).permute(0, 2, 1)

        # assert that the samples are the same as original points
        self.assertClose(pts_colors, pts_colors_sampled, atol=1e-4)
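
`NearestNeighborPointsRenderer` is not part of the public PyTorch3D API; the test only requires that each pixel receive the unmodified color of its nearest point rather than a soft, alpha-weighted splat. One way to sketch such a renderer, assuming PyTorch3D's standard `PointsRenderer`/compositor interface, is to override `forward` and pass constant weights to the compositor:

import torch
from pytorch3d.renderer import PointsRenderer

class NearestNeighborPointsRenderer(PointsRenderer):
    """Hypothetical sketch: hard (non-soft) splatting via constant weights."""

    def forward(self, point_clouds, **kwargs) -> torch.Tensor:
        fragments = self.rasterizer(point_clouds, **kwargs)
        # constant weights: the single rasterized point fully determines the pixel
        weights = torch.ones_like(fragments.dists.permute(0, 3, 1, 2))
        images = self.compositor(
            fragments.idx.long().permute(0, 3, 1, 2),
            weights,
            point_clouds.features_packed().permute(1, 0),
            **kwargs,
        )
        return images.permute(0, 2, 3, 1)  # back to (N, H, W, C)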