    def test_to_euler(self):
        """mtx -> euler -> mtx"""
        data = random_rotations(13, dtype=torch.float64)
        for convention in self._all_euler_angle_conventions():
            euler_angles = matrix_to_euler_angles(data, convention)
            mdata = euler_angles_to_matrix(euler_angles, convention)
            self.assertTrue(torch.allclose(data, mdata))
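A standalone sketch of the same round trip, assuming only pytorch3d.transforms; the test helper _all_euler_angle_conventions is replaced here by two hand-picked conventions:

import torch
from pytorch3d.transforms import (
    euler_angles_to_matrix,
    matrix_to_euler_angles,
    random_rotations,
)

R = random_rotations(13, dtype=torch.float64)          # (13, 3, 3) rotation matrices
for convention in ("XYZ", "ZYX"):                      # two of the valid axis conventions
    angles = matrix_to_euler_angles(R, convention)     # (13, 3) Euler angles
    R_back = euler_angles_to_matrix(angles, convention)
    assert torch.allclose(R, R_back)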
Example 2
    def _testcase_from_2d(self, y, print_stats, benchmark, skip_q=False):
        x_cam = torch.cat((y, torch.rand_like(y[:, :1]) * 2.0 + 3.5), dim=1)
        x_cam[:, :2] *= x_cam[:, 2:]  # unproject

        R = rotation_conversions.random_rotations(16).to(y)
        T = torch.randn_like(R[:, :1, :])
        x_world = torch.matmul(x_cam - T, R.transpose(1, 2))

        if print_stats:
            print('Run without noise')

        if benchmark:  # return curried call
            torch.cuda.synchronize()

            def result():
                self._run_and_print(x_world, y, R, T, False, skip_q)
                torch.cuda.synchronize()

            return result

        self._run_and_print(
            x_world, y, R, T, print_stats, skip_q, check_output=True
        )

        # in the noisy case, there are no guarantees, so we check it doesn't crash
        if print_stats:
            print('Run with noise')
        x_world += torch.randn_like(x_world) * 0.1
        self._run_and_print(x_world, y, R, T, print_stats, skip_q)
Example 3
    def random_rotation(batch_size, dim, device=None):
        """
        Generates a batch of random `dim`-dimensional rotation matrices.
        """
        if dim == 3:
            R = rotation_conversions.random_rotations(batch_size,
                                                      device=device)
        else:
            # generate random rotation matrices with orthogonalization of
            # random normal square matrices, followed by a transformation
            # that ensures determinant(R)==1
            H = torch.randn(batch_size,
                            dim,
                            dim,
                            dtype=torch.float32,
                            device=device)
            U, _, V = torch.svd(H)
            E = torch.eye(dim, dtype=torch.float32,
                          device=device)[None].repeat(batch_size, 1, 1)
            E[:, -1, -1] = torch.det(torch.bmm(U, V.transpose(2, 1)))
            R = torch.bmm(torch.bmm(U, E), V.transpose(2, 1))
            assert torch.allclose(torch.det(R),
                                  R.new_ones(batch_size),
                                  atol=1e-4)

        return R
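A torch-only sketch of the SVD branch for a non-3 dimension (here dim=4); the sign flip on the last diagonal entry of E forces det(R) == 1 while keeping R orthogonal:

import torch

batch_size, dim = 8, 4
H = torch.randn(batch_size, dim, dim)
U, _, Vh = torch.linalg.svd(H)                     # H = U diag(S) Vh
E = torch.eye(dim)[None].repeat(batch_size, 1, 1)
E[:, -1, -1] = torch.det(torch.bmm(U, Vh))         # +1 or -1
R = torch.bmm(torch.bmm(U, E), Vh)

assert torch.allclose(
    torch.bmm(R, R.transpose(1, 2)), torch.eye(dim).expand_as(R), atol=1e-5
)
assert torch.allclose(torch.det(R), torch.ones(batch_size), atol=1e-4)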
Example 4
    def test_6d(self):
        """Converting to 6d and back"""
        r = random_rotations(13, dtype=torch.float64)

        # 6D representation is not unique,
        # but we implement it by taking the first two rows of the matrix
        r6d = matrix_to_rotation_6d(r)
        self.assertClose(r6d, r[:, :2, :].reshape(-1, 6))

        # going to 6D and back should not change the matrix
        r_hat = rotation_6d_to_matrix(r6d)
        self.assertClose(r_hat, r)

        # moving the second row R2 in the span of (R1, R2) should not matter
        r6d[:, 3:] += 2 * r6d[:, :3]
        r6d[:, :3] *= 3.0
        r_hat = rotation_6d_to_matrix(r6d)
        self.assertClose(r_hat, r)

        # check that we map anything to a valid rotation
        r6d = torch.rand(13, 6)
        r6d[:4, :] *= 3.0
        r6d[4:8, :] -= 0.5
        r = rotation_6d_to_matrix(r6d)
        self.assertClose(torch.matmul(r, r.permute(0, 2, 1)),
                         torch.eye(3).expand_as(r),
                         atol=1e-6)
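The invariances checked above follow from the Gram-Schmidt reconstruction of the continuous 6D representation (Zhou et al., 2019). Below is a reference sketch of that reconstruction, assumed equivalent to rotation_6d_to_matrix and checked against it:

import torch
import torch.nn.functional as F
from pytorch3d.transforms import (
    matrix_to_rotation_6d,
    random_rotations,
    rotation_6d_to_matrix,
)

def rotation_6d_to_matrix_ref(d6):
    a1, a2 = d6[..., :3], d6[..., 3:]
    b1 = F.normalize(a1, dim=-1)                                         # normalize first row
    b2 = F.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)  # orthogonalize second row
    b3 = torch.cross(b1, b2, dim=-1)                                     # third row via cross product
    return torch.stack((b1, b2, b3), dim=-2)

r6d = matrix_to_rotation_6d(random_rotations(5))
assert torch.allclose(rotation_6d_to_matrix_ref(r6d), rotation_6d_to_matrix(r6d), atol=1e-6)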
Example 5
    def test_raysampler_caching(self, batch_size=10):
        """
        Tests the consistency of the NeRF raysampler caching.
        """

        raysampler = NeRFRaysampler(
            min_x=0.0,
            max_x=10.0,
            min_y=0.0,
            max_y=10.0,
            n_pts_per_ray=10,
            min_depth=0.1,
            max_depth=10.0,
            n_rays_per_image=12,
            image_width=10,
            image_height=10,
            stratified=False,
            stratified_test=False,
            invert_directions=True,
        )

        raysampler.eval()

        cameras, rays = [], []

        for _ in range(batch_size):

            R = random_rotations(1)
            T = torch.randn(1, 3)
            focal_length = torch.rand(1, 2) + 0.5
            principal_point = torch.randn(1, 2)

            camera = PerspectiveCameras(
                focal_length=focal_length,
                principal_point=principal_point,
                R=R,
                T=T,
            )

            cameras.append(camera)
            rays.append(raysampler(camera))

        raysampler.precache_rays(cameras, list(range(batch_size)))

        for cam_index, rays_ in enumerate(rays):
            rays_cached_ = raysampler(
                cameras=cameras[cam_index],
                chunksize=None,
                chunk_idx=0,
                camera_hash=cam_index,
                caching=False,
            )

            for v, v_cached in zip(rays_, rays_cached_):
                self.assertTrue(torch.allclose(v, v_cached))
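The caching pattern exercised by this test can be summarized in a few lines of plain Python (names below are illustrative, not the NeRFRaysampler API): ray bundles are precomputed once per camera and later looked up by an integer hash instead of being resampled.

ray_cache = {}

def precache_rays(sample_fn, cameras_list, camera_hashes):
    for cam, cam_hash in zip(cameras_list, camera_hashes):
        ray_cache[cam_hash] = sample_fn(cam)

def sample_rays(sample_fn, camera, camera_hash=None):
    if camera_hash is not None and camera_hash in ray_cache:
        return ray_cache[camera_hash]   # cached bundle, no resampling
    return sample_fn(camera)            # fall back to sampling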
Example 6
    def test_K(self, batch_size=10):
        T = torch.randn(batch_size, 3)
        R = random_rotations(batch_size)
        K = torch.randn(batch_size, 4, 4)
        for cam_type in (
            FoVOrthographicCameras,
            FoVPerspectiveCameras,
            OrthographicCameras,
            PerspectiveCameras,
        ):
            cam = cam_type(R=R, T=T, K=K)
            cam.get_projection_transform()
Example 7
    def test_probabilistic_raysampler(self, batch_size=1, n_pts_per_ray=60):
        """
        Check that the probabilistic ray sampler does not crash for various
        settings.
        """

        raysampler_grid = NeRFRaysampler(
            min_x=0.0,
            max_x=10.0,
            min_y=0.0,
            max_y=10.0,
            n_pts_per_ray=n_pts_per_ray,
            min_depth=1.0,
            max_depth=10.0,
            n_rays_per_image=12,
            image_width=10,
            image_height=10,
            stratified=False,
            stratified_test=False,
            invert_directions=True,
        )

        R = random_rotations(batch_size)
        T = torch.randn(batch_size, 3)
        focal_length = torch.rand(batch_size, 2) + 0.5
        principal_point = torch.randn(batch_size, 2)
        camera = PerspectiveCameras(
            focal_length=focal_length,
            principal_point=principal_point,
            R=R,
            T=T,
        )

        raysampler_grid.eval()

        ray_bundle = raysampler_grid(cameras=camera)

        ray_weights = torch.rand_like(ray_bundle.lengths)

        # Just check that we don't crash for all possible settings.
        for stratified_test in (True, False):
            for stratified in (True, False):
                raysampler_prob = ProbabilisticRaysampler(
                    n_pts_per_ray=n_pts_per_ray,
                    stratified=stratified,
                    stratified_test=stratified_test,
                    add_input_samples=True,
                )
                for mode in ("train", "eval"):
                    getattr(raysampler_prob, mode)()
                    for _ in range(10):
                        raysampler_prob(ray_bundle, ray_weights)
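A torch-only sketch of the importance resampling such a probabilistic sampler performs (assumption: new depths along each ray are drawn in proportion to the supplied per-point weights, as in hierarchical NeRF sampling):

import torch

n_rays, n_pts = 12, 60
lengths = torch.linspace(1.0, 10.0, n_pts).repeat(n_rays, 1)       # coarse depth samples
weights = torch.rand(n_rays, n_pts)                                 # e.g. from a coarse pass

probs = weights / weights.sum(dim=-1, keepdim=True)
idx = torch.multinomial(probs, num_samples=n_pts, replacement=True)
fine_lengths = torch.gather(lengths, -1, idx).sort(dim=-1).values   # resampled, sorted depths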
Example 8
    @classmethod
    def _generate_epnp_test_from_2d(cls, y):
        """
        Instantiate random x_world, x_cam, R, T given a set of input
        2D projections y.
        """
        batch_size = y.shape[0]
        x_cam = torch.cat((y, torch.rand_like(y[:, :, :1]) * 2.0 + 3.5), dim=2)
        x_cam[:, :, :2] *= x_cam[:, :, 2:]  # unproject
        R = rotation_conversions.random_rotations(batch_size).to(y)
        T = torch.randn_like(R[:, :1, :])
        T[:, :, 2] = (T[:, :, 2] + 3.0).clamp(2.0)
        x_world = torch.matmul(x_cam - T, R.transpose(1, 2))
        return x_cam, x_world, R, T
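Data generated this way can be fed to the EPnP solver. A usage sketch, assuming pytorch3d.ops.efficient_pnp(x, y), which estimates R and T from world points and their 2D projections and returns a solution with fields including R and T:

import torch
from pytorch3d.ops import efficient_pnp
from pytorch3d.transforms import random_rotations

batch_size, n_points = 4, 30
y = torch.randn(batch_size, n_points, 2)
depth = torch.rand_like(y[:, :, :1]) * 2.0 + 3.5
x_cam = torch.cat((y * depth, depth), dim=2)             # unprojected camera points
R = random_rotations(batch_size)
T = torch.randn(batch_size, 1, 3)
T[:, :, 2] = (T[:, :, 2] + 3.0).clamp(2.0)               # keep points in front of the camera
x_world = torch.matmul(x_cam - T, R.transpose(1, 2))

solution = efficient_pnp(x_world, y)                     # solution.R, solution.T, ...
print(solution.R.shape, solution.T.shape)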
Example 9
    def _test_degenerate_eigenvalues(self, device):
        """
        Test degenerate eigenvalues, e.g. zero-valued eigenvalues and eigenvalues with multiplicity 2 or 3.
        """
        # Error tolerances for degenerate values are increased as things might become
        #  numerically unstable
        deg_atol = 1e-3
        deg_rtol = 1.0

        # Construct random orthonormal sets
        test_eigenvecs = random_rotations(n=self.TEST_BATCH_SIZE, device=device)

        # Construct random eigenvalues
        test_eigenvals = torch.randn(
            (self.TEST_BATCH_SIZE, 3), device=test_eigenvecs.device
        )
        self._test_eigenvalues_and_eigenvectors(
            test_eigenvecs, test_eigenvals, atol=deg_atol, rtol=deg_rtol
        )

        # First eigenvalue is always 0.0 here: [0.0 X Y]
        test_eigenvals_with_zero = test_eigenvals.clone()
        test_eigenvals_with_zero[..., 0] = 0.0
        self._test_eigenvalues_and_eigenvectors(
            test_eigenvecs, test_eigenvals_with_zero, atol=deg_atol, rtol=deg_rtol
        )

        # First two eigenvalues are always the same here: [X X Y]
        test_eigenvals_with_multiplicity2 = test_eigenvals.clone()
        test_eigenvals_with_multiplicity2[..., 1] = test_eigenvals_with_multiplicity2[
            ..., 0
        ]
        self._test_eigenvalues_and_eigenvectors(
            test_eigenvecs,
            test_eigenvals_with_multiplicity2,
            atol=deg_atol,
            rtol=deg_rtol,
        )

        # All three eigenvalues are the same here: [X X X]
        test_eigenvals_with_multiplicity3 = test_eigenvals_with_multiplicity2.clone()
        test_eigenvals_with_multiplicity3[..., 2] = test_eigenvals_with_multiplicity2[
            ..., 0
        ]
        self._test_eigenvalues_and_eigenvectors(
            test_eigenvecs,
            test_eigenvals_with_multiplicity3,
            atol=deg_atol,
            rtol=deg_rtol,
        )
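How an orthonormal set from random_rotations becomes a test input (torch-only sketch): A = V diag(lambda) V^T is symmetric with exactly the prescribed eigenvalues, including degenerate ones, which a standard solver recovers:

import torch
from pytorch3d.transforms import random_rotations

V = random_rotations(4)                                # rows/columns are orthonormal
lam = torch.randn(4, 3)
lam[:, 1] = lam[:, 0]                                  # force multiplicity 2
A = V @ torch.diag_embed(lam) @ V.transpose(1, 2)      # symmetric by construction

recovered = torch.linalg.eigvalsh(A)                   # ascending eigenvalues
assert torch.allclose(recovered, lam.sort(dim=-1).values, atol=1e-5)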
Example 10
    def test_corresponding_cameras_alignment(self):
        """
        Checks the corresponding_cameras_alignment function.
        """
        self.skipTest("Temporarily disabled pending investigation")
        device = torch.device("cuda:0")

        # try few different random setups
        for _ in range(3):
            for estimate_scale in (True, False):
                # init true alignment transform
                R_align_gt = random_rotations(1, device=device)[0]
                T_align_gt = torch.randn(3, dtype=torch.float32, device=device)

                # init true scale
                if estimate_scale:
                    s_align_gt = torch.randn(1,
                                             dtype=torch.float32,
                                             device=device).exp()
                else:
                    s_align_gt = torch.tensor(1.0,
                                              dtype=torch.float32,
                                              device=device)

                for cam_type in (
                        SfMOrthographicCameras,
                        OpenGLPerspectiveCameras,
                        OpenGLOrthographicCameras,
                        SfMPerspectiveCameras,
                ):
                    # try well-determined and underdetermined cases
                    for batch_size in (10, 4, 3, 2, 1):
                        # get random cameras
                        cameras = init_random_cameras(cam_type,
                                                      batch_size,
                                                      random_z=True).to(device)
                        # try all alignment modes
                        for mode in ("extrinsics", "centers"):
                            # try different noise levels
                            for add_noise in (0.0, 0.01, 1e-4):
                                self._corresponding_cameras_alignment_test_case(
                                    cameras,
                                    R_align_gt,
                                    T_align_gt,
                                    s_align_gt,
                                    estimate_scale,
                                    mode,
                                    add_noise,
                                )
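A minimal usage sketch of the function under test, assuming pytorch3d.ops.corresponding_cameras_alignment(cameras_src, cameras_tgt, estimate_scale=..., mode=...) and plain PerspectiveCameras in place of the init_random_cameras test helper:

import torch
from pytorch3d.ops import corresponding_cameras_alignment
from pytorch3d.renderer import PerspectiveCameras
from pytorch3d.transforms import random_rotations

batch_size = 10
cameras_src = PerspectiveCameras(R=random_rotations(batch_size), T=torch.randn(batch_size, 3))
cameras_tgt = PerspectiveCameras(R=random_rotations(batch_size), T=torch.randn(batch_size, 3))

# returns a copy of cameras_src aligned to cameras_tgt by a similarity transform
cameras_aligned = corresponding_cameras_alignment(
    cameras_src, cameras_tgt, estimate_scale=True, mode="centers"
)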
Example 11
    def test_get_camera_center(self, batch_size=10):
        T = torch.randn(batch_size, 3)
        R = random_rotations(batch_size)
        for cam_type in (
            OpenGLPerspectiveCameras,
            OpenGLOrthographicCameras,
            SfMOrthographicCameras,
            SfMPerspectiveCameras,
            FoVOrthographicCameras,
            FoVPerspectiveCameras,
            OrthographicCameras,
            PerspectiveCameras,
        ):
            cam = cam_type(R=R, T=T)
            C = cam.get_camera_center()
            C_ = -torch.bmm(R, T[:, :, None])[:, :, 0]
            self.assertTrue(torch.allclose(C, C_, atol=1e-05))
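Why the check above holds (torch-only sketch): PyTorch3D uses the row-vector convention x_cam = x_world @ R + T, so the camera centre C satisfies C @ R + T = 0, i.e. C = -T @ R^T, which written as a column vector is -(R @ T^T):

import torch
from pytorch3d.transforms import random_rotations

R = random_rotations(10)
T = torch.randn(10, 3)
C = -torch.matmul(T[:, None, :], R.transpose(1, 2))[:, 0]   # -T R^T (row vector)
C_alt = -torch.bmm(R, T[:, :, None])[:, :, 0]               # -(R T^T) (column vector)
assert torch.allclose(C, C_alt, atol=1e-6)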
Example 12
    @staticmethod
    def init_transform(batch_size: int = 10):
        """
        Initialize a batch of `batch_size` random 4x4 SE(3) transforms.
        """
        device = torch.device("cuda:0")
        transform = torch.zeros(
            batch_size, 4, 4, dtype=torch.float32, device=device
        )
        transform[:, :3, :3] = random_rotations(
            batch_size, dtype=torch.float32, device=device
        )
        transform[:, 3, :3] = torch.randn(
            (batch_size, 3), dtype=torch.float32, device=device
        )
        transform[:, 3, 3] = 1.0
        return transform
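The bottom-row translation layout above matches the row-vector convention used elsewhere in PyTorch3D: homogeneous points are right-multiplied by the matrix. A torch-only sketch (built on CPU here rather than cuda:0 to stay self-contained):

import torch
from pytorch3d.transforms import random_rotations

B = 2
M = torch.zeros(B, 4, 4)
M[:, :3, :3] = random_rotations(B)
M[:, 3, :3] = torch.randn(B, 3)
M[:, 3, 3] = 1.0

pts = torch.randn(B, 5, 3)
pts_h = torch.cat([pts, torch.ones_like(pts[..., :1])], dim=-1)   # homogeneous rows
out = torch.matmul(pts_h, M)[..., :3]                             # x_h @ M
out_ref = torch.matmul(pts, M[:, :3, :3]) + M[:, None, 3, :3]     # x @ R + t
assert torch.allclose(out, out_ref, atol=1e-6)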
Example 13
    def test_random_rotation_invariant(self):
        """The image of the x-axis isn't biased among quadrants."""
        N = 1000
        base = random_rotation()
        octants = list(itertools.product([False, True], repeat=3))

        matrices = random_rotations(N)
        transformed = torch.matmul(base, matrices)
        transformed2 = torch.matmul(matrices, base)

        for k, results in enumerate([matrices, transformed, transformed2]):
            counts = {i: 0 for i in octants}
            for j in range(N):
                counts[tuple(i.item() > 0 for i in results[j, 0])] += 1
            average = N / 8.0
            counts_tensor = torch.tensor(list(counts.values()))
            chisquare_statistic = torch.sum(
                (counts_tensor - average) * (counts_tensor - average) / average
            )
            # The 0.1 significance level for chisquare(8-1) is
            # scipy.stats.chi2(7).ppf(0.9) == 12.017.
            self.assertLess(chisquare_statistic, 12, (counts, chisquare_statistic, k))
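The 12.017 threshold quoted in the comment can be reproduced directly (assuming scipy is available):

from scipy.stats import chi2
print(chi2(7).ppf(0.9))   # ~12.017, the 0.1 significance cutoff for 8 - 1 = 7 degrees of freedom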
Example 14
    def test_to_quat(self):
        """mtx -> quat -> mtx"""
        data = random_rotations(13, dtype=torch.float64)
        mdata = quaternion_to_matrix(matrix_to_quaternion(data))
        self.assertTrue(torch.allclose(data, mdata))
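A related property worth keeping in mind (torch-only sketch): a unit quaternion and its negation map to the same matrix, so matrix -> quaternion -> matrix round-trips exactly, while quaternion -> matrix -> quaternion may return -q instead of q:

import torch
from pytorch3d.transforms import quaternion_to_matrix

q = torch.randn(13, 4, dtype=torch.float64)
q = q / q.norm(dim=-1, keepdim=True)        # random unit quaternions
assert torch.allclose(quaternion_to_matrix(q), quaternion_to_matrix(-q))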
Example 15
    def test_to_axis_angle(self):
        """mtx -> axis_angle -> mtx"""
        data = random_rotations(13, dtype=torch.float64)
        axis_angle = matrix_to_axis_angle(data)
        mdata = axis_angle_to_matrix(axis_angle)
        self.assertClose(data, mdata)