Example #1
    # Relies on `corresponding_cameras_alignment` from pytorch3d.ops and on
    # `so3_exponential_map` / `so3_relative_angle` from pytorch3d.transforms.
    def _corresponding_cameras_alignment_test_case(
        self,
        cameras,
        R_align_gt,
        T_align_gt,
        s_align_gt,
        estimate_scale,
        mode,
        add_noise,
    ):
        batch_size = cameras.R.shape[0]

        # compute the target extrinsics by applying the ground-truth similarity
        # transform (R_align_gt, T_align_gt, s_align_gt) to the source cameras
        R_new = torch.bmm(R_align_gt[None].expand_as(cameras.R), cameras.R)
        T_new = (
            torch.bmm(T_align_gt[None, None].repeat(batch_size, 1, 1), cameras.R)[:, 0]
            + cameras.T
        ) * s_align_gt

        if add_noise != 0.0:
            R_new = torch.bmm(
                R_new, so3_exponential_map(torch.randn_like(T_new) * add_noise)
            )
            T_new += torch.randn_like(T_new) * add_noise

        # create new cameras from R_new and T_new
        cameras_tgt = cameras.clone()
        cameras_tgt.R = R_new
        cameras_tgt.T = T_new

        # align cameras and cameras_tgt
        cameras_aligned = corresponding_cameras_alignment(
            cameras, cameras_tgt, estimate_scale=estimate_scale, mode=mode
        )

        if batch_size <= 2 and mode == "centers":
            # underdetermined case - check only the center alignment error
            # since the rotation and translation are ambiguous here
            self.assertClose(
                cameras_aligned.get_camera_center(),
                cameras_tgt.get_camera_center(),
                atol=max(add_noise * 7.0, 1e-4),
            )

        else:

            def _rmse(a):
                return (torch.norm(a, dim=1, p=2) ** 2).mean().sqrt()

            if add_noise != 0.0:
                # in the noisy case, check the mean rotation/translation error for
                # extrinsics alignment and the root-mean-square center error for
                # center alignment
                if mode == "centers":
                    self.assertNormsClose(
                        cameras_aligned.get_camera_center(),
                        cameras_tgt.get_camera_center(),
                        _rmse,
                        atol=max(add_noise * 10.0, 1e-4),
                    )
                elif mode == "extrinsics":
                    angle_err = so3_relative_angle(
                        cameras_aligned.R, cameras_tgt.R
                    ).mean()
                    self.assertClose(
                        angle_err, torch.zeros_like(angle_err), atol=add_noise * 10.0
                    )
                    self.assertNormsClose(
                        cameras_aligned.T, cameras_tgt.T, _rmse, atol=add_noise * 7.0
                    )
                else:
                    raise ValueError(mode)

            else:
                # compare the rotations and translations of cameras
                self.assertClose(cameras_aligned.R, cameras_tgt.R, atol=3e-4)
                self.assertClose(cameras_aligned.T, cameras_tgt.T, atol=3e-4)
                # compare the centers
                self.assertClose(
                    cameras_aligned.get_camera_center(),
                    cameras_tgt.get_camera_center(),
                    atol=3e-4,
                )
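
The test helper above exercises corresponding_cameras_alignment end to end: it applies a known similarity transform to a camera batch and checks that the alignment recovers it. Below is a minimal, self-contained sketch of the same workflow outside a test class; the camera construction with PerspectiveCameras and random_rotations, and the concrete values of the ground-truth similarity, are assumptions for illustration, not part of the original test.

import torch
from pytorch3d.ops import corresponding_cameras_alignment
from pytorch3d.renderer import PerspectiveCameras
from pytorch3d.transforms import random_rotations

batch_size = 10
cameras_src = PerspectiveCameras(
    R=random_rotations(batch_size), T=torch.randn(batch_size, 3)
)

# ground-truth aligning similarity transform (values are assumptions)
R_align_gt = random_rotations(1)
T_align_gt = torch.randn(1, 3)
s_align_gt = 2.0

# build the target cameras exactly as the test above does
R_new = torch.bmm(R_align_gt.expand(batch_size, 3, 3), cameras_src.R)
T_new = (
    torch.bmm(T_align_gt[:, None].expand(batch_size, 1, 3), cameras_src.R)[:, 0]
    + cameras_src.T
) * s_align_gt
cameras_tgt = cameras_src.clone()
cameras_tgt.R = R_new
cameras_tgt.T = T_new

# recover the similarity; the aligned extrinsics should match cameras_tgt
cameras_aligned = corresponding_cameras_alignment(
    cameras_src, cameras_tgt, estimate_scale=True, mode="extrinsics"
)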
Example #2
def compute_corresponding_cameras_alignment():
    # `cameras_src`, `cameras_tgt`, `estimate_scale` and `mode` are closure
    # variables captured from the enclosing benchmark setup.
    corresponding_cameras_alignment(
        cameras_src, cameras_tgt, estimate_scale=estimate_scale, mode=mode
    )
    # block until all CUDA kernels finish so the benchmark measures real work
    torch.cuda.synchronize()
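
Example #2 is only the inner timing closure of a benchmark; the setup that creates cameras_src and cameras_tgt is not shown. Below is a hedged sketch of how such a benchmark factory might look, assuming the cameras are built with PerspectiveCameras and random_rotations; the construction and default arguments are assumptions, not the original setup code.

import torch
from pytorch3d.ops import corresponding_cameras_alignment
from pytorch3d.renderer import PerspectiveCameras
from pytorch3d.transforms import random_rotations

def make_alignment_benchmark(batch_size=100, estimate_scale=True, mode="extrinsics"):
    device = torch.device("cuda:0")
    # random source/target camera batches on the GPU (construction is an assumption)
    cameras_src, cameras_tgt = [
        PerspectiveCameras(
            R=random_rotations(batch_size), T=torch.randn(batch_size, 3)
        ).to(device)
        for _ in range(2)
    ]
    torch.cuda.synchronize()

    def compute_corresponding_cameras_alignment():
        corresponding_cameras_alignment(
            cameras_src, cameras_tgt, estimate_scale=estimate_scale, mode=mode
        )
        # wait for the CUDA kernels so a wall-clock timer measures the actual work
        torch.cuda.synchronize()

    return compute_corresponding_cameras_alignment

Returning the closure rather than timing inline keeps the expensive setup out of the measured region, and the synchronize call inside the closure ensures asynchronous CUDA work is included in each timed iteration.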