Example #1
def check_verifier_output_error(verifier: VerifierBase,
                                euler_angle_err_tol: float,
                                translation_err_tol: float) -> None:
    """Check error using annotated correspondences as input, instead of noisy detector-descriptor matches."""
    fx, px, py, k1, k2 = load_log_front_center_intrinsics()
    keypoints_i1, keypoints_i2 = load_argoverse_log_annotated_correspondences()

    # match keypoints row by row
    match_indices = np.vstack(
        [np.arange(len(keypoints_i1)),
         np.arange(len(keypoints_i1))]).T

    i2Ri1, i2ti1, _ = verifier.verify(keypoints_i1, keypoints_i2,
                                      match_indices,
                                      Cal3Bundler(fx, k1, k2, px, py),
                                      Cal3Bundler(fx, k1, k2, px, py))

    # Ground truth is provided in inverse format, so invert SE(3) object
    i2Ti1 = Pose3(i2Ri1, i2ti1.point3())
    i1Ti2 = i2Ti1.inverse()
    i1ti2 = i1Ti2.translation()
    i1Ri2 = i1Ti2.rotation().matrix()

    euler_angles = Rotation.from_matrix(i1Ri2).as_euler("zyx", degrees=True)
    gt_euler_angles = np.array([-0.37, 32.47, -0.42])
    assert np.allclose(gt_euler_angles, euler_angles, atol=euler_angle_err_tol)

    gt_i1ti2 = np.array([0.21, -0.0024, 0.976])
    assert np.allclose(gt_i1ti2, i1ti2, atol=translation_err_tol)
Example #2
    def test_triangulation_individualCal_without_ransac(self):
        """Tests that the triangulation is accurate for individual camera calibration, without RANSAC-based
        triangulation. Checks if cameras and triangulated 3D point are as expected.
        """
        k1 = 0
        k2 = 0
        f, u0, v0 = 1500, 640, 480
        f_, u0_, v0_ = 1600, 650, 440
        K1 = Cal3Bundler(f, k1, k2, u0, v0)
        K2 = Cal3Bundler(f_, k1, k2, u0_, v0_)

        keypoints_list, _, cameras = generate_noisy_2d_measurements(
            world_point=self.expected_landmark,
            calibrations=[K1, K2],
            per_image_noise_vecs=np.zeros((2, 2)),
            poses=get_pose3_vector(num_poses=2),
        )

        # create matches
        # since there is only one measurement in each image, both assigned feature index 0
        matches_dict = {(0, 1): np.array([[0, 0]])}

        da = DataAssociation(
            reproj_error_thresh=5,  # 5 px
            min_track_len=2,  # at least 2 measurements required
            mode=TriangulationParam.NO_RANSAC,
        )
        sfm_data, _ = da.run(len(cameras), cameras, matches_dict,
                             keypoints_list)
        estimated_landmark = sfm_data.get_track(0).point3()
        self.gtsamAssertEquals(estimated_landmark, self.expected_landmark,
                               1e-2)

        for i in sfm_data.get_valid_camera_indices():
            self.gtsamAssertEquals(sfm_data.get_camera(i), cameras.get(i))
Example #3
    def test_bundle_adjust(self):
        """Tests the bundle adjustment for relative pose on a simulated scene."""
        two_view_estimator = TwoViewEstimator(matcher=None,
                                              verifier=None,
                                              inlier_support_processor=None,
                                              bundle_adjust_2view=True,
                                              eval_threshold_px=4)

        # Extract example poses.
        i1Ri2 = EXAMPLE_DATA.get_camera(1).pose().rotation()
        i1ti2 = EXAMPLE_DATA.get_camera(1).pose().translation()
        i2Ti1 = Pose3(i1Ri2, i1ti2).inverse()
        i2Ei1 = EssentialMatrix(i2Ti1.rotation(), Unit3(i2Ti1.translation()))

        # Perform bundle adjustment.
        i2Ri1_optimized, i2Ui1_optimized, corr_idxs = two_view_estimator.bundle_adjust(
            keypoints_i1=self.keypoints_i1,
            keypoints_i2=self.keypoints_i2,
            verified_corr_idxs=self.corr_idxs,
            camera_intrinsics_i1=Cal3Bundler(),
            camera_intrinsics_i2=Cal3Bundler(),
            i2Ri1_initial=i2Ei1.rotation(),
            i2Ui1_initial=i2Ei1.direction(),
            i2Ti1_prior=None,
        )

        # Assert
        rotation_angular_error = comp_utils.compute_relative_rotation_angle(
            i2Ri1_optimized, i2Ei1.rotation())
        translation_angular_error = comp_utils.compute_relative_unit_translation_angle(
            i2Ui1_optimized, i2Ei1.direction())
        self.assertLessEqual(rotation_angular_error, 1)
        self.assertLessEqual(translation_angular_error, 1)
        np.testing.assert_allclose(corr_idxs, self.corr_idxs)
Example #4
    def test_values(self):
        values = Values()
        E = EssentialMatrix(Rot3(), Unit3())
        tol = 1e-9

        values.insert(0, Point2(0, 0))
        values.insert(1, Point3(0, 0, 0))
        values.insert(2, Rot2())
        values.insert(3, Pose2())
        values.insert(4, Rot3())
        values.insert(5, Pose3())
        values.insert(6, Cal3_S2())
        values.insert(7, Cal3DS2())
        values.insert(8, Cal3Bundler())
        values.insert(9, E)
        values.insert(10, imuBias_ConstantBias())

        # Special cases for Vectors and Matrices.
        # Note that gtsam's Eigen Vectors and Matrices require double-precision
        # floating-point numbers in column-major (Fortran-style) storage order,
        # whereas numpy.array defaults to row-major order and takes its dtype
        # from the input values, e.g. np.array([1, 2, 3]) will have an integer dtype.
        #
        # The wrapper will automatically fix the type and storage order for you,
        # but for performance reasons it is recommended to specify the correct
        # type and storage order yourself.
        # For vectors, the order is not important, but the dtype still is.
        vec = np.array([1., 2., 3.])
        values.insert(11, vec)
        mat = np.array([[1., 2.], [3., 4.]], order='F')
        values.insert(12, mat)
        # Test with dtype int and the default order='C'.
        # This still works, as the wrapper converts to the correct type and order for you,
        # but it is normally not recommended!
        mat2 = np.array([[1, 2, ], [3, 5]])
        values.insert(13, mat2)

        self.assertTrue(values.atPoint2(0).equals(Point2(0,0), tol))
        self.assertTrue(values.atPoint3(1).equals(Point3(0,0,0), tol))
        self.assertTrue(values.atRot2(2).equals(Rot2(), tol))
        self.assertTrue(values.atPose2(3).equals(Pose2(), tol))
        self.assertTrue(values.atRot3(4).equals(Rot3(), tol))
        self.assertTrue(values.atPose3(5).equals(Pose3(), tol))
        self.assertTrue(values.atCal3_S2(6).equals(Cal3_S2(), tol))
        self.assertTrue(values.atCal3DS2(7).equals(Cal3DS2(), tol))
        self.assertTrue(values.atCal3Bundler(8).equals(Cal3Bundler(), tol))
        self.assertTrue(values.atEssentialMatrix(9).equals(E, tol))
        self.assertTrue(values.atimuBias_ConstantBias(
            10).equals(imuBias_ConstantBias(), tol))

        # special cases for Vector and Matrix:
        actualVector = values.atVector(11)
        self.assertTrue(np.allclose(vec, actualVector, tol))
        actualMatrix = values.atMatrix(12)
        self.assertTrue(np.allclose(mat, actualMatrix, tol))
        actualMatrix2 = values.atMatrix(13)
        self.assertTrue(np.allclose(mat2, actualMatrix2, tol))
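The comment block above recommends specifying the dtype and storage order up front. A minimal sketch (illustrative, not part of the test) of converting an existing integer, row-major array before handing it to gtsam:

import numpy as np

raw = np.array([[1, 2], [3, 5]])                    # int dtype, C (row-major) order
ready = np.asfortranarray(raw.astype(np.float64))   # float64, Fortran (column-major) order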
Example #5
def generate_random_input_for_verifier() -> Tuple[Keypoints, Keypoints, np.ndarray, Cal3Bundler, Cal3Bundler]:
    """Generates random inputs for verification.

    Returns:
        Keypoints for image #i1.
        Keypoints for image #i2.
        Indices of match between image pair (#i1, #i2).
        Intrinsics for image #i1.
        Intrinsics for image #i2.
    """

    # Randomly generate number of keypoints
    num_keypoints_i1 = random.randint(0, 100)
    num_keypoints_i2 = random.randint(0, 100)

    # randomly generate image shapes
    image_shape_i1 = [random.randint(100, 400), random.randint(100, 400)]
    image_shape_i2 = [random.randint(100, 400), random.randint(100, 400)]

    # generate intrinsics from image shapes
    intrinsics_i1 = Cal3Bundler(
        fx=min(image_shape_i1[0], image_shape_i1[1]),
        k1=0,
        k2=0,
        u0=image_shape_i1[0] / 2,
        v0=image_shape_i1[1] / 2,
    )

    intrinsics_i2 = Cal3Bundler(
        fx=min(image_shape_i2[0], image_shape_i2[1]),
        k1=0,
        k2=0,
        u0=image_shape_i2[0] / 2,
        v0=image_shape_i2[1] / 2,
    )

    # randomly generate the keypoints
    keypoints_i1 = generate_random_keypoints(num_keypoints_i1, image_shape_i1)
    keypoints_i2 = generate_random_keypoints(num_keypoints_i2, image_shape_i2)

    # randomly generate matches
    num_matches = random.randint(0, min(num_keypoints_i1, num_keypoints_i2))
    if num_matches == 0:
        matching_indices_i1i2 = np.array([], dtype=np.int32)
    else:
        matching_indices_i1i2 = np.empty((num_matches, 2), dtype=np.int32)
        matching_indices_i1i2[:, 0] = np.random.choice(num_keypoints_i1, size=(num_matches,), replace=False)
        matching_indices_i1i2[:, 1] = np.random.choice(num_keypoints_i2, size=(num_matches,), replace=False)

    return (
        keypoints_i1,
        keypoints_i2,
        matching_indices_i1i2,
        intrinsics_i1,
        intrinsics_i2,
    )
Example #6
File: io.py  Project: borglab/gtsfm
def read_cameras_txt(fpath: str) -> Optional[List[Cal3Bundler]]:
    """Read camera calibrations from a COLMAP-formatted cameras.txt file.

    Reference: https://colmap.github.io/format.html#cameras-txt

    Args:
        fpath: path to the cameras.txt file.

    Returns:
        calibration object for each camera, or None if the requested file does not exist.
    """
    if not Path(fpath).exists():
        logger.info("%s does not exist", fpath)
        return None

    with open(fpath, "r") as f:
        lines = f.readlines()

    # may not be one line per camera (could be only one line of text if shared calibration)
    num_cams = int(lines[2].replace("# Number of cameras: ", "").strip())

    calibrations = []
    for line in lines[3:]:

        cam_params = line.split()
        # Note that u0 is px, and v0 is py
        model = cam_params[1]
        # Currently only handles the SIMPLE_RADIAL and RADIAL camera models.
        assert model in ["SIMPLE_RADIAL", "RADIAL"]
        if model == "SIMPLE_RADIAL":
            _, _, img_w, img_h, fx, u0, v0, k1 = cam_params[:8]
            img_w, img_h, fx, u0, v0, k1 = (
                int(img_w),
                int(img_h),
                float(fx),
                float(u0),
                float(v0),
                float(k1),
            )
            # Convert COLMAP's SIMPLE_RADIAL to GTSAM's Cal3Bundler:
            # Add second radial distortion coefficient of value zero.
            k2 = 0
            calibrations.append(Cal3Bundler(fx, k1, k2, u0, v0))
        elif model == "RADIAL":
            _, _, img_w, img_h, fx, u0, v0, k1, k2 = cam_params[:9]
            img_w, img_h, fx, u0, v0, k1, k2 = (
                int(img_w),
                int(img_h),
                float(fx),
                float(u0),
                float(v0),
                float(k1),
                float(k2),
            )
            calibrations.append(Cal3Bundler(fx, k1, k2, u0, v0))

    assert len(calibrations) == num_cams
    return calibrations
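For reference, a hedged sketch of the input this parser expects; the file contents and path below are made up for illustration, following the COLMAP text format assumed by the function (a three-line header whose third line carries the camera count, then one "CAMERA_ID MODEL WIDTH HEIGHT PARAMS[]" line per camera):

import os
import tempfile

sample_cameras_txt = "\n".join([
    "# Camera list with one line of data per camera:",
    "#   CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]",
    "# Number of cameras: 1",
    "1 SIMPLE_RADIAL 1024 768 870.0 512.0 384.0 -0.025",
])

with tempfile.TemporaryDirectory() as tempdir:
    fpath = os.path.join(tempdir, "cameras.txt")
    with open(fpath, "w") as f:
        f.write(sample_cameras_txt)
    calibrations = read_cameras_txt(fpath)

# Expect a single Cal3Bundler with fx=870.0, k1=-0.025, k2=0 (appended), u0=512.0, v0=384.0.
assert len(calibrations) == 1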
Example #7
    def test_two_view_correspondences(self):
        """Tests the bundle adjustment for relative pose on a simulated scene."""

        i1Ri2 = EXAMPLE_DATA.get_camera(1).pose().rotation()
        i1ti2 = EXAMPLE_DATA.get_camera(1).pose().translation()
        i2Ti1 = Pose3(i1Ri2, i1ti2)
        camera_i1 = PinholeCameraCal3Bundler(Pose3(), Cal3Bundler())
        camera_i2 = PinholeCameraCal3Bundler(i2Ti1, Cal3Bundler())
        tracks_3d, valid_indices = TwoViewEstimator.triangulate_two_view_correspondences(
            camera_i1, camera_i2, self.keypoints_i1, self.keypoints_i2,
            self.corr_idxs)
        self.assertEqual(len(tracks_3d), 5)
        self.assertEqual(len(valid_indices), 5)
Example #8
    def test_recover_relative_pose_from_essential_matrix(self):
        """Test for function to extract relative pose from essential matrix."""

        # simulate correspondences and the essential matrix
        corr_i1, corr_i2, i2Ei1 = simulate_two_planes_scene(10, 10)

        i2Ri1, i2Ui1 = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1.matrix(), corr_i1.coordinates, corr_i2.coordinates,
            Cal3Bundler(), Cal3Bundler())

        # compare the recovered R and U with the ground truth
        self.assertTrue(i2Ri1.equals(i2Ei1.rotation(), 1e-3))
        self.assertTrue(i2Ui1.equals(i2Ei1.direction(), 1e-3))
Example #9
    def test_round_trip_images_txt(self) -> None:
        """Starts with a pose. Writes the pose to images.txt (in a temporary directory). Then reads images.txt to recover
        that same pose. Checks if the original wTc and recovered wTc match up."""

        # fmt: off
        # Rotation 45 degrees about the z-axis.
        original_wRc = np.array([
            [np.cos(np.pi / 4), -np.sin(np.pi / 4), 0],
            [np.sin(np.pi / 4), np.cos(np.pi / 4), 0],
            [0, 0, 1],
        ])
        original_wtc = np.array([3, -2, 1])
        # fmt: on

        # Setup dummy GtsfmData Object with one image
        original_wTc = Pose3(Rot3(original_wRc), original_wtc)
        default_intrinsics = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)
        camera = PinholeCameraCal3Bundler(original_wTc, default_intrinsics)
        gtsfm_data = GtsfmData(number_images=1)
        gtsfm_data.add_camera(0, camera)

        image = Image(value_array=None, file_name="dummy_image.jpg")
        images = [image]

        # Perform write and read operations inside a temporary directory
        with tempfile.TemporaryDirectory() as tempdir:
            images_fpath = os.path.join(tempdir, "images.txt")

            io_utils.write_images(gtsfm_data, images, tempdir)
            wTi_list, _ = io_utils.read_images_txt(images_fpath)
            recovered_wTc = wTi_list[0]

        npt.assert_almost_equal(original_wTc.matrix(),
                                recovered_wTc.matrix(),
                                decimal=3)
Example #10
    def test_compute_keypoint_intersections(self) -> None:
        """Tests `compute_keypoint_intersections()` function."""
        # Create cube mesh with side length one centered at origin.
        box = trimesh.primitives.Box()

        # Create arrangement of 4 cameras in x-z plane pointing at the cube.
        fx, k1, k2, u0, v0 = 10, 0, 0, 1, 1
        calibration = Cal3Bundler(fx, k1, k2, u0, v0)
        cam_pos = [[2, 0, 0], [-2, 0, 0], [0, 0, 2], [0, 0, -2]]
        target_pos = [0, 0, 0]
        up_vector = [0, -1, 0]
        cams = [
            PinholeCameraCal3Bundler().Lookat(c, target_pos, up_vector,
                                              calibration) for c in cam_pos
        ]

        # Project keypoint at center of each simulated image and record intersection.
        kpt = Keypoints(coordinates=np.array([[1, 1]]).astype(np.float32))
        expected_intersections = [[0.5, 0, 0], [-0.5, 0, 0], [0, 0, 0.5],
                                  [0, 0, -0.5]]
        estimated_intersections = []
        for cam in cams:
            _, intersection = metric_utils.compute_keypoint_intersections(
                kpt, cam, box, verbose=True)
            estimated_intersections.append(intersection.flatten().tolist())
        np.testing.assert_allclose(expected_intersections,
                                   estimated_intersections)
Example #11
    def __read_calibrations(self) -> List[PinholeCameraCal3Bundler]:
        """Read camera params from the calibration stored as h5 files.

        Returns:
            list of all cameras.
        """

        file_path_template = osp.join(self._folder, "calibration",
                                      "calibration_{}.h5")

        pose_list = []

        for image_name in self._image_names:
            file_path = file_path_template.format(image_name)
            calib_data = io_utils.load_h5(file_path)

            cTw = Pose3(Rot3(calib_data["R"]), calib_data["T"])
            K_matrix = calib_data["K"]
            # TODO: fix different fx and fy (and underparameterization of K)
            K = Cal3Bundler(
                fx=float(K_matrix[0, 0] + K_matrix[1, 1]) * 0.5,
                k1=0.0,
                k2=0.0,
                u0=float(K_matrix[0, 2]),
                v0=float(K_matrix[1, 2]),
            )

            pose_list.append(PinholeCameraCal3Bundler(cTw.inverse(), K))

        return pose_list
Example #12
    def test_create_computation_graph(self):
        """Test the dask computation graph."""
        sharedCal = Cal3Bundler(1500, 0, 0, 640, 480)
        cameras = {
            i: PinholeCameraCal3Bundler(x, sharedCal)
            for (i, x) in enumerate(self.poses)
        }

        camera_graph = dask.delayed(cameras)

        corr_idxs_graph = {
            k: dask.delayed(v)
            for (k, v) in self.dummy_corr_idxs_dict.items()
        }

        keypoints_graph = [dask.delayed(x) for x in self.keypoints_list]

        da_graph = self.obj.create_computation_graph(camera_graph,
                                                     corr_idxs_graph,
                                                     keypoints_graph)

        with dask.config.set(scheduler="single-threaded"):
            dask_result = dask.compute(da_graph)[0]

        self.assertIsInstance(dask_result, SfmData)
Example #13
    def get_camera_intrinsics(self, index: int) -> Optional[Cal3Bundler]:
        """Get the camera intrinsics at the given index.

        Args:
            index: the index to fetch.

        Returns:
            intrinsics for the given camera.
        """
        if len(self.explicit_intrinsics_paths) == 0:
            # get intrinsics from exif

            return io_utils.load_image(
                self.image_paths[index]).get_intrinsics_from_exif()

        else:
            # TODO: handle extra inputs in the intrinsics array
            intrinsics_array = np.load(self.explicit_intrinsics_paths[index])

            return Cal3Bundler(
                fx=min(intrinsics_array[0, 0], intrinsics_array[1, 1]),
                k1=0,
                k2=0,
                u0=intrinsics_array[0, 2],
                v0=intrinsics_array[1, 2],
            )
Example #14
def simulate_two_planes_scene(M: int, N: int) -> Tuple[Keypoints, Keypoints, EssentialMatrix]:
    """Generate a scene where 3D points are on two planes, and projects the points to the 2 cameras. There are M points
    on plane 1, and N points on plane 2.

    The two planes in this test are:
    1. -10x -y -20z +150 = 0
    2. 15x -2y -35z +200 = 0

    Args:
        M: number of points on 1st plane.
        N: number of points on 2nd plane.

    Returns:
        keypoints for image i1, of length (M+N).
        keypoints for image i2, of length (M+N).
        Essential matrix i2Ei1.
    """
    # range of 3D points
    range_x_coordinate = (-5, 7)
    range_y_coordinate = (-10, 10)

    # define the plane equation
    plane1_coeffs = (-10, -1, -20, 150)
    plane2_coeffs = (15, -2, -35, 200)

    # sample the points from planes
    plane1_points = sample_points_on_plane(plane1_coeffs, range_x_coordinate, range_y_coordinate, M)
    plane2_points = sample_points_on_plane(plane2_coeffs, range_x_coordinate, range_y_coordinate, N)

    points_3d = np.vstack((plane1_points, plane2_points))

    # define the camera poses and compute the essential matrix
    wti1 = np.array([0.1, 0, -20])
    wti2 = np.array([1, -2, -20.4])

    wRi1 = Rot3.RzRyRx(np.pi / 20, 0, 0.0)
    wRi2 = Rot3.RzRyRx(0.0, np.pi / 6, 0.0)

    wTi1 = Pose3(wRi1, wti1)
    wTi2 = Pose3(wRi2, wti2)
    i2Ti1 = wTi2.between(wTi1)

    i2Ei1 = EssentialMatrix(i2Ti1.rotation(), Unit3(i2Ti1.translation()))

    # project 3D points to 2D image measurements
    intrinsics = Cal3Bundler()
    camera_i1 = PinholeCameraCal3Bundler(wTi1, intrinsics)
    camera_i2 = PinholeCameraCal3Bundler(wTi2, intrinsics)

    uv_im1 = []
    uv_im2 = []
    for point in points_3d:
        uv_im1.append(camera_i1.project(point))
        uv_im2.append(camera_i2.project(point))

    uv_im1 = np.vstack(uv_im1)
    uv_im2 = np.vstack(uv_im2)

    # return the points as keypoints and the essential matrix
    return Keypoints(coordinates=uv_im1), Keypoints(coordinates=uv_im2), i2Ei1
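A minimal usage sketch (illustrative, assuming the helpers above are importable): request three points on each plane and check the per-image keypoint counts.

keypoints_i1, keypoints_i2, i2Ei1 = simulate_two_planes_scene(M=3, N=3)
assert len(keypoints_i1) == 6 and len(keypoints_i2) == 6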
Example #15
def test_compute_track_reprojection_errors():
    """Ensure that reprojection error is computed properly within a track.

    # For camera 0:
    # [13] = [10,0,3]   [1,0,0 | 0]  [1]
    # [24] = [0,10,4] * [0,1,0 | 0] *[2]
    #  [1] = [0, 0,1]   [0,0,1 | 0]  [1]
    #                                [1]

    # For camera 1:
    # [-7] = [10,0,3]   [1,0,0 |-2]  [1]
    # [44] = [0,10,4] * [0,1,0 | 2] *[2]
    #  [1] = [0, 0,1]   [0,0,1 | 0]  [1]
    #                                [1]
    """
    wTi0 = Pose3(Rot3.RzRyRx(0, 0, 0), np.zeros((3, 1)))
    wTi1 = Pose3(Rot3.RzRyRx(0, 0, 0), np.array([2, -2, 0]))

    f = 10
    k1 = 0
    k2 = 0
    u0 = 3
    v0 = 4

    K0 = Cal3Bundler(f, k1, k2, u0, v0)
    K1 = Cal3Bundler(f, k1, k2, u0, v0)

    track_camera_dict = {
        0: PinholeCameraCal3Bundler(wTi0, K0),
        1: PinholeCameraCal3Bundler(wTi1, K1)
    }

    triangulated_pt = np.array([1, 2, 1])
    track_3d = SfmTrack(triangulated_pt)

    # in camera 0
    track_3d.addMeasurement(idx=0, m=np.array([13, 24]))
    # in camera 1
    track_3d.addMeasurement(idx=1, m=np.array(
        [-8, 43]))  # should be (-7,44), 1 px error in each dim

    errors, avg_track_reproj_error = reproj_utils.compute_track_reprojection_errors(
        track_camera_dict, track_3d)

    expected_errors = np.array([0, np.sqrt(2)])
    np.testing.assert_allclose(errors, expected_errors)
    assert avg_track_reproj_error == np.sqrt(2) / 2
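To connect the docstring's hand computation to code, here is a small standalone sketch (illustrative, not part of the test) reproducing the camera-1 projection: move the point into the camera frame, apply K, and recover the (-7, 44) pixel referenced above.

import numpy as np

K = np.array([[10.0, 0.0, 3.0],
              [0.0, 10.0, 4.0],
              [0.0, 0.0, 1.0]])
wti1 = np.array([2.0, -2.0, 0.0])  # camera-1 center, identity rotation
point = np.array([1.0, 2.0, 1.0])  # triangulated 3D point

x_cam = point - wti1               # world frame -> camera-1 frame
uv_homogeneous = K @ x_cam
uv = uv_homogeneous[:2] / uv_homogeneous[2]
assert np.allclose(uv, np.array([-7.0, 44.0]))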
Example #16
def test_filter_to_cycle_consistent_edges() -> None:
    """Ensure correct edges are kept in a 2-triplet scenario.

    Scenario Ground Truth: consider 5 camera poses in a line, connected as follows, all with identity rotations:

    Spatial layout:
      _________    ________
     /         \\ /        \\
    i4 -- i3 -- i2 -- i1 -- i0

    Topological layout:

     i4          i0
           i2
     i3          i1

    Suppose that, in the measurements, the relative rotation for edge (i2,i4) is corrupted by 15 degrees.
    """
    i2Ri1_dict = {
        (0, 1): Rot3(),
        (1, 2): Rot3(),
        (0, 2): Rot3(),
        (2, 3): Rot3(),
        (3, 4): Rot3(),
        (2, 4): Rot3.Ry(np.deg2rad(15)),
    }
    i2Ui1_dict = {
        (0, 1): Unit3(np.array([1, 0, 0])),
        (1, 2): Unit3(np.array([1, 0, 0])),
        (0, 2): Unit3(np.array([1, 0, 0])),
        (2, 3): Unit3(np.array([1, 0, 0])),
        (3, 4): Unit3(np.array([1, 0, 0])),
        (2, 4): Unit3(np.array([1, 0, 0])),
    }
    # The ViewGraphEstimator assumes these dicts contain the keys corresponding to the rotation edges.
    calibrations = {k: Cal3Bundler() for k in range(0, 5)}
    corr_idxs_i1i2 = {i1i2: np.array([]) for i1i2 in i2Ri1_dict.keys()}
    keypoints = {k: np.array([]) for k in range(0, 5)}

    # populate dummy 2-view reports
    two_view_reports = {}
    for (i1, i2) in i2Ri1_dict.keys():
        two_view_reports[(i1, i2)] = TwoViewEstimationReport(
            v_corr_idxs=np.array([]),  # dummy array
            num_inliers_est_model=10,  # dummy value
        )

    rcc_estimator = CycleConsistentRotationViewGraphEstimator(
        EdgeErrorAggregationCriterion.MEDIAN_EDGE_ERROR)
    viewgraph_edges = rcc_estimator.run(i2Ri1_dict=i2Ri1_dict,
                                        i2Ui1_dict=i2Ui1_dict,
                                        calibrations=calibrations,
                                        corr_idxs_i1i2=corr_idxs_i1i2,
                                        keypoints=keypoints,
                                        two_view_reports=two_view_reports)

    # non-self-consistent triplet should have been removed
    expected_edges = {(0, 1), (1, 2), (0, 2)}
    assert set(viewgraph_edges) == expected_edges
Example #17
    def test_get_camera_intrinsics_exif(self):
        """Tests getter for intrinsics when explicit numpy arrays are absent and we fall back on exif."""
        loader = OlssonLoader(EXIF_FOLDER,
                              image_extension="JPG",
                              use_gt_intrinsics=False)
        computed = loader.get_camera_intrinsics(5)
        expected = Cal3Bundler(fx=2378.983, k1=0, k2=0, u0=648.0, v0=968.0)
        self.assertTrue(expected.equals(computed, 1e-3))
Example #18
    def test_get_camera_intrinsics_explicit(self):
        """Tests getter for intrinsics when explicit numpy arrays with intrinsics are present on disk."""

        computed = self.loader.get_camera_intrinsics(5)

        expected = Cal3Bundler(fx=2378.983, k1=0, k2=0, u0=968.0, v0=648.0)

        self.assertTrue(expected.equals(computed, 1e-3))
Example #19
    def test_recover_relative_pose_from_essential_matrix_none(self):
        """Test for function to extract relative pose from essential matrix."""

        # simulate correspondences and the essential matrix
        corr_i1, corr_i2, _ = simulate_two_planes_scene(10, 10)

        i2Ri1, i2Ui1 = verification_utils.recover_relative_pose_from_essential_matrix(
            i2Ei1=None,
            verified_coordinates_i1=corr_i1.coordinates,
            verified_coordinates_i2=corr_i2.coordinates,
            camera_intrinsics_i1=Cal3Bundler(),
            camera_intrinsics_i2=Cal3Bundler(),
        )

        # compare the recovered R and U with the ground truth
        self.assertIsNone(i2Ri1)
        self.assertIsNone(i2Ui1)
Example #20
    def test_normalize_coordinates(self):
        coordinates = np.array([[10.0, 20.0], [25.0, 12.0], [30.0, 33.0]])

        intrinsics = Cal3Bundler(fx=100, k1=0.0, k2=0.0, u0=20.0, v0=30.0)

        normalized_coordinates = feature_utils.normalize_coordinates(coordinates, intrinsics)

        expected_coordinates = np.array([[-0.1, -0.1], [0.05, -0.18], [0.1, 0.03]])

        np.testing.assert_allclose(normalized_coordinates, expected_coordinates)
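For reference, a tiny sketch (an illustration, not the library implementation) of why those expected values hold: with zero distortion, Cal3Bundler normalization reduces to subtracting the principal point and dividing by the focal length.

import numpy as np

fx, u0, v0 = 100.0, 20.0, 30.0
coordinates = np.array([[10.0, 20.0], [25.0, 12.0], [30.0, 33.0]])
normalized = (coordinates - np.array([u0, v0])) / fx
# -> [[-0.1, -0.1], [0.05, -0.18], [0.1, 0.03]]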
Example #21
    def test_verify_empty_matches(self):
        """Tests the output when there are no match indices."""

        keypoints_i1 = feature_utils.generate_random_keypoints(10, [250, 300])
        keypoints_i2 = feature_utils.generate_random_keypoints(12, [400, 300])
        match_indices = np.array([], dtype=np.int32)
        intrinsics_i1 = Cal3Bundler()
        intrinsics_i2 = Cal3Bundler()

        self.__execute_test(
            keypoints_i1,
            keypoints_i2,
            match_indices,
            intrinsics_i1,
            intrinsics_i2,
            i2Ri1_expected=None,
            i2Ui1_expected=None,
            verified_indices_expected=np.array([], dtype=np.uint32),
        )
Example #22
    def setUp(self):
        """Set up the data association module."""
        super().setUp()

        # landmark ~5 meters in front of the camera
        self.expected_landmark = Point3(5, 0.5, 1.2)

        # shared calibration
        f, k1, k2, u0, v0 = 1500, 0, 0, 640, 480
        self.sharedCal = Cal3Bundler(f, k1, k2, u0, v0)
Example #23
    def test_5pt_algo_5correspondences(self) -> None:
        """Tests verification via the 5-point algorithm on exactly 5 annotated correspondences."""
        fx, px, py, k1, k2 = load_log_front_center_intrinsics()
        keypoints_i1, keypoints_i2 = load_argoverse_log_annotated_correspondences()

        # match keypoints row by row
        match_indices = np.vstack(
            [np.arange(len(keypoints_i1)),
             np.arange(len(keypoints_i1))]).T

        intrinsics_i1 = Cal3Bundler(fx, k1, k2, px, py)
        intrinsics_i2 = Cal3Bundler(fx, k1, k2, px, py)

        match_indices = match_indices[:5]

        i2Ri1, i2ti1, _, _ = self.verifier.verify(keypoints_i1, keypoints_i2,
                                                  match_indices, intrinsics_i1,
                                                  intrinsics_i2)
Example #24
    def test_get_camera_intrinsics_explicit(self):
        """Tests getter for intrinsics when explicit data.mat file with intrinsics are present on disk."""
        expected_fx = 1392.10693

        expected_px = 980.175985
        expected_py = 604.353418

        computed = self.loader.get_camera_intrinsics(0)
        expected = Cal3Bundler(fx=expected_fx, k1=0, k2=0, u0=expected_px, v0=expected_py)
        self.assertTrue(expected.equals(computed, 1e-3))
Example #25
    def test_round_trip_cameras_txt(self) -> None:
        """Creates a two cameras and writes to cameras.txt (in a temporary directory). Then reads cameras.txt to recover
        the information. Checks if the original and recovered cameras match up."""

        # Create multiple calibration data
        k1 = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)
        k2 = Cal3Bundler(fx=200, k1=0.001, k2=0, u0=1000, v0=2000)
        k3 = Cal3Bundler(fx=300, k1=0.004, k2=0.001, u0=1001, v0=2002)
        original_calibrations = [k1, k2, k3]

        gtsfm_data = GtsfmData(number_images=len(original_calibrations))

        # Populate gtsfm_data with the generated values
        for i in range(len(original_calibrations)):
            camera = PinholeCameraCal3Bundler(Pose3(),
                                              original_calibrations[i])
            gtsfm_data.add_camera(i, camera)

        # Generate dummy images
        image = Image(value_array=np.zeros((240, 320)),
                      file_name="dummy_image.jpg")
        images = [image for i in range(len(original_calibrations))]

        # Round trip
        with tempfile.TemporaryDirectory() as tempdir:
            cameras_fpath = os.path.join(tempdir, "cameras.txt")

            io_utils.write_cameras(gtsfm_data, images, tempdir)
            recovered_calibrations = io_utils.read_cameras_txt(cameras_fpath)

        self.assertEqual(len(original_calibrations),
                         len(recovered_calibrations))

        for i in range(len(recovered_calibrations)):
            K_ori = original_calibrations[i]
            K_rec = recovered_calibrations[i]

            self.assertEqual(K_ori.fx(), K_rec.fx())
            self.assertEqual(K_ori.px(), K_rec.px())
            self.assertEqual(K_ori.py(), K_rec.py())
            self.assertEqual(K_ori.k1(), K_rec.k1())
            self.assertEqual(K_ori.k2(), K_rec.k2())
Example #26
    def test_pinholeCameraCal3Bundler_roundtrip(self):
        """Test the round-trip on Unit3 object."""

        expected = PinholeCameraCal3Bundler(
            Pose3(Rot3.RzRyRx(0, 0.1, -0.05), np.random.randn(3, 1)),
            Cal3Bundler(fx=100, k1=0.1, k2=0.2, u0=100, v0=70),
        )
        header, frames = serialize(expected)
        recovered = deserialize(header, frames)

        self.assertTrue(expected.equals(recovered, 1e-5))
Example #27
    def test_get_camera_valid(self):
        """Test for get_camera for a valid index."""
        expected = PinholeCameraCal3Bundler(
            Pose3(), Cal3Bundler(fx=900, k1=0, k2=0, u0=100, v0=100))

        i = 0
        test_data = GtsfmData(1)
        test_data.add_camera(index=i, camera=expected)

        computed = test_data.get_camera(i)
        self.assertTrue(computed.equals(expected, EQUALITY_TOLERANCE))
Example #28
    def test_verify_empty_matches(self):
        """Tests the output when there are no match indices."""

        keypoints_i1 = generate_random_keypoints(10, [250, 300])
        keypoints_i2 = generate_random_keypoints(12, [400, 300])
        match_indices = np.array([], dtype=np.int32)
        intrinsics_i1 = Cal3Bundler()
        intrinsics_i2 = Cal3Bundler()

        (i2Ri1, i2Ui1, verified_indices,) = self.verifier.verify_with_exact_intrinsics(
            keypoints_i1,
            keypoints_i2,
            match_indices,
            intrinsics_i1,
            intrinsics_i2,
        )

        self.assertIsNone(i2Ri1)
        self.assertIsNone(i2Ui1)
        self.assertEqual(0, verified_indices.size)
Example #29
def test_compute_point_reprojection_errors():
    """Ensure a hypothesized 3d point is projected correctly and compared w/ 2 measurements.
    # For camera 0:
    # [13] = [10,0,3]   [1,0,0 | 0]  [1]
    # [24] = [0,10,4] * [0,1,0 | 0] *[2]
    #  [1] = [0, 0,1]   [0,0,1 | 0]  [1]
    #                                [1]

    # For camera 1:
    # [-7] = [10,0,3]   [1,0,0 |-2]  [1]
    # [44] = [0,10,4] * [0,1,0 | 2] *[2]
    #  [1] = [0, 0,1]   [0,0,1 | 0]  [1]
    #                                [1]
    """
    wTi0 = Pose3(Rot3.RzRyRx(0, 0, 0), np.zeros((3, 1)))
    wTi1 = Pose3(Rot3.RzRyRx(0, 0, 0), np.array([2, -2, 0]))

    f = 10
    k1 = 0
    k2 = 0
    u0 = 3
    v0 = 4

    K0 = Cal3Bundler(f, k1, k2, u0, v0)
    K1 = Cal3Bundler(f, k1, k2, u0, v0)

    track_camera_dict = {
        0: PinholeCameraCal3Bundler(wTi0, K0),
        1: PinholeCameraCal3Bundler(wTi1, K1)
    }
    point3d = np.array([1, 2, 1])
    measurements = [
        SfmMeasurement(i=1, uv=np.array([-8, 43])),
        SfmMeasurement(i=0, uv=np.array([13, 24]))
    ]

    errors, avg_track_reproj_error = reproj_utils.compute_point_reprojection_errors(
        track_camera_dict, point3d, measurements)
    expected_errors = np.array([np.sqrt(2), 0])
    np.testing.assert_allclose(errors, expected_errors)
    assert avg_track_reproj_error == np.sqrt(2) / 2
Example #30
    def test_simple_scene(self):
        """Test a simple scene with 10 points, 5 each on 2 planes, so that RANSAC family of methods do not
        get trapped into a degenerate sample."""
        # obtain the keypoints and the ground truth essential matrix.
        keypoints_i1, keypoints_i2, i2Ei1_expected = simulate_two_planes_scene(
            4, 4)

        # match keypoints row by row
        match_indices = np.vstack(
            (np.arange(len(keypoints_i1)), np.arange(len(keypoints_i1)))).T

        # run the test w/ and w/o using intrinsics in verification
        self.__execute_test(
            keypoints_i1,
            keypoints_i2,
            match_indices,
            Cal3Bundler(),
            Cal3Bundler(),
            i2Ei1_expected.rotation(),
            i2Ei1_expected.direction(),
            match_indices,
        )