Example #1
def simulate_two_planes_scene(M: int, N: int) -> Tuple[Keypoints, Keypoints, EssentialMatrix]:
    """Generate a scene where 3D points are on two planes, and projects the points to the 2 cameras. There are M points
    on plane 1, and N points on plane 2.

    The two planes in this test are:
    1. -10x -y -20z +150 = 0
    2. 15x -2y -35z +200 = 0

    Args:
        M: number of points on 1st plane.
        N: number of points on 2nd plane.

    Returns:
        keypoints for image i1, of length (M+N).
        keypoints for image i2, of length (M+N).
        Essential matrix i2Ei1.
    """
    # range of 3D points
    range_x_coordinate = (-5, 7)
    range_y_coordinate = (-10, 10)

    # define the plane equation
    plane1_coeffs = (-10, -1, -20, 150)
    plane2_coeffs = (15, -2, -35, 200)

    # sample the points from planes
    plane1_points = sample_points_on_plane(plane1_coeffs, range_x_coordinate, range_y_coordinate, M)
    plane2_points = sample_points_on_plane(plane2_coeffs, range_x_coordinate, range_y_coordinate, N)

    points_3d = np.vstack((plane1_points, plane2_points))

    # define the camera poses and compute the essential matrix
    wti1 = np.array([0.1, 0, -20])
    wti2 = np.array([1, -2, -20.4])

    wRi1 = Rot3.RzRyRx(np.pi / 20, 0, 0.0)
    wRi2 = Rot3.RzRyRx(0.0, np.pi / 6, 0.0)

    wTi1 = Pose3(wRi1, wti1)
    wTi2 = Pose3(wRi2, wti2)
    i2Ti1 = wTi2.between(wTi1)

    i2Ei1 = EssentialMatrix(i2Ti1.rotation(), Unit3(i2Ti1.translation()))

    # project 3D points to 2D image measurements
    intrinsics = Cal3Bundler()
    camera_i1 = PinholeCameraCal3Bundler(wTi1, intrinsics)
    camera_i2 = PinholeCameraCal3Bundler(wTi2, intrinsics)

    uv_im1 = []
    uv_im2 = []
    for point in points_3d:
        uv_im1.append(camera_i1.project(point))
        uv_im2.append(camera_i2.project(point))

    uv_im1 = np.vstack(uv_im1)
    uv_im2 = np.vstack(uv_im2)

    # return the points as keypoints and the essential matrix
    return Keypoints(coordinates=uv_im1), Keypoints(coordinates=uv_im2), i2Ei1
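Note: the `sample_points_on_plane` helper used above is not shown on this page. A minimal sketch of what it might look like, assuming plane coefficients (a, b, c, d) satisfying a*x + b*y + c*z + d = 0 with c != 0; the name and signature are taken from the call sites above, not from the library itself:

import numpy as np
from typing import Tuple

def sample_points_on_plane(
    plane_coeffs: Tuple[float, float, float, float],
    range_x: Tuple[float, float],
    range_y: Tuple[float, float],
    num_points: int,
) -> np.ndarray:
    """Return a (num_points, 3) array of 3D points on the plane ax + by + cz + d = 0 (sketch)."""
    a, b, c, d = plane_coeffs
    x = np.random.uniform(range_x[0], range_x[1], num_points)
    y = np.random.uniform(range_y[0], range_y[1], num_points)
    z = -(a * x + b * y + d) / c  # solve the plane equation for z (assumes c != 0)
    return np.stack([x, y, z], axis=-1)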
Example #2
def calculate_triangulation_angle_in_degrees(
        camera_1: PinholeCameraCal3Bundler, camera_2: PinholeCameraCal3Bundler,
        point_3d: np.ndarray) -> float:
    """Calculates the angle formed at the 3D point by the rays backprojected from 2 cameras.
    In the setup with X (point_3d) and two cameras C1 and C2, the triangulation angle is the angle between rays C1-X
    and C2-X, i.e. the angle subtendted at the 3d point.
        X
       / \
      /   \
     /     \
    C1      C2
    References:
    - https://github.com/colmap/colmap/blob/dev/src/base/triangulation.cc#L122
    
    Args:
        camera_1: the first camera.
        camera_2: the second camera.
        point_3d: the 3d point which is imaged by the two camera centers, and where the angle between the light rays
                  associated with the measurements is computed.
    Returns:
        the angle formed at the 3d point, in degrees.
    """
    camera_center_1: np.ndarray = camera_1.pose().translation()
    camera_center_2: np.ndarray = camera_2.pose().translation()

    # compute the two rays
    ray_1 = point_3d - camera_center_1
    ray_2 = point_3d - camera_center_2

    return geometry_utils.compute_relative_unit_translation_angle(
        Unit3(ray_1), Unit3(ray_2))
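For reference, the helper call above reduces to the angle between the two unit rays; `compute_relative_unit_translation_angle` is assumed to return degrees, as the wrapper's name suggests. A minimal numpy sketch of the same computation (mirroring the vectorized variant in a later example):

import numpy as np

def angle_between_rays_in_degrees(ray_1: np.ndarray, ray_2: np.ndarray) -> float:
    """Angle between two rays, in degrees, via the normalized dot product."""
    cos_theta = np.dot(ray_1, ray_2) / (np.linalg.norm(ray_1) * np.linalg.norm(ray_2))
    return np.rad2deg(np.arccos(np.clip(cos_theta, -1.0, 1.0)))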
Example #3
    def test_calculate_triangulation_angle_in_degrees(self) -> None:
        """Test the computation of triangulation angle using a simple example.
        Lengths of line segments are defined as follows:
                   5*sqrt(2)
                  X ---- C1
                  |    /
        5*sqrt(2) |  / 10 = 5*sqrt(2)*sqrt(2)
                  C2
        Cameras and point situated as follows in the x-z plane:
        (0,0,0)
             o---- +z
             |
             |
             +x
                      X (5,0,5)
        (10,0,0)
             o---- +z
             |
             |
             +x
        """
        camera_center_1 = np.array([0, 0, 0])
        camera_center_2 = np.array([10, 0, 0])
        point_3d = np.array([5, 0, 5])

        expected = 90

        wT0 = Pose3(Rot3(), camera_center_1)
        wT1 = Pose3(Rot3(), camera_center_2)
        computed = mvs_utils.calculate_triangulation_angle_in_degrees(
            camera_1=PinholeCameraCal3Bundler(wT0),
            camera_2=PinholeCameraCal3Bundler(wT1),
            point_3d=point_3d,
        )
        self.assertAlmostEqual(computed, expected)
Example #4
def compute_keypoint_intersections(
        keypoints: Keypoints,
        gt_camera: PinholeCameraCal3Bundler,
        gt_scene_mesh: Trimesh,
        verbose: bool = False) -> Tuple[np.ndarray, np.ndarray]:
    """Computes intersections between ground truth surface mesh and rays originating from image keypoints.

    Args:
        keypoints: N keypoints computed in image.
        gt_camera: ground truth camera.
        gt_scene_mesh: ground truth triangular surface mesh.
        verbose: whether to log the ray-casting runtime.

    Returns:
        keypoint_ind: (M,) array of keypoint indices whose corresponding ray intersected the ground truth mesh.
        intersections: (M, 3) array of ray intersection locations.
    """
    num_kpts = len(keypoints)
    src = np.repeat(gt_camera.pose().translation().reshape((-1, 3)),
                    num_kpts,
                    axis=0)  # ray origins: the camera center, repeated once per keypoint
    drc = np.asarray([
        gt_camera.backproject(keypoints.coordinates[i], depth=1.0) - src[i, :]
        for i in range(num_kpts)
    ])
    start_time = timeit.default_timer()
    intersections, keypoint_ind, _ = gt_scene_mesh.ray.intersects_location(
        src, drc, multiple_hits=False)
    if verbose:
        logger.debug("Case %d rays in %.6f seconds.", num_kpts,
                     timeit.default_timer() - start_time)

    return keypoint_ind, intersections
Example #5
    def test_two_view_correspondences(self):
        """Tests the bundle adjustment for relative pose on a simulated scene."""

        i1Ri2 = EXAMPLE_DATA.get_camera(1).pose().rotation()
        i1ti2 = EXAMPLE_DATA.get_camera(1).pose().translation()
        i2Ti1 = Pose3(i1Ri2, i1ti2)
        camera_i1 = PinholeCameraCal3Bundler(Pose3(), Cal3Bundler())
        camera_i2 = PinholeCameraCal3Bundler(i2Ti1, Cal3Bundler())
        tracks_3d, valid_indices = TwoViewEstimator.triangulate_two_view_correspondences(
            camera_i1, camera_i2, self.keypoints_i1, self.keypoints_i2,
            self.corr_idxs)
        self.assertEqual(len(tracks_3d), 5)
        self.assertEqual(len(valid_indices), 5)
Example #6
    def __runWithCheiralityException(self, obj: Point3dInitializer) -> bool:
        """Run the initialization in a a-cheiral setup, and check that the result is a None track."""

        cameras = obj.track_camera_dict

        # flip the cameras first
        yaw = np.pi
        camera_flip_pose = Pose3(Rot3.RzRyRx(yaw, 0, 0), np.zeros((3, 1)))
        flipped_cameras = {
            i: PinholeCameraCal3Bundler(cam.pose().compose(camera_flip_pose),
                                        cam.calibration())
            for i, cam in cameras.items()
        }

        obj_with_flipped_cameras = Point3dInitializer(
            flipped_cameras,
            obj.mode,
            obj.reproj_error_thresh,
            obj.num_ransac_hypotheses,
        )

        sfm_track = obj_with_flipped_cameras.triangulate(
            SfmTrack2d(MEASUREMENTS))

        return sfm_track is None
Example #7
    def test_create_computation_graph(self):
        """Test the dask computation graph."""
        sharedCal = Cal3Bundler(1500, 0, 0, 640, 480)
        cameras = {
            i: PinholeCameraCal3Bundler(x, sharedCal)
            for (i, x) in enumerate(self.poses)
        }

        camera_graph = dask.delayed(cameras)

        corr_idxs_graph = {
            k: dask.delayed(v)
            for (k, v) in self.dummy_corr_idxs_dict.items()
        }

        keypoints_graph = [dask.delayed(x) for x in self.keypoints_list]

        da_graph = self.obj.create_computation_graph(camera_graph,
                                                     corr_idxs_graph,
                                                     keypoints_graph)

        with dask.config.set(scheduler="single-threaded"):
            dask_result = dask.compute(da_graph)[0]

        self.assertIsInstance(dask_result, SfmData)
Example #8
    def test_compute_keypoint_intersections(self) -> None:
        """Tests `compute_keypoint_intersections()` function."""
        # Create cube mesh with side length one centered at origin.
        box = trimesh.primitives.Box()

        # Create arrangement of 4 cameras in x-z plane pointing at the cube.
        fx, k1, k2, u0, v0 = 10, 0, 0, 1, 1
        calibration = Cal3Bundler(fx, k1, k2, u0, v0)
        cam_pos = [[2, 0, 0], [-2, 0, 0], [0, 0, 2], [0, 0, -2]]
        target_pos = [0, 0, 0]
        up_vector = [0, -1, 0]
        cams = [
            PinholeCameraCal3Bundler().Lookat(c, target_pos, up_vector,
                                              calibration) for c in cam_pos
        ]

        # Project keypoint at center of each simulated image and record intersection.
        kpt = Keypoints(coordinates=np.array([[1, 1]]).astype(np.float32))
        expected_intersections = [[0.5, 0, 0], [-0.5, 0, 0], [0, 0, 0.5],
                                  [0, 0, -0.5]]
        estimated_intersections = []
        for cam in cams:
            _, intersection = metric_utils.compute_keypoint_intersections(
                kpt, cam, box, verbose=True)
            estimated_intersections.append(intersection.flatten().tolist())
        np.testing.assert_allclose(expected_intersections,
                                   estimated_intersections)
Example #9
    def test_round_trip_images_txt(self) -> None:
        """Starts with a pose. Writes the pose to images.txt (in a temporary directory). Then reads images.txt to recover
        that same pose. Checks if the original wTc and recovered wTc match up."""

        # fmt: off
        # Rotation 45 degrees about the z-axis.
        original_wRc = np.array([[np.cos(np.pi / 4), -np.sin(np.pi / 4), 0],
                                 [np.sin(np.pi / 4),
                                  np.cos(np.pi / 4), 0], [0, 0, 1]])
        original_wtc = np.array([3, -2, 1])
        # fmt: on

        # Setup dummy GtsfmData Object with one image
        original_wTc = Pose3(Rot3(original_wRc), original_wtc)
        default_intrinsics = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)
        camera = PinholeCameraCal3Bundler(original_wTc, default_intrinsics)
        gtsfm_data = GtsfmData(number_images=1)
        gtsfm_data.add_camera(0, camera)

        image = Image(value_array=None, file_name="dummy_image.jpg")
        images = [image]

        # Perform write and read operations inside a temporary directory
        with tempfile.TemporaryDirectory() as tempdir:
            images_fpath = os.path.join(tempdir, "images.txt")

            io_utils.write_images(gtsfm_data, images, tempdir)
            wTi_list, _ = io_utils.read_images_txt(images_fpath)
            recovered_wTc = wTi_list[0]

        npt.assert_almost_equal(original_wTc.matrix(),
                                recovered_wTc.matrix(),
                                decimal=3)
Example #10
    def __read_calibrations(self) -> List[PinholeCameraCal3Bundler]:
        """Read camera params from the calibration stored as h5 files.

        Returns:
            list of all cameras.
        """

        file_path_template = osp.join(self._folder, "calibration",
                                      "calibration_{}.h5")

    camera_list = []

        for image_name in self._image_names:
            file_path = file_path_template.format(image_name)
            calib_data = io_utils.load_h5(file_path)

            cTw = Pose3(Rot3(calib_data["R"]), calib_data["T"])
            K_matrix = calib_data["K"]
            # TODO: fix different fx and fy (and underparameterization of K)
            K = Cal3Bundler(
                fx=float(K_matrix[0, 0] + K_matrix[1, 1]) * 0.5,
                k1=0.0,
                k2=0.0,
                u0=float(K_matrix[0, 2]),
                v0=float(K_matrix[1, 2]),
            )

        camera_list.append(PinholeCameraCal3Bundler(cTw.inverse(), K))

    return camera_list
Example #11
def test_compute_track_reprojection_errors():
    """Ensure that reprojection error is computed properly within a track.

    # For camera 0:
    # [13] = [10,0,3]   [1,0,0 | 0]  [1]
    # [24] = [0,10,4] * [0,1,0 | 0] *[2]
    #  [1] = [0, 0,1]   [0,0,1 | 0]  [1]
    #                                [1]

    # For camera 1:
    # [-7] = [10,0,3]   [1,0,0 |-2]  [1]
    # [44] = [0,10,4] * [0,1,0 | 2] *[2]
    #  [1] = [0, 0,1]   [0,0,1 | 0]  [1]
    #                                [1]
    """
    wTi0 = Pose3(Rot3.RzRyRx(0, 0, 0), np.zeros((3, 1)))
    wTi1 = Pose3(Rot3.RzRyRx(0, 0, 0), np.array([2, -2, 0]))

    f = 10
    k1 = 0
    k2 = 0
    u0 = 3
    v0 = 4

    K0 = Cal3Bundler(f, k1, k2, u0, v0)
    K1 = Cal3Bundler(f, k1, k2, u0, v0)

    track_camera_dict = {
        0: PinholeCameraCal3Bundler(wTi0, K0),
        1: PinholeCameraCal3Bundler(wTi1, K1)
    }

    triangulated_pt = np.array([1, 2, 1])
    track_3d = SfmTrack(triangulated_pt)

    # in camera 0
    track_3d.addMeasurement(idx=0, m=np.array([13, 24]))
    # in camera 1
    track_3d.addMeasurement(idx=1, m=np.array(
        [-8, 43]))  # should be (-7,44), 1 px error in each dim

    errors, avg_track_reproj_error = reproj_utils.compute_track_reprojection_errors(
        track_camera_dict, track_3d)

    expected_errors = np.array([0, np.sqrt(2)])
    np.testing.assert_allclose(errors, expected_errors)
    assert avg_track_reproj_error == np.sqrt(2) / 2
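The projection arithmetic in the docstring above can be verified with a standalone numpy check (a sketch, independent of gtsam):

import numpy as np

K = np.array([[10., 0., 3.], [0., 10., 4.], [0., 0., 1.]])  # Cal3Bundler(10, 0, 0, 3, 4) as a matrix
X = np.array([1., 2., 1.])  # the triangulated point

# camera 0 sits at the world origin: uv = dehomogenize(K @ X)
uvw0 = K @ X
assert np.allclose(uvw0[:2] / uvw0[2], [13, 24])

# camera 1 has identity rotation and center (2, -2, 0), so the point in its frame is X - t
uvw1 = K @ (X - np.array([2., -2., 0.]))
assert np.allclose(uvw1[:2] / uvw1[2], [-7, 44])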
Example #12
    def test_get_camera_valid(self):
        """Test for get_camera for a valid index."""
        expected = PinholeCameraCal3Bundler(
            Pose3(), Cal3Bundler(fx=900, k1=0, k2=0, u0=100, v0=100))

        i = 0
        test_data = GtsfmData(1)
        test_data.add_camera(index=i, camera=expected)

        computed = test_data.get_camera(i)
        self.assertTrue(computed.equals(expected, EQUALITY_TOLERANCE))
Example #13
    def test_pinholeCameraCal3Bundler_roundtrip(self):
        """Test the round-trip on Unit3 object."""

        expected = PinholeCameraCal3Bundler(
            Pose3(Rot3.RzRyRx(0, 0.1, -0.05), np.random.randn(3, 1)),
            Cal3Bundler(fx=100, k1=0.1, k2=0.2, u0=100, v0=70),
        )
        header, frames = serialize(expected)
        recovered = deserialize(header, frames)

        self.assertTrue(expected.equals(recovered, 1e-5))
Example #14
def test_compute_point_reprojection_errors():
    """Ensure a hypothesized 3d point is projected correctly and compared w/ 2 measurements.
    # For camera 0:
    # [13] = [10,0,3]   [1,0,0 | 0]  [1]
    # [24] = [0,10,4] * [0,1,0 | 0] *[2]
    #  [1] = [0, 0,1]   [0,0,1 | 0]  [1]
    #                                [1]

    # For camera 1:
    # [-7] = [10,0,3]   [1,0,0 |-2]  [1]
    # [44] = [0,10,4] * [0,1,0 | 2] *[2]
    #  [1] = [0, 0,1]   [0,0,1 | 0]  [1]
    #                                [1]
    """
    wTi0 = Pose3(Rot3.RzRyRx(0, 0, 0), np.zeros((3, 1)))
    wTi1 = Pose3(Rot3.RzRyRx(0, 0, 0), np.array([2, -2, 0]))

    f = 10
    k1 = 0
    k2 = 0
    u0 = 3
    v0 = 4

    K0 = Cal3Bundler(f, k1, k2, u0, v0)
    K1 = Cal3Bundler(f, k1, k2, u0, v0)

    track_camera_dict = {
        0: PinholeCameraCal3Bundler(wTi0, K0),
        1: PinholeCameraCal3Bundler(wTi1, K1)
    }
    point3d = np.array([1, 2, 1])
    measurements = [
        SfmMeasurement(i=1, uv=np.array([-8, 43])),
        SfmMeasurement(i=0, uv=np.array([13, 24]))
    ]

    errors, avg_track_reproj_error = reproj_utils.compute_point_reprojection_errors(
        track_camera_dict, point3d, measurements)
    expected_errors = np.array([np.sqrt(2), 0])
    np.testing.assert_allclose(errors, expected_errors)
    assert avg_track_reproj_error == np.sqrt(2) / 2
Example #15
def calculate_triangulation_angles_in_degrees(
        camera_1: PinholeCameraCal3Bundler, camera_2: PinholeCameraCal3Bundler,
        points_3d: np.ndarray) -> np.ndarray:
    """Vectorized. calculuation of the angles formed at 3D points by the rays backprojected from 2 cameras.
    In the setup with X (point_3d) and two cameras C1 and C2, the triangulation angle is the angle between rays C1-X
    and C2-X, i.e. the angle subtendted at the 3d point.
        X
       / \
      /   \
     /     \
    C1      C2
    References:
    - https://github.com/colmap/colmap/blob/dev/src/base/triangulation.cc#L122
    
    Args:
        camera_1: the first camera.
        camera_2: the second camera.
        points_3d: (N,3) 3d points which are imaged by the two camera centers, and where the angle between the
                  light rays associated with the measurements are computed.
    Returns:
        the angles formed at the 3d points, in degrees.

    https://github.com/colmap/colmap/blob/dev/src/base/triangulation.cc#L147
    """
    camera_center_1: np.ndarray = camera_1.pose().translation()
    camera_center_2: np.ndarray = camera_2.pose().translation()

    N = points_3d.shape[0]
    # ensure broadcasting is in the correct direction
    rays1 = points_3d - camera_center_1.reshape(1, 3)
    rays2 = points_3d - camera_center_2.reshape(1, 3)

    # normalize rays to unit length
    rays1 /= np.linalg.norm(rays1, axis=1).reshape(N, 1)
    rays2 /= np.linalg.norm(rays2, axis=1).reshape(N, 1)

    dot_products = np.multiply(rays1, rays2).sum(axis=1)
    dot_products = np.clip(dot_products, -1, 1)
    angles_rad = np.arccos(dot_products)
    angles_deg = np.rad2deg(angles_rad)
    return angles_deg
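Usage sketch, reusing the geometry of the unit test shown earlier (camera centers at (0,0,0) and (10,0,0), point at (5,0,5), which subtends a 90 degree angle); assumes the numpy/gtsam imports used throughout these examples:

angles = calculate_triangulation_angles_in_degrees(
    camera_1=PinholeCameraCal3Bundler(Pose3(Rot3(), np.array([0.0, 0.0, 0.0]))),
    camera_2=PinholeCameraCal3Bundler(Pose3(Rot3(), np.array([10.0, 0.0, 0.0]))),
    points_3d=np.array([[5.0, 0.0, 5.0]]),
)
np.testing.assert_allclose(angles, [90.0])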
Example #16
    def test_data_association_with_missing_camera(self):
        """Tests the data association with input tracks which use a camera index for which the camera doesn't exist."""

        triangulation_options = TriangulationOptions(
            reproj_error_threshold=5,
            mode=TriangulationSamplingMode.NO_RANSAC,
            min_num_hypotheses=20)
        da = DataAssociation(min_track_len=3,
                             triangulation_options=triangulation_options)

        # add cameras 0 and 2
        cameras = {
            0:
            PinholeCameraCal3Bundler(
                Pose3(Rot3.RzRyRx(0, np.deg2rad(20), 0), np.zeros((3, 1)))),
            2:
            PinholeCameraCal3Bundler(
                Pose3(Rot3.RzRyRx(0, 0, 0), np.array([10, 0, 0]))),
        }

        # just have one track, chaining cams 0->1 and cams 1->2
        corr_idxs_dict = {
            (0, 1): np.array([[0, 0]], dtype=np.int32),
            (1, 2): np.array([[0, 0]], dtype=np.int32)
        }
        keypoints_shared = Keypoints(coordinates=np.array([[20.0, 10.0]]))

        # will lead to a cheirality exception because keypoints are identical in two cameras
        # no track will be formed, and thus connected component will be empty
        sfm_data, _ = da.run(
            num_images=3,
            cameras=cameras,
            corr_idxs_dict=corr_idxs_dict,
            keypoints_list=[keypoints_shared] * 3,
            cameras_gt=[None] * 3,
            relative_pose_priors={},
        )

        self.assertEqual(len(sfm_data.get_valid_camera_indices()), 0)
        self.assertEqual(sfm_data.number_tracks(), 0)
Example #17
    def test_mesh_inlier_correspondences(self) -> None:
        """Tests `compute_keypoint_intersections()` function.

        We arrange four cameras in the x-z plane around a cube centered at the origin with side length 1. These cameras
        are placed at (2, 0, 0), (-2, 0, 0), (0, 0, 2) and (0, 0, -2). We project a single 3d point located at the
        origin into each camera. Since the cube has unit length on each dimension, we expect a keypoint located at the
        center of each image to be found at the boundary of the cube -- 0.5 meters from the origin for each side on the
        z-x plane.
        """
        # Create cube mesh with side length one centered at origin.
        box = trimesh.primitives.Box()

        # Create arrangement of two cameras pointing at the center of one of the cube's faces.
        fx, k1, k2, u0, v0 = 10, 0, 0, 1, 1
        calibration = Cal3Bundler(fx, k1, k2, u0, v0)
        cam_pos = [[2, 1, 0], [2, -1, 0]]
        target_pos = [0.5, 0, 0]
        up_vector = [0, -1, 0]
        cam_i1 = PinholeCameraCal3Bundler().Lookat(cam_pos[0], target_pos,
                                                   up_vector, calibration)
        cam_i2 = PinholeCameraCal3Bundler().Lookat(cam_pos[1], target_pos,
                                                   up_vector, calibration)
        keypoints_i1 = Keypoints(
            coordinates=np.array([[1, 1]]).astype(np.float32))
        keypoints_i2 = Keypoints(
            coordinates=np.array([[1, 1]]).astype(np.float32))

        # Verify that the keypoints at the center of each image form an inlier correspondence.
        is_inlier, reproj_err = metric_utils.mesh_inlier_correspondences(
            keypoints_i1,
            keypoints_i2,
            cam_i1,
            cam_i2,
            box,
            dist_threshold=0.1)
        assert np.count_nonzero(is_inlier) == 1
        assert reproj_err[0] < 1e-4
Example #18
    def test_select_largest_connected_component(self, graph_largest_cc_mock):
        """Test pruning to largest connected component according to tracks.
        
        The function under test calles the graph utility, which has been mocked and we test the call against the mocked
        object.
        """
        gtsfm_data = GtsfmData(5)
        cam = PinholeCameraCal3Bundler(Pose3(), Cal3Bundler())

        # add the same camera at all indices
        for i in range(gtsfm_data.number_images()):
            gtsfm_data.add_camera(i, cam)

        # add two tracks to create two connected components
        track_1 = SfmTrack(
            np.random.randn(3))  # track with 2 cameras, which will be dropped
        track_1.add_measurement(idx=0, m=np.random.randn(2))
        track_1.add_measurement(idx=3, m=np.random.randn(2))

        track_2 = SfmTrack(
            np.random.randn(3))  # track with 3 cameras, which will be retained
        track_2.add_measurement(idx=1, m=np.random.randn(2))
        track_2.add_measurement(idx=2, m=np.random.randn(2))
        track_2.add_measurement(idx=4, m=np.random.randn(2))

        gtsfm_data.add_track(track_1)
        gtsfm_data.add_track(track_2)

        largest_component_data = gtsfm_data.select_largest_connected_component(
        )

        # check the graph util function called with the edges defined by tracks
        graph_largest_cc_mock.assert_called_once_with([(0, 3), (1, 2), (1, 4),
                                                       (2, 4)])

        # check the expected cameras coming just from track_2
        expected_camera_indices = [1, 2, 4]
        computed_camera_indices = largest_component_data.get_valid_camera_indices(
        )
        self.assertListEqual(computed_camera_indices, expected_camera_indices)

        # check that there is just one track
        expected_num_tracks = 1
        computed_num_tracks = largest_component_data.number_tracks()
        self.assertEqual(computed_num_tracks, expected_num_tracks)

        # check the exact track
        computed_track = largest_component_data.get_track(0)
        self.assertTrue(computed_track.equals(track_2, EQUALITY_TOLERANCE))
Example #19
    def testSimpleTriangulationOnDoorDataset(self):
        """Test the tracks of the door dataset using simple triangulation initialization. Using computed tracks with
        ground truth camera params.

        Expecting failures on 2 tracks which have incorrect matches."""
        with open(DOOR_TRACKS_PATH, "rb") as handle:
            tracks = pickle.load(handle)

        loader = FolderLoader(DOOR_DATASET_PATH, image_extension="JPG")

        camera_dict = {
            i: PinholeCameraCal3Bundler(loader.get_camera_pose(i),
                                        loader.get_camera_intrinsics(i))
            for i in range(len(loader))
        }

        initializer = Point3dInitializer(camera_dict,
                                         TriangulationParam.NO_RANSAC,
                                         reproj_error_thresh=1e5)

        # tracks which have expected failures
        # (both tracks have incorrect measurements)
        expected_failures = [
            SfmTrack2d(measurements=[
                SfmMeasurement(i=1,
                               uv=np.array([1252.22729492, 1487.29431152])),
                SfmMeasurement(i=2,
                               uv=np.array([1170.96679688, 1407.35876465])),
                SfmMeasurement(i=4, uv=np.array([263.32104492, 1489.76965332
                                                 ])),
            ]),
            SfmTrack2d(measurements=[
                SfmMeasurement(i=6, uv=np.array([1142.34545898, 735.92169189
                                                 ])),
                SfmMeasurement(i=7, uv=np.array([1179.84155273, 763.04095459
                                                 ])),
                SfmMeasurement(i=9, uv=np.array([216.54107666, 774.74017334])),
            ]),
        ]

        for track_2d in tracks:
            triangulated_track = initializer.triangulate(track_2d)

            if triangulated_track is None:
                # assert we have failures which are already expected
                self.assertIn(track_2d, expected_failures)
Example #20
    def test_colmap2gtsfm(self) -> None:
        """Tests the colmap2gtsfm static method by forward projecting tracks.

        This test also verifies that all data was read and ordered correctly.
        """
        uvs_measured, uvs_expected = [], []
        for idx_sfmtrack in TEST_SFMTRACKS_INDICES:
            sfmtrack = self.loader.get_sfmtrack(idx_sfmtrack)
            for idx_meas in range(sfmtrack.numberMeasurements()):
                image_id, uv_measured = sfmtrack.measurement(idx_meas)
                cal3 = self.loader.get_camera_intrinsics(image_id)
                wTi = self.loader.get_camera_pose(image_id)
                cam = PinholeCameraCal3Bundler(wTi, cal3)
                uv_expected, _ = cam.projectSafe(sfmtrack.point3())
                uvs_measured.append(uv_measured)
                uvs_expected.append(uv_expected)
        # assert all to within 1 pixel absolute difference
        np.testing.assert_allclose(uvs_measured, uvs_expected, atol=1)
Example #21
    def test_round_trip_cameras_txt(self) -> None:
        """Creates a two cameras and writes to cameras.txt (in a temporary directory). Then reads cameras.txt to recover
        the information. Checks if the original and recovered cameras match up."""

        # Create multiple calibration data
        k1 = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)
        k2 = Cal3Bundler(fx=200, k1=0.001, k2=0, u0=1000, v0=2000)
        k3 = Cal3Bundler(fx=300, k1=0.004, k2=0.001, u0=1001, v0=2002)
        original_calibrations = [k1, k2, k3]

        gtsfm_data = GtsfmData(number_images=len(original_calibrations))

        # Populate gtsfm_data with the generated values
        for i in range(len(original_calibrations)):
            camera = PinholeCameraCal3Bundler(Pose3(),
                                              original_calibrations[i])
            gtsfm_data.add_camera(i, camera)

        # Generate dummy images
        image = Image(value_array=np.zeros((240, 320)),
                      file_name="dummy_image.jpg")
        images = [image for i in range(len(original_calibrations))]

        # Round trip
        with tempfile.TemporaryDirectory() as tempdir:
            cameras_fpath = os.path.join(tempdir, "cameras.txt")

            io_utils.write_cameras(gtsfm_data, images, tempdir)
            recovered_calibrations = io_utils.read_cameras_txt(cameras_fpath)

        self.assertEqual(len(original_calibrations),
                         len(recovered_calibrations))

        for i in range(len(recovered_calibrations)):
            K_ori = original_calibrations[i]
            K_rec = recovered_calibrations[i]

            self.assertEqual(K_ori.fx(), K_rec.fx())
            self.assertEqual(K_ori.px(), K_rec.px())
            self.assertEqual(K_ori.py(), K_rec.py())
            self.assertEqual(K_ori.k1(), K_rec.k1())
            self.assertEqual(K_ori.k2(), K_rec.k2())
Example #22
def generate_noisy_2d_measurements(
    world_point: Point3,
    calibrations: List[Cal3Bundler],
    per_image_noise_vecs: np.ndarray,
    poses: Pose3Vector,
) -> Tuple[List[Keypoints], List[Tuple[int, int]], Dict[int, PinholeCameraCal3Bundler]]:
    """
    Generate PinholeCameras from specified poses and calibrations, and then generate
    1 measurement per camera of a given 3d point.

    Args:
        world_point: 3d coords of 3d landmark in world frame
        calibrations: List of calibrations for each camera
        per_image_noise_vecs: Amounts of noise to be added to each measurement, one 2d vector per camera
        poses: List of poses for each camera in world frame

    Returns:
        keypoints_list: List of keypoints in all images (projected measurements in all images)
        img_idxs: Tuple of indices for all images
        cameras: Dictionary mapping image index i to calibrated PinholeCamera object
    """
    keypoints_list = []
    measurements = Point2Vector()
    cameras = dict()
    for i in range(len(poses)):
        camera = PinholeCameraCal3Bundler(poses[i], calibrations[i])
        # Project the landmark into this camera to obtain its measurement
        z = camera.project(world_point)
        cameras[i] = camera
        measurement = z + per_image_noise_vecs[i]
        measurements.append(measurement)
        keypoints_list += [Keypoints(coordinates=measurement.reshape(1, 2))]

    # Create image indices for each pose - only subsequent pairwise matches
    # assumed, e.g. between images (0,1) and images (1,2)
    img_idxs = []
    for i in range(len(poses) - 1):
        img_idxs += [(i, i + 1)]

    return keypoints_list, img_idxs, cameras
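Usage sketch with hypothetical poses (two cameras 5 m behind the landmark looking along +z, zero noise); `Pose3Vector.append` is assumed to be available in the gtsam Python wrapper:

import numpy as np
from gtsam import Cal3Bundler, Point3, Pose3, Pose3Vector, Rot3

poses = Pose3Vector()
poses.append(Pose3(Rot3(), Point3(0, 0, -5)))  # camera 0
poses.append(Pose3(Rot3(), Point3(1, 0, -5)))  # camera 1, offset 1 m in x

keypoints_list, img_idxs, cameras = generate_noisy_2d_measurements(
    world_point=Point3(0, 0, 0),
    calibrations=[Cal3Bundler(), Cal3Bundler()],
    per_image_noise_vecs=np.zeros((2, 2)),
    poses=poses,
)
assert img_idxs == [(0, 1)]  # only subsequent pairwise matches are assumed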
Example #23
def init_cameras(
    wRi_list: List[Optional[Rot3]],
    wti_list: List[Optional[Point3]],
    intrinsics_list: List[Cal3Bundler],
) -> Dict[int, PinholeCameraCal3Bundler]:
    """Generate camera from valid rotations and unit-translations.

    Args:
        wRi_list: rotations for cameras.
        wti_list: translations for cameras.
        intrinsics_list: intrinsics for cameras.
    Returns:
        Valid cameras.
    """
    cameras = {}

    for idx, (wRi, wti) in enumerate(zip(wRi_list, wti_list)):
        if wRi is not None and wti is not None:
            cameras[idx] = PinholeCameraCal3Bundler(Pose3(wRi, wti),
                                                    intrinsics_list[idx])

    return cameras
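Usage sketch: entries whose rotation or translation is None are skipped, so the returned dict may have gaps in its indices (assumes the gtsam imports used throughout these examples):

cameras = init_cameras(
    wRi_list=[Rot3(), None],
    wti_list=[Point3(0, 0, 0), None],
    intrinsics_list=[Cal3Bundler(), Cal3Bundler()],
)
assert sorted(cameras.keys()) == [0]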
Example #24
    def test_point_cloud_cameras_locked(self) -> None:
        """Tests the get_ortho_axis_alignment_transform() function with a GtsfmData object containing 11 point cloud
        points and 12 camera frustums from the door-12 dataset. Determines if the points and frustums are properly
        "locked" in with one another before and after the alignment transformation is applied.
        """
        sample_data = GtsfmData(number_images=12)

        # Instantiate OlssonLoader to read camera poses from door12 dataset.
        wTi_list = self.loader._wTi_list

        # Add 12 camera frustums to sample_data.
        default_intrinsics = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)
        for idx, pose in enumerate(wTi_list):
            camera = PinholeCameraCal3Bundler(pose, default_intrinsics)
            sample_data.add_camera(idx, camera)

        # fmt: off
        # These points are taken directly from the first 11 points generated by GTSFM on the door12 dataset (without
        # any separate alignment transformation being applied)
        points_3d = np.array(
            [[-1.4687794397729077, -1.4966178675020756, 14.583277665978546],
             [-1.6172612359102505, -1.0951470733744013, 14.579095414379562],
             [-3.2190882723771783, -4.463465966172758, 14.444076631000476],
             [-0.6754206497590093, -1.1132530165104157, 14.916222213341355],
             [-1.5514099044537981, -1.305810425894855, 14.584788688422206],
             [-1.551319353347404, -1.304881682597853, 14.58246449772602],
             [-1.9055918588057448, -1.192867982227922, 14.446379510423219],
             [-1.5936792439193013, -1.4398818807488012, 14.587749795933021],
             [-1.5937405395983737, -1.4401641027442411, 14.588167699143174],
             [-1.6599318889904735, -1.2273604755959784, 14.57861988411431],
             [2.1935589900444867, 1.6233406628428935, 12.610234497076608]])
        # fmt: on

        # Add all point cloud points to sample_data
        for point_3d in points_3d:
            sample_data.add_track(SfmTrack(point_3d))

        camera_translations = np.array(
            [pose.translation() for pose in sample_data.get_camera_poses()])
        initial_relative_distances = scipy.spatial.distance.cdist(
            camera_translations, points_3d, metric="euclidean")

        # Apply alignment transformation to sample_data
        walignedTw = ellipsoid_utils.get_ortho_axis_alignment_transform(
            sample_data)
        walignedSw = Similarity3(R=walignedTw.rotation(),
                                 t=walignedTw.translation(),
                                 s=1.0)
        sample_data = sample_data.apply_Sim3(walignedSw)

        # Aggregate the final, transformed points
        num_tracks = sample_data.number_tracks()
        transformed_points_3d = [
            np.array(sample_data.get_track(i).point3())
            for i in range(num_tracks)
        ]
        transformed_points_3d = np.array(transformed_points_3d)
        transformed_camera_translations = np.array(
            [pose.translation() for pose in sample_data.get_camera_poses()])

        final_relative_distances = scipy.spatial.distance.cdist(
            transformed_camera_translations,
            transformed_points_3d,
            metric="euclidean")

        npt.assert_almost_equal(final_relative_distances,
                                initial_relative_distances,
                                decimal=3)
Example #25
def mesh_inlier_correspondences(
    keypoints_i1: Keypoints,
    keypoints_i2: Keypoints,
    gt_camera_i1: PinholeCameraCal3Bundler,
    gt_camera_i2: PinholeCameraCal3Bundler,
    gt_scene_mesh: Trimesh,
    dist_threshold: float,
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute inlier correspondences using the ground truth triangular surface mesh of the scene. First, rays are
    back-projected at each keypoint in the images and intersections between these rays and the ground truth mesh are
    recorded. Next, given a match, the mesh intersections corresponding to each keypoint are forward-projected into the
    other image and the reprojection error is computed to decide whether the match is an inlier.

    Args:
        keypoints_i1: N keypoints in image i1.
        keypoints_i2: N corresponding keypoints in image i2.
        gt_camera_i1: ground truth camera for image i1, i.e., wTi1 and intrinsics.
        gt_camera_i2: ground truth camera for image i2, i.e., wTi2 and intrinsics.
        gt_scene_mesh: ground truth triangular surface mesh of the scene in the world frame.
        dist_threshold: max acceptable reprojection error (in pixels) between image coordinates of ground truth landmark
            and keypoint.

    Returns:
        is_inlier: (N,) mask of inlier correspondences.
        reproj_err: maximum error between forward-projected ground truth landmark and corresponding keypoints.

    Raises:
        ValueError if the numbers of keypoints do not match.
    """
    if len(keypoints_i1) != len(keypoints_i2):
        raise ValueError("Keypoints must have same counts")
    n_corrs = len(keypoints_i1)
    is_inlier = np.zeros(n_corrs, dtype=bool)

    # Perform ray tracing to compute keypoint intersections.
    keypoint_ind_i1, intersections_i1 = compute_keypoint_intersections(
        keypoints_i1, gt_camera_i1, gt_scene_mesh)
    keypoint_ind_i2, intersections_i2 = compute_keypoint_intersections(
        keypoints_i2, gt_camera_i2, gt_scene_mesh)
    keypoint_ind, i1_idx, i2_idx = np.intersect1d(keypoint_ind_i1,
                                                  keypoint_ind_i2,
                                                  return_indices=True)

    # Forward project intersections into other image to compute error.
    reproj_err = np.array([np.nan] * len(keypoints_i1))
    for i in range(len(keypoint_ind)):
        uv_i1 = keypoints_i1.coordinates[keypoint_ind[i]]
        uv_i2 = keypoints_i2.coordinates[keypoint_ind[i]]
        uv_i2i1, success_flag_i1 = gt_camera_i1.projectSafe(
            intersections_i2[i2_idx[i]])
        uv_i1i2, success_flag_i2 = gt_camera_i2.projectSafe(
            intersections_i1[i1_idx[i]])
        if success_flag_i1 and success_flag_i2:
            err_i2i1 = np.linalg.norm(uv_i1 - uv_i2i1)
            err_i1i2 = np.linalg.norm(uv_i2 - uv_i1i2)
            is_inlier[keypoint_ind[i]] = max(err_i2i1,
                                             err_i1i2) < dist_threshold
            reproj_err[keypoint_ind[i]] = max(err_i2i1, err_i1i2)
        else:
            is_inlier[keypoint_ind[i]] = False
            reproj_err[keypoint_ind[i]] = np.nan

    return is_inlier, reproj_err
Example #26
def compute_correspondence_metrics(
    keypoints_i1: Keypoints,
    keypoints_i2: Keypoints,
    corr_idxs_i1i2: np.ndarray,
    intrinsics_i1: Cal3Bundler,
    intrinsics_i2: Cal3Bundler,
    dist_threshold: float,
    gt_wTi1: Optional[Pose3] = None,
    gt_wTi2: Optional[Pose3] = None,
    gt_scene_mesh: Optional[Trimesh] = None,
) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
    """Checks the correspondences for epipolar distances and counts ones which are below the threshold.

    Args:
        keypoints_i1: keypoints in image i1.
        keypoints_i2: corr. keypoints in image i2.
        intrinsics_i1: intrinsics for i1.
        intrinsics_i2: intrinsics for i2.
        dist_threshold: max acceptable distance for a correct correspondence.
        gt_wTi1: ground truth pose of image i1.
        gt_wTi2: ground truth pose of image i2.
        gt_scene_mesh: ground truth triangular surface mesh of the scene in the world frame.

    Raises:
        ValueError: when the number of keypoints do not match.

    Returns:
        Boolean mask of which verified correspondences are classified as correct under Sampson error
            (using GT epipolar geometry).
        Reprojection error for every verified correspondence against GT geometry.
    """
    if corr_idxs_i1i2.size == 0:
        return None, None

    if gt_wTi1 is None or gt_wTi2 is None:
        return None, None

    # Compute ground truth correspondences.
    matched_keypoints_i1 = keypoints_i1.extract_indices(corr_idxs_i1i2[:, 0])
    matched_keypoints_i2 = keypoints_i2.extract_indices(corr_idxs_i1i2[:, 1])
    # Check to see if a GT mesh is provided.
    if gt_scene_mesh is not None:
        gt_camera_i1 = PinholeCameraCal3Bundler(gt_wTi1, intrinsics_i1)
        gt_camera_i2 = PinholeCameraCal3Bundler(gt_wTi2, intrinsics_i2)
        return mesh_inlier_correspondences(
            matched_keypoints_i1,
            matched_keypoints_i2,
            gt_camera_i1,
            gt_camera_i2,
            gt_scene_mesh,
            dist_threshold,
        )

    # If no mesh is provided, use squared Sampson error.
    gt_i2Ti1 = gt_wTi2.between(gt_wTi1)
    return epipolar_inlier_correspondences(
        matched_keypoints_i1,
        matched_keypoints_i2,
        intrinsics_i1,
        intrinsics_i2,
        gt_i2Ti1,
        dist_threshold,
    )
Example #27
    def test_pinholeCameraCal3Bundler_roundtrip(self):
        """Test the pickle round-trip on a PinholeCameraCal3Bundler object."""
        obj = PinholeCameraCal3Bundler(
            Pose3(Rot3.RzRyRx(0, 0.1, -0.05), Point3(1, 1, 0)),
            Cal3Bundler(fx=100, k1=0.1, k2=0.2, u0=100, v0=70),
        )
        self.assertEqualityOnPickleRoundtrip(obj)
Example #28
    def test_get_stats_for_sfmdata_skydio32(self) -> None:
        """Real data, from Skydio-32 sequence with the SIFT front-end, if Sim(3) object is corrupted with negative scale.

        All tracks should have NaN reprojection error, from cheirality errors from the negative scale.
        """

        fx, k1, k2, px, py = 609.94, -8.82687675e-07, -4.25111629e-06, 507, 380
        calib = Cal3Bundler(fx, k1, k2, px, py)

        wTi2 = Pose3(
            Rot3([
                [0.95813172, 0.00187844098, -0.286321635],
                [-0.00477926501, 0.999944088, -0.00943285149],
                [0.286287907, 0.0104063212, 0.958087127],
            ]),
            Point3(-0.763405555, -0.266165811, -0.118153783),
        )
        wTi3 = Pose3(
            Rot3([
                [0.999985712, 0.00523414604, -0.00108604007],
                [-0.00524091331, 0.999966264, -0.00632478431],
                [0.00105289859, 0.00633038578, 0.999979409],
            ]),
            Point3(-0.76321922, -0.266172481, -0.118146617),
        )
        wTi4 = Pose3(
            Rot3([
                [0.963327067, 0.00797924699, 0.268211287],
                [-0.0070859796, 0.999965656, -0.00429831651],
                [-0.268236373, 0.00224014493, 0.963350523],
            ]),
            Point3(-0.76303985, -0.266175812, -0.11817041),
        )
        wTi5 = Pose3(
            Rot3([
                [0.965261642, 0.00828550155, 0.261153812],
                [-0.00485679326, 0.99989337, -0.0137717378],
                [-0.261240071, 0.0120249602, 0.965198956],
            ]),
            Point3(-0.762848965, -0.266179234, -0.118209764),
        )
        wTi30 = Pose3(
            Rot3([
                [0.761797609, -0.0189987841, 0.647536446],
                [0.000647893104, 0.999591701, 0.0285659033],
                [-0.647814775, -0.0213419024, 0.761498877],
            ]),
            Point3(-0.780003719, -0.266108015, -0.154604541),
        )
        aligned_cameras = {
            2: PinholeCameraCal3Bundler(wTi2, calib),
            3: PinholeCameraCal3Bundler(wTi3, calib),
            4: PinholeCameraCal3Bundler(wTi4, calib),
            5: PinholeCameraCal3Bundler(wTi5, calib),
            30: PinholeCameraCal3Bundler(wTi30, calib),
        }

        t0 = SfmTrack(pt=[-0.7627727, -0.26624048, -0.11879795])
        t0.addMeasurement(2, [184.08586121, 441.31314087])
        t0.addMeasurement(4, [18.98637581, 453.21853638])

        t1 = SfmTrack(pt=[-0.76277714, -0.26603358, -0.11884205])
        t1.addMeasurement(2, [213.51266479, 288.06637573])
        t1.addMeasurement(4, [50.23059464, 229.30541992])

        t2 = SfmTrack(pt=[-0.7633115, -0.2662322, -0.11826181])
        t2.addMeasurement(2, [227.52420044, 695.15087891])
        t2.addMeasurement(3, [996.67608643, 705.03125])

        t3 = SfmTrack(pt=[-0.76323087, -0.26629859, -0.11836833])
        t3.addMeasurement(2, [251.37863159, 702.97064209])
        t3.addMeasurement(3, [537.9753418, 732.26025391])

        t4 = SfmTrack(pt=[-0.70450081, -0.28115719, -0.19063382])
        t4.addMeasurement(2, [253.17749023, 490.47991943])
        t4.addMeasurement(3, [13.17782784, 507.57717896])

        t5 = SfmTrack(pt=[-0.52781989, -0.31926005, -0.40763909])
        t5.addMeasurement(2, [253.52301025, 478.41384888])
        t5.addMeasurement(3, [10.92995739, 493.31018066])

        t6 = SfmTrack(pt=[-0.74893948, -0.27132075, -0.1360136])
        t6.addMeasurement(2, [254.64611816, 533.04730225])
        t6.addMeasurement(3, [18.78449249, 557.05041504])

        aligned_tracks = [t0, t1, t2, t3, t4, t5, t6]
        aligned_filtered_data = GtsfmData.from_cameras_and_tracks(
            cameras=aligned_cameras, tracks=aligned_tracks, number_images=32)
        metrics = metric_utils.get_stats_for_sfmdata(aligned_filtered_data,
                                                     suffix="_filtered")

        assert metrics[0].name == "number_cameras"
        assert np.isclose(metrics[0]._data, np.array(5.0, dtype=np.float32))

        assert metrics[1].name == "number_tracks_filtered"
        assert np.isclose(metrics[1]._data, np.array(7.0, dtype=np.float32))

        assert metrics[2].name == "3d_track_lengths_filtered"
        assert metrics[2].summary == {
            "min": 2,
            "max": 2,
            "median": 2.0,
            "mean": 2.0,
            "stddev": 0.0,
            "histogram": {
                "1": 7
            },
        }

        assert metrics[3].name == "reprojection_errors_filtered_px"
        assert metrics[3].summary == {
            "min": np.nan,
            "max": np.nan,
            "median": np.nan,
            "mean": np.nan,
            "stddev": np.nan
        }
Example #29
    def test_get_ortho_axis_alignment_transform(self) -> None:
        """Tests the get_ortho_axis_alignment_transform() function with a GtsfmData object containing 3 camera frustums
        and 6 points in the point cloud. All points lie on z=0 plane. All frustums lie on z=2 plane and look down on
        the z=0 plane.

           sample_data:              output_data:

               y                         y
               |                         o
               |                         |
               |     o                   |
           o   |                         c
             c | c                       |
          ------------- x   ==>  --o--c-----c--o-- x
             o | c                       |
               |   o                     o
               |                         |

        c = point at (xi,yi,0) with a camera frustum at (xi,yi,2)
        o = point at (xi,yi,0)
        """
        sample_data = GtsfmData(number_images=3)
        default_intrinsics = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)

        # Add 3 camera frustums to sample_data (looking down at z=0 plane)
        cam_translations = np.array([[-1, 1, 2], [1, 1, 2], [1, -1, 2]])

        for i in range(len(cam_translations)):
            camera = PinholeCameraCal3Bundler(
                Pose3(Rot3(), cam_translations[i, :]), default_intrinsics)
            sample_data.add_camera(i, camera)

        # Add 6 tracks to sample_data

        # fmt: off
        points3d = np.array([
            [1, 1, 0],
            [-1, 1, 0],
            [-2, 2, 0],
            [-1, -1, 0],
            [1, -1, 0],
            [2, -2, 0],
            [5, 5, 0]  # represents an outlier in this set of points
        ])
        # fmt: on

        for pt_3d in points3d:
            sample_data.add_track(SfmTrack(pt_3d))

        # Apply alignment transformation to sample_data
        walignedTw = ellipsoid_utils.get_ortho_axis_alignment_transform(
            sample_data)
        walignedSw = Similarity3(R=walignedTw.rotation(),
                                 t=walignedTw.translation(),
                                 s=1.0)
        sample_data = sample_data.apply_Sim3(walignedSw)

        # Verify correct 3d points.
        computed_3d_points = np.array([
            sample_data.get_track(i).point3()
            for i in range(sample_data.number_tracks())
        ])
        expected_3d_points = np.array([
            [0, np.sqrt(2), 0],
            [-np.sqrt(2), 0, 0],
            [-2 * np.sqrt(2), 0, 0],
            [0, -np.sqrt(2), 0],
            [np.sqrt(2), 0, 0],
            [2 * np.sqrt(2), 0, 0],
            [0, 5 * np.sqrt(2), 0],
        ])
        npt.assert_almost_equal(computed_3d_points,
                                expected_3d_points,
                                decimal=3)

        # Verify correct camera poses.
        expected_wTi_list = [
            Pose3(walignedTw.rotation(), np.array([-np.sqrt(2), 0, 2])),
            Pose3(walignedTw.rotation(), np.array([0, np.sqrt(2), 2])),
            Pose3(walignedTw.rotation(), np.array([np.sqrt(2), 0, 2])),
        ]

        computed_wTi_list = sample_data.get_camera_poses()
        for wTi_computed, wTi_expected in zip(computed_wTi_list,
                                              expected_wTi_list):
            assert wTi_computed.equals(wTi_expected, tol=1e-9)
Example #30
from gtsfm.data_association.point3d_initializer import (
    Point3dInitializer,
    TriangulationParam,
)
from gtsfm.loader.folder_loader import FolderLoader

# path for data used in this test
DATA_ROOT_PATH = Path(__file__).resolve().parent.parent / "data"
DOOR_TRACKS_PATH = DATA_ROOT_PATH / "tracks2d_door.pickle"
DOOR_DATASET_PATH = DATA_ROOT_PATH / "set1_lund_door"

# focal length set to 50 px, with `px`, `py` set to zero
CALIBRATION = Cal3Bundler(50, 0, 0, 0, 0)
# Generate 8 camera poses arranged in a circle of radius 40 m
CAMERAS = {
    i: PinholeCameraCal3Bundler(pose, CALIBRATION)
    for i, pose in enumerate(
        SFMdata.createPoses(
            Cal3_S2(
                CALIBRATION.fx(),
                CALIBRATION.fx(),
                0,
                CALIBRATION.px(),
                CALIBRATION.py(),
            )))
}
LANDMARK_POINT = Point3(0.0, 0.0, 0.0)
MEASUREMENTS = [
    SfmMeasurement(i, cam.project(LANDMARK_POINT))
    for i, cam in CAMERAS.items()
]