Example #1
File: gtsfm_data.py Project: borglab/gtsfm
    def apply_Sim3(self, aSb: Similarity3) -> "GtsfmData":
        """Assume current tracks and cameras are in frame "b", then transport them to frame "a".

        Returns:
            New GtsfmData object whose cameras and tracks have been transformed from frame "b" to frame "a".
        """
        bTi_list = self.get_camera_poses()
        aTi_list = [aSb.transformFrom(bTi) if bTi is not None else None for bTi in bTi_list]
        aligned_data = GtsfmData(number_images=self.number_images())

        # Update the camera poses to their aligned poses, but use the previous calibration.
        for i, aTi in enumerate(aTi_list):
            if aTi is None:
                continue
            calibration = self.get_camera(i).calibration()
            camera_type = gtsfm_types.get_camera_class_for_calibration(calibration)
            aligned_data.add_camera(i, camera_type(aTi, calibration))
        # Transform the 3d point of each track into frame "a".
        for j in range(self.number_tracks()):
            # Align each 3d point
            track_b = self.get_track(index=j)
            # Place into the "a" reference frame
            pt_a = aSb.transformFrom(track_b.point3())
            track_a = SfmTrack(pt_a)
            # Copy over the 2d measurements directly into the new track.
            for k in range(track_b.numberMeasurements()):
                i, uv = track_b.measurement(k)
                track_a.addMeasurement(i, uv)
            aligned_data.add_track(track_a)

        return aligned_data
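A minimal usage sketch (not from the repository; `data` and the transform values below are hypothetical, and the gtsam imports are assumed):

# Hypothetical sketch: move an existing GtsfmData `data` from frame "b" into frame "a".
import numpy as np
from gtsam import Rot3, Similarity3

aSb = Similarity3(Rot3.Rz(np.deg2rad(90)), np.array([0.0, 10.0, 0.0]), 2.0)  # b -> a, with 2x scale
aligned_data = data.apply_Sim3(aSb)  # cameras and tracks are now expressed in frame "a"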
Example #2
File: test_Sim3.py Project: pu-wei/gtsam
    def test_align_poses_along_straight_line_gauge(self):
        """Test if Align Pose3Pairs method can account for gauge ambiguity.

        Scenario:
           3 object poses
           with gauge ambiguity (2x scale)
           world frame has poses rotated about z-axis (90 degree yaw)
           world and egovehicle frames translated by 10 meters w.r.t. each other
        """
        Rz90 = Rot3.Rz(np.deg2rad(90))

        # Create source poses (three objects o0, o1, o2 living in the egovehicle "e" frame)
        # Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
        eTo0 = Pose3(Rot3(), np.array([1, 0, 0]))
        eTo1 = Pose3(Rot3(), np.array([2, 0, 0]))
        eTo2 = Pose3(Rot3(), np.array([4, 0, 0]))

        eToi_list = [eTo0, eTo1, eTo2]

        # Create destination poses
        # (same three objects, but instead living in the world/city "w" frame)
        wTo0 = Pose3(Rz90, np.array([0, 12, 0]))
        wTo1 = Pose3(Rz90, np.array([0, 14, 0]))
        wTo2 = Pose3(Rz90, np.array([0, 18, 0]))

        wToi_list = [wTo0, wTo1, wTo2]

        we_pairs = Pose3Pairs(list(zip(wToi_list, eToi_list)))

        # Recover the transformation wSe (i.e. world_S_egovehicle)
        wSe = Similarity3.Align(we_pairs)

        for wToi, eToi in zip(wToi_list, eToi_list):
            self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
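For these poses the recovered transform should be a 90-degree yaw, a 10 m offset along y, and a 2x scale. A sanity check along these lines (not part of the original test, appended at the end of the method) might read:

        # Illustrative check: expect wSe = Similarity3(Rz90, [0, 10, 0], 2.0),
        # since e.g. 2 * (Rz90 applied to [1, 0, 0]) + [0, 10, 0] = [0, 12, 0] = wTo0's translation.
        self.gtsamAssertEquals(wSe.rotation(), Rz90)
        np.testing.assert_allclose(wSe.translation(), np.array([0.0, 10.0, 0.0]), atol=1e-9)
        self.assertAlmostEqual(wSe.scale(), 2.0)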
Example #3
def align_poses_sim3(aTi_list: List[Pose3],
                     bTi_list: List[Pose3]) -> Similarity3:
    """Align two pose graphs via similarity transformation. Note: poses cannot be missing/invalid.

    We force SIM(3) alignment rather than SE(3) alignment.
    We assume the two trajectories are of the exact same length.

    Args:
        aTi_list: reference poses in frame "a" which are the targets for alignment
        bTi_list: input poses which need to be aligned to frame "a"

    Returns:
        aSb: Similarity(3) object that aligns the two pose graphs.
    """
    n_to_align = len(aTi_list)
    assert len(aTi_list) == len(bTi_list)
    assert n_to_align >= 2, "SIM(3) alignment requires at least 2 frames"

    ab_pairs = Pose3Pairs(list(zip(aTi_list, bTi_list)))

    aSb = Similarity3.Align(ab_pairs)

    if np.isnan(aSb.scale()) or aSb.scale() == 0:
        # we have run into a case where points have no translation between them (i.e. panorama).
        # We will first align the rotations and then align the translation by using centroids.
        # TODO: handle it in GTSAM

        # align the rotations first, so that we can find the translation between the two panoramas
        aSb = Similarity3(aSb.rotation(), np.zeros((3, )), 1.0)
        aTi_list_rot_aligned = [aSb.transformFrom(bTi) for bTi in bTi_list]

        # fit a single translation motion to the centroid
        aTi_centroid = np.array([aTi.translation()
                                 for aTi in aTi_list]).mean(axis=0)
        aTi_rot_aligned_centroid = np.array(
            [aTi.translation() for aTi in aTi_list_rot_aligned]).mean(axis=0)

        # construct the final SIM3 transform
        aSb = Similarity3(aSb.rotation(),
                          aTi_centroid - aTi_rot_aligned_centroid, 1.0)

    return aSb
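A hedged usage sketch for this variant (the poses below are illustrative, not from the repository): estimate aSb from two short trajectories, then move the "b" poses into frame "a".

# Illustrative usage (hypothetical poses); assumes Pose3/Rot3 are imported from gtsam.
bTi_list = [Pose3(Rot3(), np.array([float(i), 0.0, 0.0])) for i in range(3)]
aTi_list = [Pose3(Rot3(), np.array([2.0 * i, 1.0, 0.0])) for i in range(3)]

aSb = align_poses_sim3(aTi_list, bTi_list)  # expect identity rotation, t=[0, 1, 0], s=2
aTi_list_est = [aSb.transformFrom(bTi) for bTi in bTi_list]  # poses now expressed in frame "a"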
Example #4
    def test_align_poses_on_panorama_after_sim3_transform(self):
        """Test for alignment of poses after applying a forward motion transformation."""

        translation_shift = np.array([0, 5, 0])
        rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))
        scaling_factor = 1.0

        aTi_list = sample_poses.PANORAMA_GLOBAL_POSES
        bSa = Similarity3(rotation_shift, translation_shift, scaling_factor)
        bTi_list = [bSa.transformFrom(x) for x in aTi_list]

        aTi_list_ = geometry_comparisons.align_poses_sim3(aTi_list, bTi_list)
        self.__assert_equality_on_pose3s(aTi_list_, aTi_list)
Example #5
    def test_align_poses_after_sim3_transform(self):
        """Test for alignment of poses after applying a SIM3 transformation."""

        translation_shift = np.array([5, 10, -5])
        rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))
        scaling_factor = 0.7

        transform = Similarity3(rotation_shift, translation_shift,
                                scaling_factor)
        ref_list = [
            transform.transformFrom(x)
            for x in sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES
        ]

        computed_poses = geometry_comparisons.align_poses_sim3(
            sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES, ref_list)
        self.__assert_equality_on_pose3s(
            computed_poses, sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES)
Example #6
def align_estimated_gtsfm_data(
    ba_input: GtsfmData, ba_output: GtsfmData, gt_pose_graph: List[Optional[Pose3]]
) -> Tuple[GtsfmData, GtsfmData, List[Optional[Pose3]]]:
    """Creates modified GtsfmData objects that emulate ba_input and ba_output but with point cloud and camera
    frustums aligned to the x,y,z axes. Also transforms GT camera poses to be aligned to axes.

    Args:
        ba_input: GtsfmData input to bundle adjustment.
        ba_output: GtsfmData output from bundle adjustment.
        gt_pose_graph: list of GT camera poses.

    Returns:
        Updated ba_input GtsfmData object aligned to axes.
        Updated ba_output GtsfmData object aligned to axes.
        Updated gt_pose_graph with GT poses aligned to axes.
    """
    walignedTw = ellipsoid_utils.get_ortho_axis_alignment_transform(ba_output)
    walignedSw = Similarity3(R=walignedTw.rotation(), t=walignedTw.translation(), s=1.0)
    ba_input = ba_input.apply_Sim3(walignedSw)
    ba_output = ba_output.apply_Sim3(walignedSw)
    gt_pose_graph = [walignedSw.transformFrom(wTi) if wTi is not None else None for wTi in gt_pose_graph]
    return ba_input, ba_output, gt_pose_graph
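A hedged call sketch (variable names here are hypothetical; `ba_input`, `ba_output`, and `gt_wTi_list` are assumed to come from an earlier GTSFM run):

# Hypothetical call site: axis-align BA inputs/outputs and GT poses before visualization.
ba_input_aligned, ba_output_aligned, gt_aligned = align_estimated_gtsfm_data(
    ba_input, ba_output, gt_wTi_list
)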
Example #7
File: test_Sim3.py Project: pu-wei/gtsam
    def test_align_poses_scaled_squares(self):
        """Test if Align Pose3Pairs method can account for gauge ambiguity.

        Make sure a big and small square can be aligned.
        The poses in frame "a" trace a big square (10x10), and the poses in frame "b" trace a small square (4x4).

        Scenario:
           4 object poses
           with gauge ambiguity (2.5x scale)
        """
        # 0, 90, 180, and 270 degrees yaw
        R0 = Rot3.Rz(np.deg2rad(0))
        R90 = Rot3.Rz(np.deg2rad(90))
        R180 = Rot3.Rz(np.deg2rad(180))
        R270 = Rot3.Rz(np.deg2rad(270))

        aTi0 = Pose3(R0, np.array([2, 3, 0]))
        aTi1 = Pose3(R90, np.array([12, 3, 0]))
        aTi2 = Pose3(R180, np.array([12, 13, 0]))
        aTi3 = Pose3(R270, np.array([2, 13, 0]))

        aTi_list = [aTi0, aTi1, aTi2, aTi3]

        bTi0 = Pose3(R0, np.array([4, 3, 0]))
        bTi1 = Pose3(R90, np.array([8, 3, 0]))
        bTi2 = Pose3(R180, np.array([8, 7, 0]))
        bTi3 = Pose3(R270, np.array([4, 7, 0]))

        bTi_list = [bTi0, bTi1, bTi2, bTi3]

        ab_pairs = Pose3Pairs(list(zip(aTi_list, bTi_list)))

        # Recover the transformation aSb (i.e. a_S_b)
        aSb = Similarity3.Align(ab_pairs)

        for aTi, bTi in zip(aTi_list, bTi_list):
            self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi))
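Since the per-pose rotations already agree between the two squares, the recovered transform should be pure scale plus translation. A check along these lines (not in the original test, appended at the end of the method) might be:

        # Illustrative check: expect aSb = Similarity3(I, [-8, -4.5, 0], 2.5),
        # e.g. 2.5 * [4, 3, 0] + [-8, -4.5, 0] = [2, 3, 0] = aTi0's translation.
        self.gtsamAssertEquals(aSb.rotation(), Rot3())
        np.testing.assert_allclose(aSb.translation(), np.array([-8.0, -4.5, 0.0]), atol=1e-9)
        self.assertAlmostEqual(aSb.scale(), 2.5)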
Example #8
    def test_get_ortho_axis_alignment_transform(self) -> None:
        """Tests the get_ortho_axis_alignment_transform() function with a GtsfmData object containing 3 camera frustums
        and 7 points in the point cloud (one of them an outlier). All points lie on the z=0 plane. All frustums lie on the z=2 plane and look down on
        the z=0 plane.

           sample_data:              output_data:

               y                         y
               |                         o
               |                         |
               |     o                   |
           o   |                         c
             c | c                       |
          ------------- x   ==>  --o--c-----c--o-- x
             o | c                       |
               |   o                     o
               |                         |

        c = point at (xi,yi,0) with a camera frustum at (xi,yi,2)
        o = point at (xi,yi,0)
        """
        sample_data = GtsfmData(number_images=3)
        default_intrinsics = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)

        # Add 3 camera frustums to sample_data (looking down at z=0 plane)
        cam_translations = np.array([[-1, 1, 2], [1, 1, 2], [1, -1, 2]])

        for i in range(len(cam_translations)):
            camera = PinholeCameraCal3Bundler(
                Pose3(Rot3(), cam_translations[i, :]), default_intrinsics)
            sample_data.add_camera(i, camera)

        # Add 7 tracks to sample_data (6 inlier points plus 1 outlier)

        # fmt: off
        points3d = np.array([
            [1, 1, 0],
            [-1, 1, 0],
            [-2, 2, 0],
            [-1, -1, 0],
            [1, -1, 0],
            [2, -2, 0],
            [5, 5, 0]  # represents an outlier in this set of points
        ])
        # fmt: on

        for pt_3d in points3d:
            sample_data.add_track(SfmTrack(pt_3d))

        # Apply alignment transformation to sample_data
        walignedTw = ellipsoid_utils.get_ortho_axis_alignment_transform(
            sample_data)
        walignedSw = Similarity3(R=walignedTw.rotation(),
                                 t=walignedTw.translation(),
                                 s=1.0)
        sample_data = sample_data.apply_Sim3(walignedSw)

        # Verify correct 3d points.
        computed_3d_points = np.array([
            sample_data.get_track(i).point3()
            for i in range(sample_data.number_tracks())
        ])
        expected_3d_points = np.array([
            [0, np.sqrt(2), 0],
            [-np.sqrt(2), 0, 0],
            [-2 * np.sqrt(2), 0, 0],
            [0, -np.sqrt(2), 0],
            [np.sqrt(2), 0, 0],
            [2 * np.sqrt(2), 0, 0],
            [0, 5 * np.sqrt(2), 0],
        ])
        npt.assert_almost_equal(computed_3d_points,
                                expected_3d_points,
                                decimal=3)

        # Verify correct camera poses.
        expected_wTi_list = [
            Pose3(walignedTw.rotation(), np.array([-np.sqrt(2), 0, 2])),
            Pose3(walignedTw.rotation(), np.array([0, np.sqrt(2), 2])),
            Pose3(walignedTw.rotation(), np.array([np.sqrt(2), 0, 2])),
        ]

        computed_wTi_list = sample_data.get_camera_poses()
        for wTi_computed, wTi_expected in zip(computed_wTi_list,
                                              expected_wTi_list):
            assert wTi_computed.equals(wTi_expected, tol=1e-9)
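The expected coordinates above are consistent with the alignment transform being, up to the sign conventions of the underlying eigendecomposition, a 45-degree yaw with no translation. A small illustrative check (not part of the original test, using the test module's `npt` import):

# Illustration (assumes the recovered rotation is a +45 deg yaw about z).
Rz45 = Rot3.Rz(np.deg2rad(45))
npt.assert_almost_equal(Rz45.matrix() @ np.array([1.0, 1.0, 0.0]),
                        np.array([0.0, np.sqrt(2), 0.0]))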
Example #9
    def test_point_cloud_cameras_locked(self) -> None:
        """Tests the get_ortho_axis_alignment_transform() function with a GtsfmData object containing 11 point cloud
        points and 12 camera frustums from the door-12 dataset. Determines if the points and frustums are properly
        "locked" in with one another before and after the alignment transformation is applied.
        """
        sample_data = GtsfmData(number_images=12)

        # Read the door12 dataset's camera poses from the previously constructed OlssonLoader.
        wTi_list = self.loader._wTi_list

        # Add 12 camera frustums to sample_data.
        default_intrinsics = Cal3Bundler(fx=100, k1=0, k2=0, u0=0, v0=0)
        for idx, pose in enumerate(wTi_list):
            camera = PinholeCameraCal3Bundler(pose, default_intrinsics)
            sample_data.add_camera(idx, camera)

        # fmt: off
        # These points are taken directly from the first 11 points generated by GTSFM on the door12 dataset (without
        # any separate alignment transformation being applied)
        points_3d = np.array(
            [[-1.4687794397729077, -1.4966178675020756, 14.583277665978546],
             [-1.6172612359102505, -1.0951470733744013, 14.579095414379562],
             [-3.2190882723771783, -4.463465966172758, 14.444076631000476],
             [-0.6754206497590093, -1.1132530165104157, 14.916222213341355],
             [-1.5514099044537981, -1.305810425894855, 14.584788688422206],
             [-1.551319353347404, -1.304881682597853, 14.58246449772602],
             [-1.9055918588057448, -1.192867982227922, 14.446379510423219],
             [-1.5936792439193013, -1.4398818807488012, 14.587749795933021],
             [-1.5937405395983737, -1.4401641027442411, 14.588167699143174],
             [-1.6599318889904735, -1.2273604755959784, 14.57861988411431],
             [2.1935589900444867, 1.6233406628428935, 12.610234497076608]])
        # fmt: on

        # Add all point cloud points to sample_data
        for point_3d in points_3d:
            sample_data.add_track(SfmTrack(point_3d))

        camera_translations = np.array(
            [pose.translation() for pose in sample_data.get_camera_poses()])
        initial_relative_distances = scipy.spatial.distance.cdist(
            camera_translations, points_3d, metric="euclidean")

        # Apply alignment transformation to sample_data
        walignedTw = ellipsoid_utils.get_ortho_axis_alignment_transform(
            sample_data)
        walignedSw = Similarity3(R=walignedTw.rotation(),
                                 t=walignedTw.translation(),
                                 s=1.0)
        sample_data = sample_data.apply_Sim3(walignedSw)

        # Aggregate the final, transformed points
        num_tracks = sample_data.number_tracks()
        transformed_points_3d = [
            np.array(sample_data.get_track(i).point3())
            for i in range(num_tracks)
        ]
        transformed_points_3d = np.array(transformed_points_3d)
        transformed_camera_translations = np.array(
            [pose.translation() for pose in sample_data.get_camera_poses()])

        final_relative_distances = scipy.spatial.distance.cdist(
            transformed_camera_translations,
            transformed_points_3d,
            metric="euclidean")

        npt.assert_almost_equal(final_relative_distances,
                                initial_relative_distances,
                                decimal=3)
Example #10
def align_poses_sim3(aTi_list: List[Pose3],
                     bTi_list: List[Pose3]) -> Tuple[List[Pose3], Similarity3]:
    """Align two pose graphs via similarity transformation. Note: poses cannot be missing/invalid.

    We force SIM(3) alignment rather than SE(3) alignment.
    We assume the two trajectories are of the exact same length.

    Args:
        aTi_list: reference poses in frame "a" which are the targets for alignment
        bTi_list: input poses which need to be aligned to frame "a"

    Returns:
        aTi_list_: the input poses (previously `bTi_list`), transformed so that they share
            the origin and scale of the reference, i.e. now expressed in frame "a".
        aSb: Similarity(3) object that aligns the two pose graphs.
    """
    assert len(aTi_list) == len(bTi_list)

    valid_pose_tuples = [
        pose_tuple for pose_tuple in list(zip(aTi_list, bTi_list))
        if pose_tuple[0] is not None and pose_tuple[1] is not None
    ]
    n_to_align = len(valid_pose_tuples)
    if n_to_align < 2:
        logger.error("SIM(3) alignment uses at least 2 frames; Skipping")
        return bTi_list, Similarity3(Rot3(), np.zeros((3, )), 1.0)

    ab_pairs = Pose3Pairs(valid_pose_tuples)

    aSb = Similarity3.Align(ab_pairs)

    if np.isnan(aSb.scale()) or aSb.scale() == 0:
        # we have run into a case where points have no translation between them (i.e. panorama).
        # We will first align the rotations and then align the translation by using centroids.
        # TODO: handle it in GTSAM

        # align the rotations first, so that we can find the translation between the two panoramas
        aSb = Similarity3(aSb.rotation(), np.zeros((3, )), 1.0)
        aTi_list_rot_aligned = [
            aSb.transformFrom(bTi) for _, bTi in valid_pose_tuples
        ]

        # fit a single translation motion to the centroid
        aTi_centroid = np.array(
            [aTi.translation() for aTi, _ in valid_pose_tuples]).mean(axis=0)
        aTi_rot_aligned_centroid = np.array(
            [aTi.translation() for aTi in aTi_list_rot_aligned]).mean(axis=0)

        # construct the final SIM3 transform
        aSb = Similarity3(aSb.rotation(),
                          aTi_centroid - aTi_rot_aligned_centroid, 1.0)

    # TODO(johnwlambert): fix bug in GTSAM, where scale can flip to a small negative number
    # a negative scale destroys cheirality when applied.
    # See GTSAM issue here: https://github.com/borglab/gtsam/issues/995
    aSb = Similarity3(R=aSb.rotation(),
                      t=aSb.translation(),
                      s=np.absolute(aSb.scale()))

    # provide a summary of the estimated alignment transform
    aRb = aSb.rotation().matrix()
    atb = aSb.translation()
    rz, ry, rx = Rotation.from_matrix(aRb).as_euler("zyx", degrees=True)
    logger.info(
        "Sim(3) Rotation `aRb`: rz=%.2f deg., ry=%.2f deg., rx=%.2f deg.", rz,
        ry, rx)
    logger.info(f"Sim(3) Translation `atb`: [tx,ty,tz]={str(np.round(atb,2))}")
    logger.info("Sim(3) Scale `asb`: %.2f", float(aSb.scale()))

    aTi_list_ = []
    for bTi in bTi_list:
        if bTi is None:
            aTi_list_.append(None)
        else:
            aTi_list_.append(aSb.transformFrom(bTi))

    logger.info("Pose graph Sim(3) alignment complete.")

    return aTi_list_, aSb
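A hedged usage sketch of this variant (hypothetical poses): entries that are missing in either list are excluded from the estimation and passed through as None.

# Illustrative usage (hypothetical poses); the second pose is missing in frame "b".
aTi_list = [Pose3(Rot3(), np.array([0.0, 2.0, 0.0])),
            Pose3(Rot3(), np.array([2.0, 2.0, 0.0])),
            Pose3(Rot3(), np.array([4.0, 2.0, 0.0]))]
bTi_list = [Pose3(Rot3(), np.array([0.0, 0.0, 0.0])),
            None,
            Pose3(Rot3(), np.array([2.0, 0.0, 0.0]))]

aTi_aligned, aSb = align_poses_sim3(aTi_list, bTi_list)  # expect identity rotation, t=[0, 2, 0], s=2
# aTi_aligned[1] is None; the remaining poses are now expressed in frame "a".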
Example #11
def align_poses_sim3(aTi_list: List[Pose3],
                     bTi_list: List[Pose3]) -> List[Pose3]:
    """Align by similarity transformation.

    We force SIM(3) alignment rather than SE(3) alignment.
    We assume the two trajectories are of the exact same length.

    Args:
        aTi_list: reference poses in frame "a" which are the targets for alignment
        bTi_list: input poses which need to be aligned to frame "a"

    Returns:
        aTi_list_: the input poses (previously `bTi_list`), transformed so that they share
            the origin and scale of the reference, i.e. now expressed in frame "a".
    """
    n_to_align = len(aTi_list)
    assert len(aTi_list) == len(bTi_list)
    assert n_to_align >= 2, "SIM(3) alignment requires at least 2 frames"

    ab_pairs = Pose3Pairs(list(zip(aTi_list, bTi_list)))

    aSb = Similarity3.Align(ab_pairs)

    if np.isnan(aSb.scale()) or aSb.scale() == 0:
        # we have run into a case where points have no translation between them (i.e. panorama).
        # We will first align the rotations and then align the translation by using centroids.
        # TODO: handle it in GTSAM

        # align the rotations first, so that we can find the translation between the two panoramas
        aSb = Similarity3(aSb.rotation(), np.zeros((3, )), 1.0)
        aTi_list_rot_aligned = [aSb.transformFrom(bTi) for bTi in bTi_list]

        # fit a single translation motion to the centroid
        aTi_centroid = np.array([aTi.translation()
                                 for aTi in aTi_list]).mean(axis=0)
        aTi_rot_aligned_centroid = np.array(
            [aTi.translation() for aTi in aTi_list_rot_aligned]).mean(axis=0)

        # construct the final SIM3 transform
        aSb = Similarity3(aSb.rotation(),
                          aTi_centroid - aTi_rot_aligned_centroid, 1.0)

    # provide a summary of the estimated alignment transform
    aRb = aSb.rotation().matrix()
    atb = aSb.translation()
    rz, ry, rx = Rotation.from_matrix(aRb).as_euler("zyx", degrees=True)
    logger.info(
        f"Sim(3) Rotation `aRb`: rz={rz:.2f} deg., ry={ry:.2f} deg., rx={rx:.2f} deg.",
    )
    logger.info(f"Sim(3) Translation `atb`: [tx,ty,tz]={str(np.round(atb,2))}")
    logger.info(f"Sim(3) Scale `asb`: {float(aSb.scale()):.2f}")

    aTi_list_ = []
    for i in range(n_to_align):
        bTi = bTi_list[i]

        aTi_list_.append(aSb.transformFrom(bTi))

    logger.info("Pose graph Sim(3) alignment complete.")

    return aTi_list_