Example #1
    def test_constructor(self):
        """Construct from binary measurements."""
        algorithm = gtsam.TranslationRecovery()
        self.assertIsInstance(algorithm, gtsam.TranslationRecovery)
        algorithm_params = gtsam.TranslationRecovery(
            gtsam.LevenbergMarquardtParams())
        self.assertIsInstance(algorithm_params, gtsam.TranslationRecovery)
Example #2
    def test_run(self):
        gt_poses = ExampleValues()
        measurements = SimulateMeasurements(gt_poses, [[0, 1], [0, 2], [1, 2]])

        # Set verbosity to Silent for tests
        lmParams = gtsam.LevenbergMarquardtParams()
        lmParams.setVerbosityLM("silent")

        algorithm = gtsam.TranslationRecovery(measurements, lmParams)
        scale = 2.0
        result = algorithm.run(scale)

        w_aTc = gt_poses.atPose3(2).translation() - gt_poses.atPose3(
            0).translation()
        w_aTb = gt_poses.atPose3(1).translation() - gt_poses.atPose3(
            0).translation()
        w_aTc_expected = w_aTc * scale / np.linalg.norm(w_aTb)
        w_aTb_expected = w_aTb * scale / np.linalg.norm(w_aTb)

        np.testing.assert_array_almost_equal(result.atPoint3(0),
                                             np.array([0, 0, 0]), 6,
                                             "Origin result is incorrect.")
        np.testing.assert_array_almost_equal(result.atPoint3(1),
                                             w_aTb_expected, 6,
                                             "Point B result is incorrect.")
        np.testing.assert_array_almost_equal(result.atPoint3(2),
                                             w_aTc_expected, 6,
                                             "Point C result is incorrect.")
Example #3
def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3,
                   wRc_values: gtsam.Values) -> gtsam.Values:
    """Estimate poses given rotations and normalized translation directions between cameras.

    Args:
        i_iZj_list: List of normalized translation direction measurements between camera pairs;
                    Z here refers to a measurement. Each measurement is of camera j with respect
                    to camera i (iZj), expressed in camera i's coordinate frame (i_). iZj is a
                    unit vector pointing towards j in i's frame, not a transformation.
        wRc_values: Rotations of the cameras in the world frame.

    Returns:
        gtsam.Values: Estimated poses.
    """

    # Convert the translation direction measurements to world frame using the rotations.
    w_iZj_list = gtsam.BinaryMeasurementsUnit3()
    for i_iZj in i_iZj_list:
        w_iZj = gtsam.Unit3(
            wRc_values.atRot3(i_iZj.key1()).rotate(i_iZj.measured().point3()))
        w_iZj_list.append(
            gtsam.BinaryMeasurementUnit3(i_iZj.key1(), i_iZj.key2(), w_iZj,
                                         i_iZj.noiseModel()))

    # Remove the outliers in the unit translation directions.
    w_iZj_inliers = filter_outliers(w_iZj_list)

    # Run the optimizer to obtain translations for normalized directions.
    wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run()

    wTc_values = gtsam.Values()
    for key in wRc_values.keys():
        wTc_values.insert(
            key, gtsam.Pose3(wRc_values.atRot3(key), wtc_values.atPoint3(key)))
    return wTc_values
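For reference, here is one way estimate_poses could be exercised, sketched under simplifying assumptions: two cameras with identity rotations, a single direction measurement between them, and a pass-through stub in place of filter_outliers, which is not shown in this snippet (the real helper rejects outlier directions before optimization). The noise model is also an assumption.

import numpy as np
import gtsam

def filter_outliers(w_iZj_list):
    # Hypothetical stub: keep all measurements; the real helper (not shown)
    # removes outlier translation directions.
    return w_iZj_list

# Known world-frame rotations for cameras 0 and 1.
wRc_values = gtsam.Values()
wRc_values.insert(0, gtsam.Rot3())
wRc_values.insert(1, gtsam.Rot3())

# A single unit direction measurement from camera 0 to camera 1,
# expressed in camera 0's frame.
i_iZj_list = gtsam.BinaryMeasurementsUnit3()
i_iZj_list.append(
    gtsam.BinaryMeasurementUnit3(0, 1, gtsam.Unit3(np.array([1.0, 0.0, 0.0])),
                                 gtsam.noiseModel.Isotropic.Sigma(3, 0.01)))

wTc_values = estimate_poses(i_iZj_list, wRc_values)
print(wTc_values.atPose3(1).translation())  # direction recovered up to scale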
Example #4
    def test_constructor(self):
        """Construct from binary measurements."""
        algorithm = gtsam.TranslationRecovery(gtsam.BinaryMeasurementsUnit3())
        self.assertIsInstance(algorithm, gtsam.TranslationRecovery)