def test_optimize(self):
    """Do trivial test with three optimizer variants."""
    fg = NonlinearFactorGraph()
    model = gtsam.noiseModel.Unit.Create(2)
    fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))

    # Error at the minimum must be zero.
    # BUG FIX: the third positional argument of assertEqual is `msg`, not a
    # tolerance; assertAlmostEqual performs the intended numeric comparison.
    xstar = Point2(0, 0)
    optimal_values = Values()
    optimal_values.insert(KEY1, xstar)
    self.assertAlmostEqual(0.0, fg.error(optimal_values))

    # Error at the initial estimate: 0.5 * ||(3, 3)||^2 = 9.0
    x0 = Point2(3, 3)
    initial_values = Values()
    initial_values.insert(KEY1, x0)
    self.assertAlmostEqual(9.0, fg.error(initial_values), delta=1e-3)

    # Elimination ordering shared by the ordered optimizers below.
    ordering = Ordering()
    ordering.push_back(KEY1)

    # Gauss-Newton
    gnParams = GaussNewtonParams()
    gnParams.setOrdering(ordering)
    actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize()
    self.assertAlmostEqual(0, fg.error(actual1))

    # Levenberg-Marquardt
    lmParams = LevenbergMarquardtParams.CeresDefaults()
    lmParams.setOrdering(ordering)
    actual2 = LevenbergMarquardtOptimizer(fg, initial_values,
                                          lmParams).optimize()
    self.assertAlmostEqual(0, fg.error(actual2))

    # Levenberg-Marquardt with an iterative (PCG) linear solver
    lmParams = LevenbergMarquardtParams.CeresDefaults()
    lmParams.setLinearSolverType("ITERATIVE")
    cgParams = PCGSolverParameters()
    cgParams.setPreconditionerParams(DummyPreconditionerParameters())
    lmParams.setIterativeParams(cgParams)
    actual2 = LevenbergMarquardtOptimizer(fg, initial_values,
                                          lmParams).optimize()
    self.assertAlmostEqual(0, fg.error(actual2))

    # Dogleg
    dlParams = DoglegParams()
    dlParams.setOrdering(ordering)
    actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize()
    self.assertAlmostEqual(0, fg.error(actual3))

    # Graduated Non-Convexity (GNC)
    gncParams = GncLMParams()
    actual4 = GncLMOptimizer(fg, initial_values, gncParams).optimize()
    self.assertAlmostEqual(0, fg.error(actual4))
def create_initial_estimate(n):
    """Build a Values container holding a default Event for each of n keys."""
    estimate = Values()
    default_event = Event()
    for event_key in range(n):
        TOAFactor.InsertEvent(event_key, default_event, estimate)
    return estimate
def test_values(self):
    """Insert an instance of every wrapped type into Values, then read each back."""
    values = Values()
    E = EssentialMatrix(Rot3(), Unit3())
    tol = 1e-9

    # Insert one instance of each supported geometry/calibration type,
    # keyed by consecutive integers.
    values.insert(0, Point2(0, 0))
    values.insert(1, Point3(0, 0, 0))
    values.insert(2, Rot2())
    values.insert(3, Pose2())
    values.insert(4, Rot3())
    values.insert(5, Pose3())
    values.insert(6, Cal3_S2())
    values.insert(7, Cal3DS2())
    values.insert(8, Cal3Bundler())
    values.insert(9, E)
    values.insert(10, imuBias_ConstantBias())

    # Special cases for Vectors and Matrices
    # Note that gtsam's Eigen Vectors and Matrices requires double-precision
    # floating point numbers in column-major (Fortran style) storage order,
    # whereas by default, numpy.array is in row-major order and the type is
    # in whatever the number input type is, e.g. np.array([1,2,3])
    # will have 'int' type.
    #
    # The wrapper will automatically fix the type and storage order for you,
    # but for performance reasons, it's recommended to specify the correct
    # type and storage order.
    # for vectors, the order is not important, but dtype still is
    vec = np.array([1., 2., 3.])
    values.insert(11, vec)
    mat = np.array([[1., 2.], [3., 4.]], order='F')
    values.insert(12, mat)
    # Test with dtype int and the default order='C'
    # This still works as the wrapper converts to the correct type and order
    # for you, but is normally not recommended!
    mat2 = np.array([[1, 2, ], [3, 5]])
    values.insert(13, mat2)

    # Read each value back and compare against a freshly-constructed instance.
    self.assertTrue(values.atPoint2(0).equals(Point2(0,0), tol))
    self.assertTrue(values.atPoint3(1).equals(Point3(0,0,0), tol))
    self.assertTrue(values.atRot2(2).equals(Rot2(), tol))
    self.assertTrue(values.atPose2(3).equals(Pose2(), tol))
    self.assertTrue(values.atRot3(4).equals(Rot3(), tol))
    self.assertTrue(values.atPose3(5).equals(Pose3(), tol))
    self.assertTrue(values.atCal3_S2(6).equals(Cal3_S2(), tol))
    self.assertTrue(values.atCal3DS2(7).equals(Cal3DS2(), tol))
    self.assertTrue(values.atCal3Bundler(8).equals(Cal3Bundler(), tol))
    self.assertTrue(values.atEssentialMatrix(9).equals(E, tol))
    self.assertTrue(values.atimuBias_ConstantBias(
        10).equals(imuBias_ConstantBias(), tol))

    # special cases for Vector and Matrix:
    actualVector = values.atVector(11)
    self.assertTrue(np.allclose(vec, actualVector, tol))
    actualMatrix = values.atMatrix(12)
    self.assertTrue(np.allclose(mat, actualMatrix, tol))
    actualMatrix2 = values.atMatrix(13)
    self.assertTrue(np.allclose(mat2, actualMatrix2, tol))
def setUp(self):
    """Set up the optimization problem and ordering"""
    # create graph
    self.fg = NonlinearFactorGraph()
    model = gtsam.noiseModel.Unit.Create(2)
    self.fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))

    # Sanity-check: error at the minimum must be zero.
    # BUG FIX: assertEqual's third positional argument is `msg`, not a
    # tolerance; assertAlmostEqual performs the intended numeric comparison.
    xstar = Point2(0, 0)
    self.optimal_values = Values()
    self.optimal_values.insert(KEY1, xstar)
    self.assertAlmostEqual(0.0, self.fg.error(self.optimal_values))

    # Sanity-check: error at the initial estimate is 0.5 * ||(3, 3)||^2 = 9.0
    x0 = Point2(3, 3)
    self.initial_values = Values()
    self.initial_values.insert(KEY1, x0)
    self.assertAlmostEqual(9.0, self.fg.error(self.initial_values),
                           delta=1e-3)

    # Elimination ordering reused by the optimizer tests.
    self.ordering = Ordering()
    self.ordering.push_back(KEY1)
def test_call(self):
    """Evaluating the factor at Pose2(1, 0, 0) must give error 0.5."""
    target = Pose2(1, 1, 0)

    def error_func(this: CustomFactor, v: gtsam.Values,
                   H: List[np.ndarray]) -> np.ndarray:
        # Error is the negated local-coordinate difference to the target pose.
        pose_key = this.keys()[0]
        return -v.atPose2(pose_key).localCoordinates(target)

    noise_model = gtsam.noiseModel.Unit.Create(3)
    factor = ge.CustomFactor(noise_model, gtsam.KeyVector([0]), error_func)
    values = Values()
    values.insert(0, Pose2(1, 0, 0))
    self.assertEqual(factor.error(values), 0.5)
def test_call(self):
    """Test if calling the factor works (only error)"""
    target = Pose2(1, 1, 0)

    def error_func(this: CustomFactor, v: gtsam.Values,
                   H: List[np.ndarray]) -> np.ndarray:
        """Minimal error function with no Jacobian"""
        pose_key = this.keys()[0]
        return -v.atPose2(pose_key).localCoordinates(target)

    noise_model = gtsam.noiseModel.Unit.Create(3)
    factor = CustomFactor(noise_model, [0], error_func)
    values = Values()
    values.insert(0, Pose2(1, 0, 0))
    self.assertEqual(factor.error(values), 0.5)
def test_optimization(self):
    """Tests if a factor graph with a CustomFactor can be properly optimized"""
    gT1 = Pose2(1, 2, np.pi / 2)
    gT2 = Pose2(-1, 4, np.pi)
    expected = Pose2(2, 2, np.pi / 2)

    def error_func(this: CustomFactor, v: gtsam.Values,
                   H: List[np.ndarray]):
        """
        Error function that mimics a BetweenFactor
        :param this: reference to the current CustomFactor being evaluated
        :param v: Values object
        :param H: list of references to the Jacobian arrays
        :return: the non-linear error
        """
        k0, k1 = this.keys()[0], this.keys()[1]
        pose_i, pose_j = v.atPose2(k0), v.atPose2(k1)
        err = expected.localCoordinates(pose_i.between(pose_j))
        if H is not None:
            relative = pose_i.between(pose_j)
            H[0] = -relative.inverse().AdjointMap()
            H[1] = np.eye(3)
        return err

    noise_model = gtsam.noiseModel.Unit.Create(3)
    graph = gtsam.NonlinearFactorGraph()
    graph.add(CustomFactor(noise_model, [0, 1], error_func))
    # Prior anchors pose 0 at the ground truth.
    graph.add(gtsam.PriorFactorPose2(0, gT1, noise_model))

    initial = Values()
    initial.insert(0, Pose2(0, 0, 0))
    initial.insert(1, Pose2(0, 0, 0))

    params = gtsam.LevenbergMarquardtParams()
    result = gtsam.LevenbergMarquardtOptimizer(graph, initial,
                                               params).optimize()

    self.gtsamAssertEquals(result.atPose2(0), gT1, tol=1e-5)
    self.gtsamAssertEquals(result.atPose2(1), gT2, tol=1e-5)
def test_jacobian(self):
    """Tests if the factor result matches the GTSAM Pose2 unit test"""
    gT1 = Pose2(1, 2, np.pi / 2)
    gT2 = Pose2(-1, 4, np.pi)
    expected = Pose2(2, 2, np.pi / 2)

    def error_func(this: CustomFactor, v: gtsam.Values,
                   H: List[np.ndarray]):
        """
        the custom error function. One can freely use variables captured
        from the outside scope. Or the variables can be acquired by indexing `v`.
        Jacobian is passed by modifying the H array of numpy matrices.
        """
        k0, k1 = this.keys()[0], this.keys()[1]
        pose_i, pose_j = v.atPose2(k0), v.atPose2(k1)
        relative = pose_i.between(pose_j)
        if H is not None:
            H[0] = -relative.inverse().AdjointMap()
            H[1] = np.eye(3)
        return expected.localCoordinates(relative)

    noise_model = gtsam.noiseModel.Unit.Create(3)
    custom = CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)
    values = Values()
    values.insert(0, gT1)
    values.insert(1, gT2)

    # Reference factor with the same measurement and noise.
    between = gtsam.BetweenFactorPose2(0, 1, expected, noise_model)

    J_custom, b_custom = custom.linearize(values).jacobian()
    J_between, b_between = between.linearize(values).jacobian()
    np.testing.assert_allclose(J_custom, J_between)
    np.testing.assert_allclose(b_custom, b_between)
def test_no_jacobian(self):
    """Tests that we will not calculate the Jacobian if not requested"""
    gT1 = Pose2(1, 2, np.pi / 2)
    gT2 = Pose2(-1, 4, np.pi)
    expected = Pose2(2, 2, np.pi / 2)

    def error_func(this: CustomFactor, v: gtsam.Values,
                   H: List[np.ndarray]):
        """
        Error function that mimics a BetweenFactor
        :param this: reference to the current CustomFactor being evaluated
        :param v: Values object
        :param H: list of references to the Jacobian arrays
        :return: the non-linear error
        """
        # Should be true if we only request the error
        self.assertTrue(H is None)
        k0, k1 = this.keys()[0], this.keys()[1]
        pose_i, pose_j = v.atPose2(k0), v.atPose2(k1)
        if H is not None:
            relative = pose_i.between(pose_j)
            H[0] = -relative.inverse().AdjointMap()
            H[1] = np.eye(3)
        return expected.localCoordinates(pose_i.between(pose_j))

    noise_model = gtsam.noiseModel.Unit.Create(3)
    custom = CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)
    values = Values()
    values.insert(0, gT1)
    values.insert(1, gT2)

    # Reference factor with the same measurement and noise.
    between = gtsam.BetweenFactorPose2(0, 1, expected, noise_model)
    np.testing.assert_allclose(custom.error(values), between.error(values))
def test_jacobian(self):
    """Tests if the factor result matches the GTSAM Pose2 unit test"""
    gT1 = Pose2(1, 2, np.pi / 2)
    gT2 = Pose2(-1, 4, np.pi)
    # FIX: the original declared an unused `expected = Pose2(2, 2, pi/2)` that
    # misleadingly suggested a different measurement; the measurement actually
    # used by both factors is the identity pose, named here once.
    measured = Pose2(0, 0, 0)

    def error_func(this: CustomFactor, v: gtsam.Values,
                   H: List[np.ndarray]):
        """Error mimicking a BetweenFactor; fills H with Jacobians when requested."""
        key0 = this.keys()[0]
        key1 = this.keys()[1]
        gT1, gT2 = v.atPose2(key0), v.atPose2(key1)
        error = measured.localCoordinates(gT1.between(gT2))
        # H is an empty list when only the error is requested.
        if len(H) > 0:
            result = gT1.between(gT2)
            H[0] = -result.inverse().AdjointMap()
            H[1] = np.eye(3)
        return error

    noise_model = gtsam.noiseModel.Unit.Create(3)
    cf = ge.CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)
    v = Values()
    v.insert(0, gT1)
    v.insert(1, gT2)

    # Reference factor with the same measurement and noise model.
    bf = gtsam.BetweenFactorPose2(0, 1, measured, noise_model)

    gf = cf.linearize(v)
    gf_b = bf.linearize(v)
    J_cf, b_cf = gf.jacobian()
    J_bf, b_bf = gf_b.jacobian()
    np.testing.assert_allclose(J_cf, J_bf)
    np.testing.assert_allclose(b_cf, b_bf)
def main():
    """
    Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).

    Each variable in the system (poses and landmarks) must be identified with a unique key.
    We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
    Here we will use Symbols

    In GTSAM, measurement functions are represented as 'factors'. Several common factors
    have been provided with the library for solving robotics/SLAM/Bundle Adjustment problems.
    Here we will use Projection factors to model the camera's landmark observations.
    Also, we will initialize the robot at some location using a Prior factor.

    When the factors are created, we will add them to a Factor Graph. As the factors we are using
    are nonlinear factors, we will need a Nonlinear Factor Graph.

    Finally, once all of the factors have been added to our factor graph, we will want to
    solve/optimize to graph to find the best (Maximum A Posteriori) set of variable values.
    GTSAM includes several nonlinear optimizers to perform this step. Here we will use a
    trust-region method known as Powell's Dogleg

    The nonlinear solvers within GTSAM are iterative solvers, meaning they linearize the
    nonlinear functions around an initial linearization point, then solve the linear system
    to update the linearization point. This happens repeatedly until the solver converges
    to a consistent set of variable values. This requires us to specify an initial guess
    for each variable, held in a Values container.
    """
    # Define the camera calibration parameters
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
        2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()

    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create a factor graph
    graph = NonlinearFactorGraph()

    # Add a prior on pose x1. This indirectly specifies where the origin is.
    # 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z
    pose_noise = gtsam.noiseModel.Diagonal.Sigmas(
        np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
    factor = PriorFactorPose3(X(0), poses[0], pose_noise)
    graph.push_back(factor)

    # Simulated measurements from each camera pose, adding them to the factor graph
    for i, pose in enumerate(poses):
        camera = PinholeCameraCal3_S2(pose, K)
        for j, point in enumerate(points):
            measurement = camera.project(point)
            factor = GenericProjectionFactorCal3_S2(
                measurement, measurement_noise, X(i), L(j), K)
            graph.push_back(factor)

    # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained
    # Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance
    # between the first camera and the first landmark. All other landmark positions are interpreted using this scale.
    point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
    factor = PriorFactorPoint3(L(0), points[0], point_noise)
    graph.push_back(factor)
    graph.print_('Factor Graph:\n')

    # Create the data structure to hold the initial estimate to the solution
    # Intentionally initialize the variables off from the ground truth
    initial_estimate = Values()
    for i, pose in enumerate(poses):
        transformed_pose = pose.retract(0.1 * np.random.randn(6, 1))
        initial_estimate.insert(X(i), transformed_pose)
    for j, point in enumerate(points):
        transformed_point = point + 0.1 * np.random.randn(3)
        initial_estimate.insert(L(j), transformed_point)
    initial_estimate.print_('Initial Estimates:\n')

    # Optimize the graph and print results
    params = gtsam.DoglegParams()
    params.setVerbosity('TERMINATION')
    optimizer = DoglegOptimizer(graph, initial_estimate, params)
    print('Optimizing:')
    result = optimizer.optimize()
    result.print_('Final results:\n')
    print('initial error = {}'.format(graph.error(initial_estimate)))
    print('final error = {}'.format(graph.error(result)))

    # Plot the optimized trajectory and landmarks with marginal covariances.
    marginals = Marginals(graph, result)
    plot.plot_3d_points(1, result, marginals=marginals)
    plot.plot_trajectory(1, result, marginals=marginals, scale=8)
    plot.set_axes_equal(1)
    plt.show()
def main():
    """
    A structure-from-motion example with landmarks
    - The landmarks form a 10 meter cube
    - The robot rotates around the landmarks, always facing towards the cube
    """
    # Define the camera calibration parameters
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    camera_noise = gtsam.noiseModel.Isotropic.Sigma(
        2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()
    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create a NonlinearISAM object which will relinearize and reorder the variables
    # every "reorderInterval" updates
    isam = NonlinearISAM(reorderInterval=3)

    # Create a Factor Graph and Values to hold the new data
    graph = NonlinearFactorGraph()
    initial_estimate = Values()

    # Loop over the different poses, adding the observations to iSAM incrementally
    for i, pose in enumerate(poses):
        camera = PinholeCameraCal3_S2(pose, K)
        # Add factors for each landmark observation
        for j, point in enumerate(points):
            measurement = camera.project(point)
            factor = GenericProjectionFactorCal3_S2(
                measurement, camera_noise, X(i), L(j), K)
            graph.push_back(factor)

        # Intentionally initialize the variables off from the ground truth
        noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25),
                      t=Point3(0.05, -0.10, 0.20))
        initial_xi = pose.compose(noise)

        # Add an initial guess for the current pose
        initial_estimate.insert(X(i), initial_xi)

        # If this is the first iteration, add a prior on the first pose to set the coordinate frame
        # and a prior on the first landmark to set the scale
        # Also, as iSAM solves incrementally, we must wait until each is observed at least twice before
        # adding it to iSAM.
        if i == 0:
            # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z
            pose_noise = gtsam.noiseModel.Diagonal.Sigmas(
                np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
            factor = PriorFactorPose3(X(0), poses[0], pose_noise)
            graph.push_back(factor)

            # Add a prior on landmark l0
            point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
            factor = PriorFactorPoint3(L(0), points[0], point_noise)
            graph.push_back(factor)

            # Add initial guesses to all observed landmarks
            noise = np.array([-0.25, 0.20, 0.15])
            for j, point in enumerate(points):
                # Intentionally initialize the variables off from the ground truth
                initial_lj = points[j] + noise
                initial_estimate.insert(L(j), initial_lj)
        else:
            # Update iSAM with the new factors
            isam.update(graph, initial_estimate)
            current_estimate = isam.estimate()

            print('*' * 50)
            print('Frame {}:'.format(i))
            current_estimate.print_('Current estimate: ')

            # Clear the factor graph and values for the next iteration
            graph.resize(0)
            initial_estimate.clear()
measurement = camera.project(point) # print(measurement) # factor = GenericProjectionFactorCal3_S2(measurement, measurement_noise, X(i), L(j), K) factor = gtsam.CustomFactor(measurement_noise, [X(i),L(j)], partial(error_point_landmarks,measurement, K, sems)) graph.push_back(factor) #Add Point Prior point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) # graph.print('Factor Graph:\n') poses_noise = [] points_noise = [] #Store Inital Values initial_estimate = Values() for i, pose in enumerate(poses): transformed_pose = pose.retract(0.1 * np.random.randn(6, 1)) poses_noise.append(transformed_pose) initial_estimate.insert(X(i), transformed_pose) for j, point in enumerate(points): transformed_point = point + 0.1*np.random.randn(3) points_noise.append(transformed_point) initial_estimate.insert(L(j), transformed_point) # initial_estimate.print('Initial Estimates:\n') # Optimize the graph and print results params = gtsam.LevenbergMarquardtParams() params.setVerbosity('TERMINATION') optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params) print('Optimizing:')
def IMU_example():
    """Run iSAM 2 example with IMU factor."""
    # Start with a camera on x-axis looking at origin
    radius = 30
    camera = get_camera(radius)
    pose_0 = camera.pose()

    # Simulation time step and constant angular rate of the circular scenario.
    delta_t = 1.0 / 18  # makes for 10 degrees per step
    angular_velocity = math.radians(180)  # rad/sec
    scenario = get_scenario(radius, pose_0, angular_velocity, delta_t)

    PARAMS, BIAS_COVARIANCE, DELTA = preintegration_parameters()

    # Create a factor graph
    graph = NonlinearFactorGraph()

    # Create (incremental) ISAM2 solver
    isam = ISAM2()

    # Create the initial estimate to the solution
    # Intentionally initialize the variables off from the ground truth
    initialEstimate = Values()

    # Add a prior on pose x0. This indirectly specifies where the origin is.
    # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
    noise = gtsam.noiseModel.Diagonal.Sigmas(
        np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3]))
    graph.push_back(PriorFactorPose3(X(0), pose_0, noise))

    # Add imu priors
    biasKey = B(0)
    biasnoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
    biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias.ConstantBias(),
                                        biasnoise)
    graph.push_back(biasprior)
    initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias())
    velnoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)

    # Calculate with correct initial velocity
    n_velocity = vector3(0, angular_velocity * radius, 0)
    velprior = PriorFactorVector(V(0), n_velocity, velnoise)
    graph.push_back(velprior)
    initialEstimate.insert(V(0), n_velocity)

    # Preintegrated IMU measurements, accumulated between consecutive poses.
    accum = gtsam.PreintegratedImuMeasurements(PARAMS)

    # Simulate poses and imu measurements, adding them to the factor graph
    for i in range(80):
        t = i * delta_t  # simulation time
        if i == 0:  # First time add two poses
            pose_1 = scenario.pose(delta_t)
            initialEstimate.insert(X(0), pose_0.compose(DELTA))
            initialEstimate.insert(X(1), pose_1.compose(DELTA))
        elif i >= 2:  # Add more poses as necessary
            pose_i = scenario.pose(t)
            initialEstimate.insert(X(i), pose_i.compose(DELTA))

        if i > 0:
            # Add Bias variables periodically
            if i % 5 == 0:
                biasKey += 1
                factor = BetweenFactorConstantBias(
                    biasKey - 1, biasKey, gtsam.imuBias.ConstantBias(),
                    BIAS_COVARIANCE)
                graph.add(factor)
                initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias())

            # Predict acceleration and gyro measurements in (actual) body frame
            nRb = scenario.rotation(t).matrix()
            bRn = np.transpose(nRb)
            measuredAcc = scenario.acceleration_b(t) - np.dot(bRn, n_gravity)
            measuredOmega = scenario.omega_b(t)
            accum.integrateMeasurement(measuredAcc, measuredOmega, delta_t)

            # Add Imu Factor
            imufac = ImuFactor(X(i - 1), V(i - 1), X(i), V(i), biasKey, accum)
            graph.add(imufac)

            # insert new velocity, which is wrong
            initialEstimate.insert(V(i), n_velocity)
            accum.resetIntegration()

            # Incremental solution
            isam.update(graph, initialEstimate)
            result = isam.calculateEstimate()
            plot.plot_incremental_trajectory(0, result,
                                             start=i, scale=3,
                                             time_interval=0.01)

            # reset the new-factor containers; iSAM keeps the old factors
            graph = NonlinearFactorGraph()
            initialEstimate.clear()

    plt.show()