def test_new(self):
    """Check that a CustomFactor can be constructed from a noise model, keys, and a callback."""
    def stub_error(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
        """Constant residual; never fills in Jacobians."""
        return np.array([1, 0, 0])

    unit_model = gtsam.noiseModel.Unit.Create(3)
    cf = ge.CustomFactor(unit_model, gtsam.KeyVector([0]), stub_error)
def test_factor(self):
    """Check that the InnerConstraint factor leaves the mean unchanged."""
    # Build a graph with two rotation variables, one BetweenFactor, and one
    # InnerConstraint. The optimum must satisfy the between factor while the
    # other variable moves so the mean stays where it started.
    # Mean of R and R' is identity. The BetweenFactor asks for R21 = R*R*R,
    # i.e. geodesic length 3 rather than 2.
    graph = gtsam.NonlinearFactorGraph()
    relative = R.compose(R.compose(R))
    graph.add(gtsam.BetweenFactorRot3(1, 2, relative, MODEL))

    mean_keys = gtsam.KeyVector()
    mean_keys.append(1)
    mean_keys.append(2)
    graph.add(gtsam.KarcherMeanFactorRot3(mean_keys))

    initial = gtsam.Values()
    initial.insert(1, R.inverse())
    initial.insert(2, R)
    expected = Rot3()

    result = gtsam.GaussNewtonOptimizer(graph, initial).optimize()
    actual = gtsam.FindKarcherMean(
        gtsam.Rot3Vector([result.atRot3(1), result.atRot3(2)]))

    # Mean is unchanged and the between constraint is satisfied.
    self.gtsamAssertEquals(expected, actual)
    self.gtsamAssertEquals(relative,
                           result.atRot3(1).between(result.atRot3(2)))
def plot_incremental_trajectory(fignum, values, start=0, scale=1, marginals=None, time_interval=0.0):
    """
    Incrementally plot a complete 3D trajectory using poses in `values`.

    Args:
        fignum (int): Integer representing the figure number to use for plotting.
        values (gtsam.Values): Values dict containing the poses.
        start (int): Starting index to start plotting from.
        scale (float): Value to scale the poses by.
        marginals (gtsam.Marginals): Marginalized probability values of the estimation.
            Used to plot uncertainty bounds.
        time_interval (float): Time in seconds to pause between each rendering.
            Used to create animation effect.
    """
    fig = plt.figure(fignum)
    # `Figure.gca(projection='3d')` was deprecated in Matplotlib 3.4 and removed
    # in 3.6. Reuse the figure's existing axes (so repeated incremental calls
    # draw into the same 3D axes) or create the 3D subplot on first use.
    if not fig.axes:
        axes = fig.add_subplot(projection='3d')
    else:
        axes = fig.axes[0]

    poses = gtsam.utilities.allPose3s(values)
    keys = gtsam.KeyVector(poses.keys())

    # Only draw poses from `start` onward; earlier ones were plotted in
    # previous calls.
    for key in keys[start:]:
        if values.exists(key):
            pose_i = values.atPose3(key)
            plot_pose3(fignum, pose_i, scale)

    # Update the plot space to encompass all plotted points
    axes.autoscale()

    # Set the 3 axes equal
    set_axes_equal(fignum)

    # Pause for a fixed amount of seconds
    plt.pause(time_interval)
def test_new(self):
    """Test the creation of a new CustomFactor"""
    def minimal_error(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
        """Minimal error function stub"""
        return np.array([1, 0, 0])

    unit3 = gtsam.noiseModel.Unit.Create(3)
    cf = CustomFactor(unit3, gtsam.KeyVector([0]), minimal_error)
def test_ordering(self):
    """Test elimination with a caller-supplied (reversed) ordering."""
    gfg, keys = create_graph()

    # Eliminate in reverse key order.
    ordering = gtsam.Ordering()
    for key in reversed(keys):
        ordering.push_back(key)

    bayes_net = gfg.eliminateSequential(ordering)
    self.assertEqual(bayes_net.size(), 3)

    keyVector = gtsam.KeyVector()
    keyVector.append(keys[2])
def test_call(self):
    """Check that error() evaluates the Python callback on the stored values."""
    expected_pose = Pose2(1, 1, 0)

    def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray:
        """Residual from the stored pose to the captured expected pose."""
        key0 = this.keys()[0]
        return -v.atPose2(key0).localCoordinates(expected_pose)

    unit_model = gtsam.noiseModel.Unit.Create(3)
    cf = ge.CustomFactor(unit_model, gtsam.KeyVector([0]), error_func)

    v = Values()
    v.insert(0, Pose2(1, 0, 0))

    # 0.5 * ||residual||^2 under a unit noise model.
    self.assertEqual(cf.error(v), 0.5)
def test_jacobian(self):
    """Tests if the factor result matches the GTSAM Pose2 unit test"""
    gT1 = Pose2(1, 2, np.pi / 2)
    gT2 = Pose2(-1, 4, np.pi)
    expected = Pose2(2, 2, np.pi / 2)

    def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
        """
        The custom error function. Variables may be captured from the
        enclosing scope or looked up by indexing `v`. Jacobians are returned
        by writing into the `H` list of numpy matrices.
        """
        k0, k1 = this.keys()[0], this.keys()[1]
        pose_a, pose_b = v.atPose2(k0), v.atPose2(k1)
        relative = pose_a.between(pose_b)
        if H is not None:
            H[0] = -relative.inverse().AdjointMap()
            H[1] = np.eye(3)
        return expected.localCoordinates(relative)

    unit_model = gtsam.noiseModel.Unit.Create(3)
    cf = CustomFactor(unit_model, gtsam.KeyVector([0, 1]), error_func)

    v = Values()
    v.insert(0, gT1)
    v.insert(1, gT2)

    # The custom factor must linearize identically to the built-in
    # BetweenFactorPose2.
    bf = gtsam.BetweenFactorPose2(0, 1, expected, unit_model)
    J_cf, b_cf = cf.linearize(v).jacobian()
    J_bf, b_bf = bf.linearize(v).jacobian()
    np.testing.assert_allclose(J_cf, J_bf)
    np.testing.assert_allclose(b_cf, b_bf)
def test_no_jacobian(self):
    """Tests that we will not calculate the Jacobian if not requested"""
    gT1 = Pose2(1, 2, np.pi / 2)
    gT2 = Pose2(-1, 4, np.pi)
    expected = Pose2(2, 2, np.pi / 2)

    def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
        """
        Error function that mimics a BetweenFactor

        :param this: reference to the current CustomFactor being evaluated
        :param v: Values object
        :param H: list of references to the Jacobian arrays
        :return: the non-linear error
        """
        k0, k1 = this.keys()[0], this.keys()[1]
        pose_a, pose_b = v.atPose2(k0), v.atPose2(k1)
        error = expected.localCoordinates(pose_a.between(pose_b))

        # Only the error was requested, so H must not have been supplied.
        self.assertTrue(H is None)

        if H is not None:
            relative = pose_a.between(pose_b)
            H[0] = -relative.inverse().AdjointMap()
            H[1] = np.eye(3)
        return error

    unit_model = gtsam.noiseModel.Unit.Create(3)
    cf = CustomFactor(unit_model, gtsam.KeyVector([0, 1]), error_func)

    v = Values()
    v.insert(0, gT1)
    v.insert(1, gT2)

    # Error must agree with the built-in BetweenFactorPose2.
    bf = gtsam.BetweenFactorPose2(0, 1, expected, unit_model)
    np.testing.assert_allclose(cf.error(v), bf.error(v))
def test_jacobian(self):
    """Tests if the factor result matches the GTSAM Pose2 unit test"""
    gT1 = Pose2(1, 2, np.pi / 2)
    gT2 = Pose2(-1, 4, np.pi)
    expected = Pose2(2, 2, np.pi / 2)

    def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
        """Between-style error; fills H only when Jacobians are requested."""
        k0, k1 = this.keys()[0], this.keys()[1]
        pose_a, pose_b = v.atPose2(k0), v.atPose2(k1)
        error = Pose2(0, 0, 0).localCoordinates(pose_a.between(pose_b))
        # An empty H list signals that no Jacobians were requested.
        if len(H) > 0:
            relative = pose_a.between(pose_b)
            H[0] = -relative.inverse().AdjointMap()
            H[1] = np.eye(3)
        return error

    unit_model = gtsam.noiseModel.Unit.Create(3)
    cf = ge.CustomFactor(unit_model, gtsam.KeyVector([0, 1]), error_func)

    v = Values()
    v.insert(0, gT1)
    v.insert(1, gT2)

    # Must linearize identically to the built-in BetweenFactorPose2.
    bf = gtsam.BetweenFactorPose2(0, 1, Pose2(0, 0, 0), unit_model)
    J_cf, b_cf = cf.linearize(v).jacobian()
    J_bf, b_bf = bf.linearize(v).jacobian()
    np.testing.assert_allclose(J_cf, J_bf)
    np.testing.assert_allclose(b_cf, b_bf)
# NOTE(review): legacy Python 2 syntax (print statements, flat
# `noiseModel_Isotropic` names) from the old gtsam cython wrapper. This is a
# fragment: `vv`, `vv2`, and `diag` are defined earlier in the file.
vv.insert(4, np.array([1., 2., 4.]))
vv.print_(b"vv:")
# Element-wise addition of two VectorValues containers.
vv3 = vv.add(vv2)
vv3.print_(b"vv3:")

values = gtsam.Values()
values.insert(1, gtsam.Point3())
values.insert(2, gtsam.Rot3())
values.print_(b"values:")

# Prior factor on the vector variable with key 1.
factor = gtsam.PriorFactorVector(1, np.array([1., 2., 3.]), diag)
print "Prior factor vector: ", factor

# Exercise KeyVector accessors.
keys = gtsam.KeyVector()
keys.push_back(1)
keys.push_back(2)
print 'size: ', keys.size()
print keys.at(0)
print keys.at(1)

# 2-dimensional isotropic noise with precision 3.0.
noise = gtsam.noiseModel_Isotropic.Precision(2, 3.0)
noise.print_('noise:')
print 'noise print:', noise

# Single-block JacobianFactor on key 7; inspect its A matrix and b vector.
f = gtsam.JacobianFactor(7, np.ones([2, 2]), model=noise, b=np.ones(2))
print 'JacobianFactor(7):\n', f
print "A = ", f.getA()
print "b = ", f.getb()
def batch_factorgraph_example():
    """Build a small 2D pose/landmark problem, solve it in one batch, and plot marginals."""
    # Nonlinear factor graph to be filled in below.
    graph = gtsam.NonlinearFactorGraph()

    # Symbol keys for the three poses.
    X1, X2, X3 = X(1), X(2), X(3)
    pose_variables = [X1, X2, X3]

    # Symbol keys for the two landmarks.
    L1, L2 = L(1), L(2)
    landmark_variables = [L1, L2]

    # Prior pinning pose X1 to the origin.
    prior_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.1]))
    graph.add(
        gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), prior_model))

    # Odometry between consecutive poses X1->X2 and X2->X3.
    odometry_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.1]))
    graph.add(
        gtsam.BetweenFactorPose2(X1, X2, gtsam.Pose2(2.0, 0.0, 0.0),
                                 odometry_model))
    graph.add(
        gtsam.BetweenFactorPose2(X2, X3, gtsam.Pose2(2.0, 0.0, 0.0),
                                 odometry_model))

    # Bearing-range observations of landmarks L1 and L2.
    measurement_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.05, 0.1]))
    graph.add(
        gtsam.BearingRangeFactor2D(X1, L1, gtsam.Rot2.fromDegrees(45),
                                   np.sqrt(4.0 + 4.0), measurement_model))
    graph.add(
        gtsam.BearingRangeFactor2D(X2, L1, gtsam.Rot2.fromDegrees(90), 2.0,
                                   measurement_model))
    graph.add(
        gtsam.BearingRangeFactor2D(X3, L2, gtsam.Rot2.fromDegrees(90), 2.0,
                                   measurement_model))

    # Deliberately inaccurate initial estimate.
    initial_estimate = gtsam.Values()
    initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
    initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
    initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
    initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
    initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))

    # Solve the MAP problem with Levenberg-Marquardt.
    params = gtsam.LevenbergMarquardtParams()
    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
                                                  params)
    result = optimizer.optimize()

    # Recover per-variable marginal covariances from the solution.
    marginals = gtsam.Marginals(graph, result)
    pose_marginals = [
        MultivariateNormalParameters(result.atPose2(var),
                                     marginals.marginalCovariance(var))
        for var in pose_variables
    ]
    landmark_marginals = [
        MultivariateNormalParameters(result.atPoint2(var),
                                     marginals.marginalCovariance(var))
        for var in landmark_variables
    ]

    # The joint marginal over all variables is available as well.
    joint_all = marginals.jointMarginalCovariance(
        gtsam.KeyVector(pose_variables + landmark_variables))
    print("Joint covariance over all variables:")
    print(joint_all.fullMatrix())

    # Plot the marginals.
    plot_result(pose_marginals, landmark_marginals)
# Optimize using Levenberg-Marquardt optimization. The optimizer # accepts an optional set of configuration parameters, controlling # things like convergence criteria, the type of linear system solver # to use, and the amount of information displayed during optimization. # Here we will use the default set of parameters. See the # documentation for the full set of parameters. params = gtsam.LevenbergMarquardtParams() optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params) result = optimizer.optimize() print("\nFinal Result:\n{}".format(result)) optimizer = gtsam.LevenbergMarquardtOptimizer(graph, result, params) result = optimizer.optimize() print("\nFinal Result:\n{}".format(result)) # Calculate and print marginal covariances for all variables marginals = gtsam.Marginals(graph, result) nodes = np.array([X(1), X(2), X(3)]) S = marginals.jointMarginalCovariance(gtsam.KeyVector(nodes)) np.set_printoptions(linewidth=120, precision=4, suppress=True) print(S.at(X(1), X(2))) # for (key, str) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"), (L2, "L2")]: # print("{} covariance:\n{}\n".format(str, marginals.marginalCovariance(key)))