Code example #1
    def test_graph_slam_full_with_correspondence_improvement(self):
        """
        Tests that from noisy controls and measurements, the full algorithm reduces the RMS error of state estimates
        over multiple iterations, if the correspondences are correctly given.
        """
        for test_index, controls in enumerate(self.test_controls):
            ground_truth_states = self.test_ground_truth_states[test_index]

            measurements, correspondences, ground_truth_landmarks = \
                self.generate_varied_measurements(ground_truth_states)

            noisy_controls = [add_noise_to_control(control, 0.1, math.pi * 1 / 180) for control in controls]
            noisy_measurements = [add_noise_to_measurements_for_state(measurements_for_state, 0.01, math.pi * 0.1 / 180)
                                  for measurements_for_state in measurements]
            landmark_estimates = dict()

            output_state_estimates = graph_slam_initialize(noisy_controls, ground_truth_states[0])
            initial_rms_state_error = self.calculate_rms_state_error(output_state_estimates, ground_truth_states)

            for iteration_index in range(100):
                xi, omega, landmark_estimates = graph_slam_linearize(output_state_estimates, landmark_estimates,
                                                                     noisy_controls, noisy_measurements,
                                                                     correspondences, self.R, self.Q)
                xi_reduced, omega_reduced = graph_slam_reduce(xi, omega, landmark_estimates)
                output_state_estimates, sigma, landmark_estimates = graph_slam_solve(xi_reduced, omega_reduced, xi,
                                                                                     omega)

                rms_landmark_error = self.calculate_rms_landmark_error(landmark_estimates, ground_truth_landmarks)
                print("Iteration {}, RMS landmark position error: {}".format(iteration_index + 1, rms_landmark_error))

            rms_state_error = self.calculate_rms_state_error(output_state_estimates, ground_truth_states)

            # Assert that incorporating associated measurements will always improve upon the odometry-only estimate
            self.assertLess(rms_state_error, initial_rms_state_error)
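The improvement check above relies on an RMS error helper that is not shown in this excerpt. As a rough illustration only, here is a minimal sketch of such a helper, assuming states are 3x1 numpy column vectors (x, y, heading) and that only the position components are compared; the project's calculate_rms_state_error may be defined differently.

import numpy as np


def rms_position_error(state_estimates, ground_truth_states):
    """Root-mean-square (x, y) position error between paired state estimates."""
    squared_errors = [
        float(np.sum((estimate[:2] - truth[:2]) ** 2))
        for estimate, truth in zip(state_estimates, ground_truth_states)
    ]
    return float(np.sqrt(np.mean(squared_errors)))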
Code example #2
    def test_correspondence_probability_with_correspondence_round_trip(self):
        """
        In this test, perfect measurements of the same landmark from different poses are fed into GraphSLAM, all
        measurements with unique correspondence values. Then, the correspondence probabilities are calculated, and
        verified.
        """
        for test_index, controls in enumerate(self.test_controls):
            ground_truth_states = self.test_ground_truth_states[test_index]

            measurements, correspondences, ground_truth_landmarks \
                = self.generate_non_corresponding_measurements_of_same_landmarks(ground_truth_states)

            landmark_estimates = dict()

            xi, omega, landmark_estimates = graph_slam_linearize(
                ground_truth_states, landmark_estimates, controls,
                measurements, correspondences, self.R, self.Q)
            xi_reduced, omega_reduced = graph_slam_reduce(
                xi, omega, landmark_estimates)
            output_state_estimates, sigma, landmark_estimates = graph_slam_solve(
                xi_reduced, omega_reduced, xi, omega)

            # Each pairwise correspondence likelihood must be strictly positive; with
            # perfect measurements of the same landmarks, pairs of estimates describing
            # the same physical landmark coincide and maximize the likelihood.
            for j in range(len(landmark_estimates)):
                for k in range(j + 1, len(landmark_estimates)):
                    p = calculate_correspondence_probability(
                        omega, sigma, landmark_estimates, j, k)
                    self.assertGreater(p, 0.0)
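The probability evaluated above follows the GraphSLAM correspondence test described in Probabilistic Robotics: two landmark estimates are likely to describe the same landmark when a Gaussian in the difference of their means, weighted by the information of that difference, takes a large value. Below is a minimal sketch of that computation, assuming the mean estimates and the covariance of their difference have already been extracted; the project's calculate_correspondence_probability recovers these quantities from omega and sigma itself.

import numpy as np


def correspondence_likelihood(mu_j, mu_k, sigma_difference):
    """Gaussian likelihood that two landmark estimates describe the same landmark.

    mu_j, mu_k: column-vector mean estimates of landmarks j and k.
    sigma_difference: covariance of the difference (mu_j - mu_k).
    """
    delta = mu_j - mu_k
    information = np.linalg.inv(sigma_difference)
    normalizer = 1.0 / np.sqrt(np.linalg.det(2.0 * np.pi * sigma_difference))
    return float(normalizer * np.exp(-0.5 * float(delta.T @ information @ delta)))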
Code example #3
    def test_linearize_reduce_no_correspondence_round_trip(self):
        """
        Performs GraphSLAM linearize on measurements and controls, then reduces the dimensionality of the information
        representation via GraphSLAM reduce. Finally, recovers the state means, and checks that they are according to
        the expected values calculated from the controls in a noise-free world. Each input measurement corresponds to
        a different landmark.
        """
        for test_index, controls in enumerate(self.test_controls):
            ground_truth_states = self.test_ground_truth_states[test_index]

            measurements, correspondences, ground_truth_landmarks = \
                self.generate_unique_landmark_measurements(ground_truth_states)

            landmark_estimates = dict()

            xi, omega, landmark_estimates = graph_slam_linearize(ground_truth_states, landmark_estimates, controls,
                                                                 measurements, correspondences, self.R, self.Q)
            xi_reduced, omega_reduced = graph_slam_reduce(xi, omega, landmark_estimates)

            expected_information_size = len(ground_truth_states) * 3
            self.assert_right_information_size(expected_information_size, xi_reduced, omega_reduced)

            mu, sigma = self.solve_from_information(xi_reduced, omega_reduced)

            self.assert_mu_close_to_ground_truth_states(mu, ground_truth_states)
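solve_from_information is also not shown in this excerpt; in the information (canonical) form, recovering the moments is a direct inversion, since the covariance is the inverse of the information matrix and the mean follows from it. A minimal sketch, assuming omega is a well-conditioned square matrix and xi a matching column vector:

import numpy as np


def recover_moments_from_information(xi, omega):
    """Recover mean and covariance from an information vector/matrix pair."""
    sigma = np.linalg.inv(omega)  # covariance is the inverse information matrix
    mu = sigma @ xi               # mean: mu = sigma * xi
    return mu, sigma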
Code example #4
    def test_graph_slam_full_with_correspondence_round_trip(self):
        """
        The test execution follows the description of
        "meth:`core.test.TestGraphSlam.test_graph_slam_full_no_correspondence_round_trip`,
        with the exception, that now every input measurement corresponds to the same landmark.
        """
        for test_index, controls in enumerate(self.test_controls):
            ground_truth_states = self.test_ground_truth_states[test_index]

            measurements, correspondences, ground_truth_landmarks \
                = self.generate_corresponding_measurements_of_same_landmarks(ground_truth_states)

            landmark_estimates = dict()

            xi, omega, landmark_estimates = graph_slam_linearize(
                ground_truth_states, landmark_estimates, controls,
                measurements, correspondences, self.R, self.Q)
            xi_reduced, omega_reduced = graph_slam_reduce(
                xi, omega, landmark_estimates)
            output_state_estimates, sigma, landmark_estimates = graph_slam_solve(
                xi_reduced, omega_reduced, xi, omega)

            self.assert_state_estimates_close(output_state_estimates,
                                              ground_truth_states)
            self.assert_expected_landmark_estimates(ground_truth_landmarks,
                                                    landmark_estimates)
Code example #5
    def test_graph_slam_full_no_correspondence_round_trip(self):
        """
        Performs GraphSLAM a round of linearize, reduce and solve, and checks that the estimated states and landmarks
        are correct. Each input measurement corresponds to a different landmark.
        """
        for test_index, controls in enumerate(self.test_controls):
            ground_truth_states = self.test_ground_truth_states[test_index]

            measurements, correspondences, ground_truth_landmarks = \
                self.generate_unique_landmark_measurements(ground_truth_states)

            landmark_estimates = dict()

            xi, omega, landmark_estimates = graph_slam_linearize(
                ground_truth_states, landmark_estimates, controls,
                measurements, correspondences, self.R, self.Q)
            xi_reduced, omega_reduced = graph_slam_reduce(
                xi, omega, landmark_estimates)
            output_state_estimates, sigma, landmark_estimates = graph_slam_solve(
                xi_reduced, omega_reduced, xi, omega)

            self.assert_state_estimates_close(output_state_estimates,
                                              ground_truth_states)
            self.assert_expected_landmark_estimates(ground_truth_landmarks,
                                                    landmark_estimates)
Code example #6
    def test_linearize_reduce_with_correspondence_round_trip(self):
        """
        The test execution follows the description of
        "meth:`core.test.TestGraphSlam.test_linearize_reduce_no_correspondence_round_trip`,
        with the exception, that now every input measurement corresponds to the same landmark.
        """
        for test_index, controls in enumerate(self.test_controls):
            ground_truth_states = self.test_ground_truth_states[test_index]

            measurements, correspondences, expected_landmarks \
                = self.generate_corresponding_measurements_of_same_landmarks(ground_truth_states)

            landmark_estimates = dict()

            xi, omega, landmark_estimates = graph_slam_linearize(
                ground_truth_states, landmark_estimates, controls,
                measurements, correspondences, self.R, self.Q)
            xi_reduced, omega_reduced = graph_slam_reduce(
                xi, omega, landmark_estimates)

            expected_information_size = len(ground_truth_states) * 3
            self.assert_right_information_size(expected_information_size,
                                               xi_reduced, omega_reduced)

            mu, sigma = self.solve_from_information(xi_reduced, omega_reduced)

            self.assert_mu_close_to_ground_truth_states(
                mu, ground_truth_states)
Code example #7
def graph_slam_random_map():
    ground_truth_map, landmarks = generate_ground_truth_map(
        MAP_HEIGHT, MAP_WIDTH, LANDMARK_COUNT)

    # Set up truly random number generation for creating the ground truth path (if the system supports it)
    true_random_gen = rnd.SystemRandom()
    rnd.seed(true_random_gen.random())

    ground_truth_states, controls = \
        generate_ground_truth_path(ground_truth_map, max_velocity=MAX_VELOCITY,
                                   velocity_deviation=VELOCITY_DEVIATION, max_turn_rate=MAX_TURN_RATE,
                                   turn_rate_deviation=TURN_RATE_DEVIATION, step_count=STEP_COUNT,
                                   velocity_control_deviation=VELOCITY_CONTROL_DEVIATION,
                                   turn_rate_control_deviation=TURN_RATE_CONTROL_DEVIATION)

    measurements, correspondences = generate_measurements(
        ground_truth_states,
        landmarks,
        max_sensing_range=MAX_SENSING_RANGE,
        sensing_range_deviation=SENSING_RANGE_DEVIATION,
        distance_deviation=DISTANCE_DEVIATION,
        heading_deviation=HEADING_DEVIATION)

    initial_state_estimates = graph_slam_initialize(
        controls, state_t0=np.array([[0, 0, 0]]).T)

    landmark_estimates = dict()
    state_estimates = initial_state_estimates

    # Discard the generated correspondences and assign every measurement a unique
    # landmark index, so that no data association between measurements is assumed.
    correspondences = generate_unique_correspondences_for_measurements(
        measurements)

    # Motion (R) and measurement (Q) noise covariances, kept very small for this demo
    R = np.identity(3) * 0.00001
    Q = np.identity(3) * 0.00001

    for iteration_index in range(25):
        xi, omega, landmark_estimates = \
            graph_slam_linearize(state_estimates=state_estimates, landmark_estimates=landmark_estimates,
                                 controls=controls, measurements=measurements, correspondences=correspondences,
                                 motion_error_covariance=R, measurement_noise_covariance=Q)

        xi_reduced, omega_reduced = graph_slam_reduce(xi, omega,
                                                      landmark_estimates)
        state_estimates, sigma_states, landmark_estimates = graph_slam_solve(
            xi_reduced, omega_reduced, xi, omega)

    transform_states_into_frame(state_estimates, ground_truth_states[0])
    transform_states_into_frame(initial_state_estimates,
                                ground_truth_states[0])

    plt.figure(figsize=[10, 5])
    plt.subplot(131)
    plt.title("Ground truth map")
    plt.imshow(ground_truth_map, origin='lower')

    plot_path(ground_truth_states, 'C0', "Ground truth")
    plot_path(initial_state_estimates, 'C1', "Initial estimate with odometry")
    plot_path(state_estimates, 'C2', "Estimate after optimization")

    plt.legend()

    # Overlay the measurements taken from one example state onto the map plot
    current_state = 1
    plot_measurements_for_state(ground_truth_states[current_state],
                                measurements[current_state])

    plt.subplot(132)
    plt.title("Information matrix")
    omega_binary = omega != 0
    plt.imshow(omega_binary)

    plt.subplot(133)
    plt.title("Reduced information matrix")
    omega_reduced_binary = omega_reduced != 0
    plt.imshow(omega_reduced_binary)

    plt.show()
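graph_slam_initialize seeds the optimization with odometry-only pose estimates by composing the controls through the motion model, analogous to the GraphSLAM_initialize step in Probabilistic Robotics. Below is a minimal sketch under the assumptions that each control is a (velocity, turn_rate) pair, poses are 3x1 column vectors (x, y, heading), and the time step is one unit; the project's function may use a different control representation.

import math

import numpy as np


def initialize_states_from_controls(controls, state_t0, delta_t=1.0):
    """Chain the velocity motion model over the controls to build initial pose estimates."""
    states = [state_t0]
    for velocity, turn_rate in controls:
        x, y, heading = states[-1].flatten()
        if abs(turn_rate) > 1e-9:
            # Arc motion: move along a circle of radius v / w
            radius = velocity / turn_rate
            x += -radius * math.sin(heading) + radius * math.sin(heading + turn_rate * delta_t)
            y += radius * math.cos(heading) - radius * math.cos(heading + turn_rate * delta_t)
        else:
            # Degenerate case: straight-line motion
            x += velocity * delta_t * math.cos(heading)
            y += velocity * delta_t * math.sin(heading)
        heading += turn_rate * delta_t
        states.append(np.array([[x, y, heading]]).T)
    return states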
Code example #8
    # The snippet opens inside this loop, which assigns a unique correspondence index
    # to every measurement so that no data association is assumed up front
    # (the loop header and the two initializations are inferred from context).
    correspondence_index = 0
    correspondences = []
    for measurements_for_state in measurements:
        correspondences.append([
            index + correspondence_index
            for index, _ in enumerate(measurements_for_state)
        ])
        correspondence_index = correspondence_index + len(
            measurements_for_state)

    R = np.identity(3) * 0.00001
    Q = np.identity(3) * 0.00001

    for iteration_index in range(5):
        xi, omega, landmark_estimates = \
            graph_slam_linearize(state_estimates=state_estimates, landmark_estimates=landmark_estimates,
                                 controls=controls, measurements=measurements, correspondences=correspondences,
                                 motion_error_covariance=R, measurement_noise_covariance=Q)

        xi_reduced, omega_reduced = graph_slam_reduce(xi, omega,
                                                      landmark_estimates)
        state_estimates, sigma_states, landmark_estimates = graph_slam_solve(
            xi_reduced, omega_reduced, xi, omega)

    global_state_estimates = []

    # Shift the estimated (x, y) positions into the global frame by adding the first
    # ground truth position; the estimated heading is kept unchanged.
    for index, state_estimate in enumerate(state_estimates):
        global_state_estimates.append(
            np.concatenate((state_estimate[:2] + ground_truth_states[0][:2],
                            state_estimate[2].reshape((1, 1)))))

    plt.figure(figsize=[10, 5])
    plt.subplot(131)
    plt.title("Ground truth map")
    plt.imshow(ground_truth_map, origin='lower')