def test_posteriori(self):
    init_distr = GaussDistribution(mean=np.array([1, -1]),
                                   covariance=np.diag([1, 1]))
    state_matrix = np.array([[1, -0.5], [0.5, 1]], dtype=float)
    control_matrix = np.array([[0, 0], [0, 0]], dtype=float)
    state_noise = GaussDistribution(mean=np.array([0, 0]),
                                    covariance=np.diag([1, 1]))
    measurement_matrix = np.array([[1, 2]], dtype=float)
    measurement_noise = GaussDistribution(mean=np.array([[0]]),
                                          covariance=np.diag([1.0]))
    kalman = KalmanFilter(init_distr, state_matrix, control_matrix,
                          state_noise, measurement_matrix, measurement_noise)

    measurements = [-2.0, 4.5, 1.75, 7.625]
    expected_means = [[1.04081633, -1.41836735], [3.53106567, 0.25088478],
                      [0.94444962, 0.66461669], [2.68852441, 2.25424821]]

    for i in range(len(measurements)):
        kalman.predict(control=np.array([0, 0]))
        kalman.update(measurements=np.array([measurements[i]]))
        expected = expected_means[i]
        self.assertAlmostEqual(first=expected[0],
                               second=kalman.updated().mean[0],
                               delta=1e-2)
        self.assertAlmostEqual(first=expected[1],
                               second=kalman.updated().mean[1],
                               delta=1e-2)
def test_update(self):
    init_distr = GaussDistribution(mean=np.array([0, 0]),
                                   covariance=np.diag([0.3, 0.2]))
    state_matrix = np.array([[1, 0], [0, 1]], dtype=float)
    control_matrix = np.array([[2, 0], [0, 2]], dtype=float)
    state_noise = GaussDistribution(mean=np.array([0, 0]),
                                    covariance=np.diag([0.3, 0.2]))
    measurement_matrix = np.array([[1, 0], [0, 1]], dtype=float)
    measurement_noise = GaussDistribution(mean=np.array([0, 0]),
                                          covariance=np.diag([1.0, 1.0]))
    kalman = KalmanFilter(init_distr, state_matrix, control_matrix,
                          state_noise, measurement_matrix, measurement_noise)

    updated_state_matrix = np.array([[2, 0], [0, 1]], dtype=float)
    kalman.update_state_matrix(updated_state_matrix)
    updated_control_matrix = np.array([[2, 3], [3, 3]], dtype=float)
    kalman.update_control_matrix(updated_control_matrix)
    updated_state_noise = GaussDistribution(mean=np.array([1, 0]),
                                            covariance=np.diag([0.2, 0.2]))
    kalman.update_state_noise(updated_state_noise)
    updated_measurement_matrix = np.array([[2, 2], [2, 2]], dtype=float)
    kalman.update_measurement_matrix(updated_measurement_matrix)
    updated_measurement_noise = GaussDistribution(
        mean=np.array([1, 1]), covariance=np.diag([2.0, 2.0]))
    kalman.update_measurement_noise(updated_measurement_noise)

    self.assertTrue(
        np.array_equal(kalman._state_matrix, updated_state_matrix))
    self.assertTrue(
        np.array_equal(kalman._control_matrix, updated_control_matrix))
    self.assertTrue(
        np.array_equal(kalman._state_noise.mean, updated_state_noise.mean))
    self.assertTrue(
        np.array_equal(kalman._state_noise.covariance,
                       updated_state_noise.covariance))
    self.assertTrue(
        np.array_equal(kalman._measurement_matrix,
                       updated_measurement_matrix))
    self.assertTrue(
        np.array_equal(kalman._measurement_noise.mean,
                       updated_measurement_noise.mean))
    self.assertTrue(
        np.array_equal(kalman._measurement_noise.covariance,
                       updated_measurement_noise.covariance))
def test_initialization(self):
    mean = 10.0
    var = 0.5
    distr = GaussDistribution.create_1d(mean, var)
    self.assertAlmostEqual(first=mean, second=distr.mean[0], delta=1e-6)
    self.assertAlmostEqual(first=var,
                           second=distr.covariance[0][0],
                           delta=1e-6)
def predict(self, control: np.array):
    # A priori mean: x' = A x + B u (delegated to _predict_state).
    priori_mean = self._predict_state(control)
    # A priori covariance: P' = A P A^T + Q.
    op_matrix = self._state_matrix.dot(self._updated.covariance).dot(
        self._state_matrix.transpose())
    priori_covariance = op_matrix + self._state_noise.covariance
    self._predicted = GaussDistribution(mean=priori_mean,
                                        covariance=priori_covariance)
def test_distr_initialization(self):
    mean = [10.0, 15.0]
    variances = [0.1, 0.5]
    multi_distr = GaussDistribution.create_independent(mean, variances)
    self.assertTrue(
        np.array_equal(np.array(mean, dtype=float), multi_distr.mean))
    self.assertTrue(
        np.array_equal(np.diag(variances), multi_distr.covariance))
def test_sample(self):
    mean = [10.0, 15.0]
    variances = [0.1, 0.5]
    multi_distr = GaussDistribution.create_independent(mean, variances)
    # Every sample should fall within six standard deviations of the mean.
    for _ in range(10):
        sample = multi_distr.sample()
        self.assertTrue(
            abs(mean[0] - sample[0]) < 6.0 * math.sqrt(variances[0]))
        self.assertTrue(
            abs(mean[1] - sample[1]) < 6.0 * math.sqrt(variances[1]))
def test_initialization(self):
    init_distr = GaussDistribution(mean=np.array([0, 0]),
                                   covariance=np.diag([0.3, 0.2]))
    state_matrix = np.array([[1, 0], [0, 1]], dtype=float)
    control_matrix = np.array([[2, 0], [0, 2]], dtype=float)
    state_noise = GaussDistribution(mean=np.array([0, 0]),
                                    covariance=np.diag([0.3, 0.2]))
    measurement_matrix = np.array([[1, 0], [0, 1]], dtype=float)
    measurement_noise = GaussDistribution(mean=np.array([0, 0]),
                                          covariance=np.diag([1.0, 1.0]))
    kalman = KalmanFilter(init_distr, state_matrix, control_matrix,
                          state_noise, measurement_matrix, measurement_noise)

    self.assertTrue(
        np.array_equal(kalman.predicted().mean, init_distr.mean))
    self.assertTrue(
        np.array_equal(kalman.predicted().covariance,
                       init_distr.covariance))
    self.assertTrue(np.array_equal(kalman.updated().mean, init_distr.mean))
    self.assertTrue(
        np.array_equal(kalman.updated().covariance, init_distr.covariance))
    self.assertTrue(np.array_equal(kalman._state_matrix, state_matrix))
    self.assertTrue(np.array_equal(kalman._control_matrix, control_matrix))
    self.assertTrue(
        np.array_equal(kalman._state_noise.mean, state_noise.mean))
    self.assertTrue(
        np.array_equal(kalman._state_noise.covariance,
                       state_noise.covariance))
    self.assertTrue(
        np.array_equal(kalman._measurement_matrix, measurement_matrix))
    self.assertTrue(
        np.array_equal(kalman._measurement_noise.mean,
                       measurement_noise.mean))
    self.assertTrue(
        np.array_equal(kalman._measurement_noise.covariance,
                       measurement_noise.covariance))
def test_priori(self):
    init_distr = GaussDistribution(mean=np.array([0, 0]),
                                   covariance=np.diag([0.1, 0.1]))
    state_matrix = np.array([[1, 0], [0, 1]], dtype=float)
    control_matrix = np.array([[2, 0], [0, 2]], dtype=float)
    state_noise = GaussDistribution(mean=np.array([0, 0]),
                                    covariance=np.diag([0.3, 0.2]))
    measurement_matrix = np.array([[1, 0], [0, 1]], dtype=float)
    measurement_noise = GaussDistribution(mean=np.array([0, 0]),
                                          covariance=np.diag([1.0, 1.0]))
    kalman = KalmanFilter(init_distr, state_matrix, control_matrix,
                          state_noise, measurement_matrix, measurement_noise)

    kalman.predict(control=np.array([1, 1], dtype=float))
    kalman.update(measurements=np.array([2, 2]))
    kalman.predict(control=np.array([1, 1], dtype=float))
    kalman.update(measurements=np.array([4.1, 4.02]))
    self.assertTrue(
        np.array_equal(kalman.predicted().mean,
                       np.array([4.0, 4.0], dtype=float)))
def update(self, measurements: np.array):
    # Sigma points drawn from the predicted state augmented with the
    # measurement noise.
    samples = self._sample_points(self._predicted, self._measurement_noise)
    x_len = len(self._updated.mean)
    z_len = len(measurements)

    # Propagate the sigma points through the measurement function.
    eval_samples = np.zeros((samples.shape[0], z_len))
    for i in range(samples.shape[0]):
        eval_samples[i] = self._eval_measurement_func(samples[i])

    # Predicted measurement mean.
    mean = np.zeros((z_len, ))
    for i in range(2 * self._n + 1):
        mean += self._mean_weights[i] * eval_samples[i]

    # Innovation covariance plus the measurement noise covariance.
    cov = np.zeros((z_len, z_len))
    for i in range(2 * self._n + 1):
        op_vector = eval_samples[i] - mean
        op_vector = op_vector.reshape((1, len(op_vector)))
        cov += self._cov_weights[i] * op_vector.transpose().dot(op_vector)
    cov += self._measurement_noise.covariance
    cov = self.fix_covariance(cov)

    # Cross-covariance between the state and measurement sigma points.
    state_samples = np.zeros((samples.shape[0], x_len))
    for i in range(2 * self._n + 1):
        state_samples[i] = samples[i][:x_len]
    cross_cov = np.zeros((x_len, z_len))
    for i in range(2 * self._n + 1):
        op_sample = state_samples[i] - self._predicted.mean
        op_sample = op_sample.reshape((1, len(op_sample)))
        op_eval = eval_samples[i] - mean
        op_eval = op_eval.reshape((1, len(op_eval)))
        cross_cov += self._cov_weights[i] * op_sample.transpose().dot(
            op_eval)

    # Kalman gain and the a posteriori estimate.
    kalman_gain = cross_cov.dot(np.linalg.inv(cov))
    mean_update = self._predicted.mean + kalman_gain.dot(measurements - mean)
    cov_update = self._predicted.covariance - kalman_gain.dot(cov).dot(
        kalman_gain.transpose())
    cov_update = self.fix_covariance(cov_update)
    self._updated = GaussDistribution(mean=mean_update,
                                      covariance=cov_update)
def test_sample(self):
    mean = [10.0, 15.0]
    variances = [0.1, 0.5]
    multi_distr = GaussDistribution.create_independent(mean, variances)

    n = 100
    samples = np.zeros((n, 2))
    weights = np.zeros((n, ))
    for i in range(n):
        samples[i] = multi_distr.sample()
        weights[i] = 1.0

    sample_distr = NaiveSampleDistribution(samples, weights)
    for _ in range(n):
        self.assertTrue(sample_distr.sample() in samples)
def test_mean(self):
    mean = [10.0, 15.0]
    variances = [0.1, 0.5]
    multi_distr = GaussDistribution.create_independent(mean, variances)

    n = 100
    samples = np.zeros((n, 2))
    weights = np.zeros((n, ))
    for i in range(n):
        samples[i] = multi_distr.sample()
        weights[i] = 1.0

    sample_distr = NaiveSampleDistribution(samples, weights)
    sample_mean = sample_distr.mean
    # Check every component, not just the first one.
    for i in range(len(mean)):
        self.assertTrue(
            abs(mean[i] - sample_mean[i]) < math.sqrt(variances[i]))
def update(self, measurements: np.array):
    # Innovation covariance S = H P' H^T + R, inverted for the gain.
    op_matrix = self._measurement_matrix.dot(
        self._predicted.covariance).dot(self._measurement_matrix.transpose())
    op_matrix = np.linalg.inv(op_matrix +
                              self._measurement_noise.covariance)
    # Kalman gain K = P' H^T S^{-1}.
    kalman_gain = self._predicted.covariance.dot(
        self._measurement_matrix.transpose()).dot(op_matrix)
    # A posteriori mean: x = x' + K (z - H x').
    op_matrix = measurements - self._calculate_measurement()
    op_matrix = kalman_gain.dot(op_matrix)
    posterior_mean = self._predicted.mean + op_matrix
    # A posteriori covariance: P = (I - K H) P'.
    op_matrix = kalman_gain.dot(self._measurement_matrix)
    op_matrix = (np.eye(N=op_matrix.shape[0], M=op_matrix.shape[1]) -
                 op_matrix)
    posterior_covariance = op_matrix.dot(self._predicted.covariance)
    self._updated = GaussDistribution(mean=posterior_mean,
                                      covariance=posterior_covariance)
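# A minimal usage sketch of the predict/update cycle exercised by the tests
# above: a 1-D constant-position state observed directly. The constructor and
# method calls mirror the tests; the `core.filters` import path is an
# assumption and may need to be adjusted to wherever KalmanFilter actually
# lives in this repository.
import numpy as np

from core.distribution import GaussDistribution
from core.filters import KalmanFilter  # hypothetical module path

init = GaussDistribution(mean=np.array([0.0]), covariance=np.diag([1.0]))
kalman = KalmanFilter(
    init,
    np.array([[1.0]]),  # state matrix A
    np.array([[0.0]]),  # control matrix B (control is unused here)
    GaussDistribution(mean=np.array([0.0]), covariance=np.diag([0.1])),
    np.array([[1.0]]),  # measurement matrix H
    GaussDistribution(mean=np.array([0.0]), covariance=np.diag([0.5])))

for z in [0.9, 1.1, 1.0]:
    kalman.predict(control=np.array([0.0]))
    kalman.update(measurements=np.array([z]))
    print(kalman.updated().mean, kalman.updated().covariance)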
def predict(self, control: np.array):
    # Sigma points drawn from the current estimate augmented with the
    # process noise.
    samples = self._sample_points(self._updated, self._state_noise)
    eval_samples = np.zeros((samples.shape[0], len(self._updated.mean)))
    for i in range(samples.shape[0]):
        eval_samples[i] = self._eval_state_func(control, samples[i])

    # Weighted mean of the propagated sigma points.
    mean = np.zeros(self._updated.mean.shape)
    for i in range(2 * self._n + 1):
        mean += self._mean_weights[i] * eval_samples[i]

    # Weighted covariance plus the process noise covariance.
    cov = np.zeros(self._updated.covariance.shape)
    for i in range(2 * self._n + 1):
        op_vector = eval_samples[i] - mean
        op_vector = op_vector.reshape((1, len(op_vector)))
        cov += self._cov_weights[i] * op_vector.transpose().dot(op_vector)
    cov += self._state_noise.covariance
    cov = self.fix_covariance(cov)

    self._predicted = GaussDistribution(mean=mean, covariance=cov)
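# For reference, a sketch of the standard Merwe scaled sigma-point weights
# that UKF implementations commonly use for `_mean_weights` and
# `_cov_weights` (2n + 1 points). This is the textbook formulation, not code
# taken from this project; alpha, kappa and beta correspond to the parameters
# passed to UKFRobot in the simulation scripts below.
import numpy as np


def merwe_weights(n: int, alpha: float, kappa: float, beta: float):
    """Return (mean_weights, cov_weights) for 2n + 1 scaled sigma points."""
    lam = alpha ** 2 * (n + kappa) - n
    mean_w = np.full(2 * n + 1, 1.0 / (2.0 * (n + lam)))
    cov_w = mean_w.copy()
    mean_w[0] = lam / (n + lam)
    cov_w[0] = lam / (n + lam) + (1.0 - alpha ** 2 + beta)
    return mean_w, cov_w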
def show(self):
    camera = CSVMultiChannelReader(
        path=self.camera_log_path,
        delimiter=';',
        parser=(lambda row, column, value: float(value)))
    robot = CSVMultiChannelReader(path=self.robot_log_path,
                                  delimiter=';',
                                  parser=parse_robot_log_column)

    camera_time_list = camera.channel_at_index(0)
    min_time = camera_time_list[0]
    max_time = camera_time_list[-1]
    merged = MultiChannelSyncTransformer([robot, camera], min_time, max_time)

    sonar_channel = merged.channel_at_index(1)
    gyro_channel = merged.channel_at_index(2)
    y_sensor_transformer = SonarAngleToPointsTransformer(
        channels=[sonar_channel, gyro_channel],
        zero_distance=self.sonar_zero_distance)

    x_camera = merged.channel_at_index(5)
    y_camera = merged.channel_at_index(6)
    plt.plot(x_camera, y_camera, 'r-', label='camera')
    plt.plot(x_camera,
             y_sensor_transformer.channel_at_index(0),
             'y-',
             label='y-sonar')

    left_channel = merged.channel_at_index(3)
    right_channel = merged.channel_at_index(4)
    velocities_transformer = WheelToRobotTransformer(
        channels=[left_channel, right_channel],
        radius=self.wheel_radius,
        base_half=self.wheel_base_half)

    time_channel = merged.channel_at_index(0)
    v_channel = velocities_transformer.channel_at_index(0)
    w_channel = velocities_transformer.channel_at_index(1)
    kinematics_transformer = DifferentialDriveTransformer(
        [time_channel, v_channel, w_channel],
        init_x=self.init_x,
        init_y=self.init_y,
        init_angle=self.init_angle)
    x_kinemat = kinematics_transformer.channel_at_index(1)
    y_kinemat = kinematics_transformer.channel_at_index(2)
    plt.plot(x_kinemat, y_kinemat, 'b-', label='kinematic model')

    x_kalman = []
    y_kalman = []
    for i in range(len(merged.channel_at_index(0)) - 1):
        dt = merged.channel_at_index(0)[i + 1] - merged.channel_at_index(0)[i]
        v = v_channel[i]
        w = w_channel[i]
        self._robot.predict(v, w, dt, self._state_noise)

        x_cam = merged.channel_at_index(5)[i]
        y_cam = merged.channel_at_index(6)[i]
        sonar = merged.channel_at_index(1)[i]
        gyro = merged.channel_at_index(2)[i]
        # The sonar reading is only trusted while the heading stays within
        # +/-30 degrees; otherwise its noise is inflated. (The original
        # condition `-math.radians(-30) < gyro` was always false.)
        if -math.radians(30) < gyro < math.radians(30):
            sonar_noise = self.sonar_normal_noise
        else:
            sonar_noise = self.sonar_invalid_noise
        measurement_noise_mean = [
            self.x_cam_noise[0], self.y_cam_noise[0], sonar_noise[0],
            self.gyro_noise[0]
        ]
        measurement_noise_vars = [
            self.x_cam_noise[1], self.y_cam_noise[1], sonar_noise[1],
            self.gyro_noise[1]
        ]
        measurement_noise = GaussDistribution.create_independent(
            mean=measurement_noise_mean, variances=measurement_noise_vars)
        self._robot.update(x_cam, y_cam, sonar, gyro, measurement_noise)

        x_kalman.append(self._robot.state[0])
        y_kalman.append(self._robot.state[1])

    plt.plot(x_kalman, y_kalman, 'g-', label=self._label)
    plt.legend(loc='upper right')
    plt.show()
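# For context, a sketch of the textbook differential-drive (unicycle) model
# that a transformer like DifferentialDriveTransformer typically integrates
# from the (time, v, w) channels. This is the standard formulation using a
# simple Euler step, not code taken from this project; the function name and
# signature are hypothetical.
import math


def integrate_differential_drive(times, v, w, init_x, init_y, init_angle):
    """Integrate unicycle kinematics over the given time stamps."""
    x, y, angle = [init_x], [init_y], [init_angle]
    for i in range(len(times) - 1):
        dt = times[i + 1] - times[i]
        angle.append(angle[-1] + w[i] * dt)
        x.append(x[-1] + v[i] * math.cos(angle[-1]) * dt)
        y.append(y[-1] + v[i] * math.sin(angle[-1]) * dt)
    return x, y, angle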
import math

from core.distribution import GaussDistribution
from core.robots import UKFRobot
from simulation import RobotResultSimulation

x_noise = (0.0, 100.0)
y_noise = (0.0, 100.0)
angle_noise = (0.0, math.radians(25.0))
initial_mean = [x_noise[0], y_noise[0], angle_noise[0]]
initial_var = [x_noise[1], y_noise[1], angle_noise[1]]
initial = GaussDistribution.create_independent(mean=initial_mean,
                                               variances=initial_var)

robot = UKFRobot(initial, alpha=1.0, kappa=0.0, beta=0.0)
simulation = RobotResultSimulation(robot, initial, 'ukf')
simulation.show()
import math

from core.distribution import GaussDistribution
from core.robots import PFRobot
from simulation import RobotResultSimulation

x_noise = (0.0, 100.0)
y_noise = (0.0, 100.0)
angle_noise = (0.0, math.radians(25.0))
initial_mean = [x_noise[0], y_noise[0], angle_noise[0]]
initial_var = [x_noise[1], y_noise[1], angle_noise[1]]
initial = GaussDistribution.create_independent(mean=initial_mean,
                                               variances=initial_var)

robot = PFRobot(initial, sample_size=1000, resampling_threshold=500)
state_noise = GaussDistribution.create_independent(
    mean=[0.0, 0.0], variances=[25.0, math.radians(25.0)])
simulation = RobotResultSimulation(robot, state_noise, 'pf')
simulation.show()