def update_radar(self, measurement):
    """
    Radar measurement update procedure
    :param measurement: Radar measurement
    """
    # Unpack the measurement vector
    range = measurement.data[0]
    azimuth = measurement.data[1]
    range_rate = measurement.data[2]

    """
    PLACE YOUR CODE HERE
    TODO: Implement radar update (correction) step
        - Calculate the predicted measurement based on the current state self._x
        - Calculate the error between the real measurement and this prediction
        - Be careful with the angle as it wraps around (the error between -179 deg
          and 179 deg is only 2 deg - use the normalize_angle function from the
          tools package)
        - Use the measurement noise matrix self._R_radar
        - As the observation model is non-linear, the H matrix (observation model)
          has to be a linearization (Jacobian matrix) of it at the given state.
          To ease this, use the _calc_jacobian_for_radar function where it is
          calculated for you
        - Follow the Extended Kalman filter update procedure - update self._x
          and self._P
    """
    self.R_ = self._R_radar
    h = self._calc_jacobian_for_radar()
    px = self._x[0]
    py = self._x[1]
    vx = self._x[2]
    vy = self._x[3]

    # Predicted measurement h(x) = [rho, phi, rho_dot]
    rho = np.sqrt(px ** 2 + py ** 2)
    phi = 0
    rho_dot = 0
    if abs(rho) > 0.0001:
        phi = normalize_angle(np.arctan2(py, px))
        rho_dot = (px * vx + py * vy) / rho

    z = np.array([range, azimuth, range_rate])
    z_pred = np.array([rho, phi, rho_dot])

    # Innovation, with the bearing error wrapped to [-pi, pi]
    y = z - z_pred
    y[1] = normalize_angle(y[1])

    # Standard EKF update: S = H P H^T + R, K = P H^T S^-1
    ht = h.transpose()
    PHt = self._P @ ht
    S = h @ PHt + self._R_radar
    Si = np.linalg.pinv(S)
    K = PHt @ Si
    self._x = self._x + (K @ y)
    I = np.eye(4)
    self._P = (I - K @ h) @ self._P
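# The snippet above delegates the linearization to _calc_jacobian_for_radar,
# which is not shown here. As a hedged sketch only (not necessarily the
# project's actual implementation), the Jacobian of h(x) = [rho, phi, rho_dot]
# with respect to the state [px, py, vx, vy] is typically computed like this:

import numpy as np

def radar_jacobian_sketch(px, py, vx, vy, eps=1e-4):
    """Hypothetical stand-in for _calc_jacobian_for_radar (assumed behaviour)."""
    rho2 = px * px + py * py
    if rho2 < eps:
        # Avoid division by zero near the origin
        return np.zeros((3, 4))
    rho = np.sqrt(rho2)
    rho3 = rho2 * rho
    return np.array([
        [px / rho,                        py / rho,                        0.0,      0.0],
        [-py / rho2,                      px / rho2,                       0.0,      0.0],
        [py * (vx * py - vy * px) / rho3, px * (vy * px - vx * py) / rho3, px / rho, py / rho],
    ])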
def set_angle(self, angle):
    if angle:
        angle = tools.normalize_angle(angle)
    if angle and Pose.match_team == TEAM_YELLOW:
        self.real_pose.angle = -angle
    else:
        self.real_pose.angle = angle
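# Several of these snippets call tools.normalize_angle, whose source is not
# included here. A minimal sketch of a typical implementation (an assumption,
# not necessarily the one used by this codebase) wraps an angle into [-pi, pi):

import math

def normalize_angle_sketch(angle):
    """Wrap an angle in radians into the interval [-pi, pi)."""
    return (angle + math.pi) % (2.0 * math.pi) - math.pi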
def process_raw_odometry():
    """
    Process raw odometry data from file to create the rt1, t, rt2 odometry
    model for SLAM.
    This function generates a file containing the odometry data structure for SLAM.
    """
    x_p = 0
    y_p = 0
    z_p = 0
    out_file = open('new.dat', 'w')
    data = read_raw("raw_Odometry.dat")
    for (step, reading) in enumerate(data):
        odometry = [
            reading['odometry']['x'],
            reading['odometry']['y'],
            reading['odometry']['z']
        ]
        print(odometry)
        x = odometry[0]
        y = odometry[1]
        z = odometry[2]
        # drawing.draw_state_for_me(step, x, y, z)
        z = tools.normalize_angle(z)

        # Decompose the relative motion into rotation 1, translation, rotation 2
        rt_1 = math.atan2(y - y_p, x - x_p) - z_p
        t = math.sqrt((x - x_p) ** 2 + (y - y_p) ** 2)
        rt_2 = z - z_p - rt_1

        x_p = x
        y_p = y
        z_p = z
        out_file.write('%s %.7f %.7f %.7f\n' % ('ODOMETRY', rt_1, t, rt_2))
    out_file.close()
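# For reference, the SLAM prediction step later consumes these (rt_1, t, rt_2)
# triples by composing them onto a pose. A minimal sketch of that forward
# model (the standard odometry-based motion model, shown here only as an
# illustration):

import math

def apply_odometry_sketch(x, y, theta, rt_1, t, rt_2):
    """Advance a pose (x, y, theta) by one (rot1, trans, rot2) odometry step."""
    x_new = x + t * math.cos(theta + rt_1)
    y_new = y + t * math.sin(theta + rt_1)
    theta_new = theta + rt_1 + rt_2  # callers typically normalize this afterwards
    return x_new, y_new, theta_new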
def correct(self, observations, noise=0.1):
    """
    Correction step. Remember to initialize landmarks outside...!!!
    @param observations: list in the form [[index, range, bearing], ...]
    @param noise: measurement noise (scalar, placed on the diagonal of Q)
    @rtype: none
    """
    # Update for every observed landmark.
    # Build Q, the noise for the measurement. Assume a diagonal matrix for Q.
    Q = np.eye(2) * noise
    for zi in observations:
        # zi is the current observation [index, range, bearing]
        n = zi[0]
        z = np.array(zi[1:])
        z.shape = (2, 1)

        # Get the Jacobian and the expected observation
        (h, z_p) = self.get_expected_landmark_observation(n)

        # Build Fx to map the low-dimensional h to the high-dimensional H
        fx = np.zeros((5, self.n), dtype=int)
        fx[:3, :3] = np.eye(3)
        i = 3 + (n - 1) * 2
        # fx[3:5, i:i+2] = np.eye(2)
        fx[3, i] = 1
        fx[4, i + 1] = 1
        H = h.dot(fx)

        # P: Sigma
        # Calculate the Kalman gain
        # K = P*H'*inv(S) where S = H*P*H' + Q
        S = H.dot(self.Sigma.dot(H.transpose())) + Q
        K = self.Sigma.dot(H.transpose().dot(np.linalg.pinv(S)))

        # Correct the new mean (depends on the observations)
        # Xt = Xt-1 + K*(z - z')
        # Remember to normalize the bearings...
        dz = tools.normalize_bearings(z - z_p)
        Kt = K.dot(dz)
        Kt.shape = self.Xt.shape
        self.Xt += Kt

        # Update the covariance matrix
        # P = (I - K*H)*P
        self.Sigma = (np.eye(self.n) - K.dot(H)).dot(self.Sigma)

        # And normalize the pose angle...
        self.Pose[2] = tools.normalize_angle(self.Pose[2])
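# get_expected_landmark_observation is referenced above but not shown. A
# hedged sketch of the assumed behaviour (mirroring the Jacobian block used in
# correction_step further below): it computes the expected range/bearing to
# landmark n and the 2x5 Jacobian with respect to [x, y, theta, mx, my].

import numpy as np

def expected_landmark_observation_sketch(pose, landmark):
    """pose = [x, y, theta], landmark = [mx, my]; returns (h, z_p)."""
    delta = np.asarray(landmark, dtype=float) - np.asarray(pose[:2], dtype=float)
    q = float(delta @ delta)
    sq = np.sqrt(q)
    z_p = np.array([[sq],
                    [np.arctan2(delta[1], delta[0]) - pose[2]]])
    h = (1.0 / q) * np.array([
        [-sq * delta[0], -sq * delta[1], 0.0,  sq * delta[0], sq * delta[1]],
        [ delta[1],      -delta[0],     -q,   -delta[1],      delta[0]],
    ])
    return h, z_p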
def predict(self, u, noise=0.1):
    # Predict the new mean (depends on the model),
    # get the Jacobian of the motion,
    # and update the covariance matrix.
    gx = self.motion_model(u)

    # TODO: check why the views have problems, meanwhile use the slices directly
    # self.SigmaXX = gx.dot(self.SigmaXX.dot(gx.transpose()))
    self.Sigma[:3, :3] = gx.dot(self.Sigma[:3, :3].dot(gx.transpose()))
    # self.SigmaXM = gx.dot(self.SigmaXM)
    self.Sigma[:3, 3:] = gx.dot(self.Sigma[:3, 3:])
    # self.SigmaMX = self.SigmaXM.transpose()
    self.Sigma[3:, :3] = self.Sigma[:3, 3:].transpose()

    # Add Rx (motion noise) to the robot block of the covariance
    r_x = np.eye(3) * noise
    r_x[2, 2] /= 10
    # self.SigmaXX += r_x
    self.Sigma[:3, :3] = self.Sigma[:3, :3] + r_x

    # Normalize the pose angle...
    self.Pose[2] = tools.normalize_angle(self.Pose[2])
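# motion_model(u) above is expected to apply the odometry command and return
# the 3x3 Jacobian Gx of the motion with respect to the robot pose. A minimal
# sketch of that Jacobian for the (r1, t, r2) odometry model, written only as
# an illustration (the project's own motion_model is not shown here):

import numpy as np

def motion_jacobian_sketch(theta, u):
    """u = [r1, t, r2]; Jacobian of the noise-free odometry motion w.r.t. [x, y, theta]."""
    r1, t, _ = u
    return np.array([
        [1.0, 0.0, -t * np.sin(theta + r1)],
        [0.0, 1.0,  t * np.cos(theta + r1)],
        [0.0, 0.0,  1.0],
    ])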
def prediction_step(mu, sigma, u, sigmot):
    '''
    % Updates the belief concerning the robot pose according to the motion model,
    % mu: 2N+3 x 1 vector representing the state mean
    % sigma: 2N+3 x 2N+3 covariance matrix
    % u: odometry reading (r1, t, r2)
    % Use u.r1, u.t, and u.r2 to access the rotation and translation values
    [m, n] = size(sigma);
    v = zeros(m, 1);
    % TODO: Compute the new mu based on the noise-free (odometry-based) motion model
    % Remember to normalize theta after the update
    % (hint: use the function normalize_angle available in tools)
    %
    % TODO: Construct the full Jacobian G
    G = [Gx, zeros(3, n-3); zeros(n-3, 3), eye(n-3)];
    %
    % TODO: Motion noise R
    % Compute the predicted sigma after incorporating the motion
    sigma = G*sigma*G' + R;
    '''
    sigma_rot1, sigma_t, sigma_rot2 = sigmot['sigma_rot1'], sigmot['sigma_t'], sigmot['sigma_rot2']
    m, n = sigma.shape
    prev_theta = mu[2].copy()

    # Noise-free odometry-based motion model, mapped into the full state with Fx
    Fx = np.hstack([np.eye(3), np.zeros([3, mu.shape[0] - 3])])
    mu += Fx.T @ np.array([u['t'] * np.cos(prev_theta + u['r1']),
                           u['t'] * np.sin(prev_theta + u['r1']),
                           normalize_angle(u['r1'] + u['r2'])])
    mu[2] = normalize_angle(mu[2])

    # 3x3 Jacobian Gx of the motion model (only the theta column is non-zero),
    # lifted to the full state: G = I + Fx' * Gx * Fx
    # Equivalent explicit construction:
    # Gx = np.eye(3)
    # Gx[0, 2] = -u['t'] * np.sin(prev_theta + u['r1'])
    # Gx[1, 2] = u['t'] * np.cos(prev_theta + u['r1'])
    # G = np.vstack([np.hstack([Gx, np.zeros([3, n - 3])]),
    #                np.hstack([np.zeros([n - 3, 3]), np.eye(n - 3)])])
    Gx = np.hstack([np.zeros([3, 2]),
                    np.vstack([-u['t'] * np.sin(prev_theta + u['r1']),
                               u['t'] * np.cos(prev_theta + u['r1']),
                               0])])
    G = np.eye(n) + Fx.T @ Gx @ Fx

    # Motion noise R, either derived from the velocity-form Jacobian V
    # or as a simple diagonal block on the robot pose
    if True:
        R_tilda = np.diag([sigma_rot1 ** 2, sigma_t ** 2, sigma_rot2 ** 2])
        V = np.zeros([3, 3])
        V[0, 0] = -u['t'] * np.sin(prev_theta + u['r1'])
        V[1, 0] = u['t'] * np.cos(prev_theta + u['r1'])
        V[2, 0] = 1.
        V[0, 1] = np.cos(prev_theta + u['r1'])
        V[1, 1] = np.sin(prev_theta + u['r1'])
        V[2, 2] = 1.
        Rx = V @ R_tilda @ V.T
        R = Fx.T @ Rx @ Fx
    else:
        R3 = np.array([[sigma_rot1, 0, 0],
                       [0, sigma_t, 0],
                       [0, 0, sigma_rot2 / 10]])
        R = np.zeros_like(sigma)
        R[0:3, 0:3] = R3

    # Predicted covariance
    sigma = G @ sigma @ G.T + R  # TODO: think about G and R
    return mu, sigma
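# A toy call, assuming prediction_step and its normalize_angle import are
# available in this module (the input values here are illustrative only):
# one landmark (N = 1), so the state has 2N + 3 = 5 dimensions.

import numpy as np

mu = np.zeros(5)                       # [x, y, theta, m1_x, m1_y]
sigma = np.eye(5) * 1e-3
u = {'r1': 0.1, 't': 0.5, 'r2': -0.05}
sigmot = {'sigma_rot1': 0.01, 'sigma_t': 0.02, 'sigma_rot2': 0.01}
mu, sigma = prediction_step(mu, sigma, u, sigmot)  # mu stays (5,), sigma stays (5, 5)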
ly = []
for landmark in world:
    lx.append(landmark['x'])
    ly.append(landmark['y'])
landmarks = [lx, ly]

print("Beginning EKFSlam Test")

# Keep track of the observed landmarks
observed_landmarks = []
slam = EKFSlam(7)

for (step, reading) in enumerate(data):
    logging.info('Step number: ' + str(step))
    # if step == 10:
    #     break
    r1 = tools.normalize_angle(reading['odometry']['r1'])
    r2 = tools.normalize_angle(reading['odometry']['r2'])
    odometry = [r1, reading['odometry']['t'], r2]
    slam.predict(odometry)
    # print "Mu and Sigma after Prediction"
    # print "Mu:", slam.Xt
    # print "Sigma:", slam.Sigma
    # print "\t\tr1: " + str(odometry[0]) + " t: " + str(odometry[1]) + " r2: " + str(odometry[2])

    # Correct with each measurement
    sensor_reading = []
    for sensor in reading['sensor']:
        # sensor_reading = [sensor['id'], sensor['range'], sensor['bearing']]
        if sensor['id'] != 0:
            logging.info('Seeing landmark id: ' + str(sensor['id']))
def correction_step(mu, sigma, z, observedLandmarks, sigmot):
    '''
    % Updates the belief, i.e., mu and sigma, after observing landmarks, according to the sensor model
    % The employed sensor model measures the range and bearing of a landmark
    % mu: 2N+3 x 1 vector representing the state mean.
    %     The first 3 components of mu correspond to the current estimate of the robot pose [x; y; theta]
    %     The current pose estimate of the landmark with id = j is: [mu(2*j+2); mu(2*j+3)]
    % sigma: 2N+3 x 2N+3 covariance matrix
    % z: struct array containing the landmark observations.
    %    Each observation z(i) has an id z(i).id, a range z(i).range, and a bearing z(i).bearing
    % The vector observedLandmarks indicates which landmarks have been observed
    % at some point by the robot.
    % observedLandmarks(j) is false if the landmark with id = j has never been observed before.
    '''
    # % Number of measurements in this time step
    m = len(z)
    # % Number of dimensions of mu
    dim = mu.shape[0]

    '''
    % Z: vectorized form of all measurements made in this time step:
    %    [range_1; bearing_1; range_2; bearing_2; ...; range_m; bearing_m]
    % expectedZ: vectorized form of all expected measurements in the same form.
    % They are initialized here and should be filled out in the for loop below
    '''
    Z = np.zeros(m * 2)
    expectedZ = np.zeros(m * 2)

    '''
    % Iterate over the measurements and compute the H matrix
    % (stacked Jacobian blocks of the measurement function)
    % H will be 2m x 2N+3
    '''
    for ii in range(m):
        # % Get the id of the landmark corresponding to the i-th observation
        landmarkId = z[ii]['id']
        # % If the landmark is observed for the first time:
        if observedLandmarks[landmarkId - 1] == False:
            # % Initialize its pose in mu based on the measurement and the current robot pose
            mu[2 * landmarkId + 1] = mu[0] + z[ii]['range'] * np.cos(z[ii]['bearing'] + mu[2])
            mu[2 * landmarkId + 2] = mu[1] + z[ii]['range'] * np.sin(z[ii]['bearing'] + mu[2])
            observedLandmarks[landmarkId - 1] = True

        # % Add the landmark measurement to the Z vector
        Z[2 * ii: 2 * ii + 2] = [z[ii]['range'], z[ii]['bearing']]

        # % Use the current estimate of the landmark pose
        # % to compute the corresponding expected measurement in expectedZ
        delta = mu[2 * landmarkId + 1: 2 * landmarkId + 3] - mu[0: 2]
        q = delta.T @ delta
        expectedZ[2 * ii: 2 * ii + 2] = [np.sqrt(q),
                                         normalize_angle(np.arctan2(delta[1], delta[0]) - mu[2])]

        # % Compute the Jacobian Hi of the measurement function h for this observation
        # % and map it to the high-dimensional space with the mapping matrix Fxj
        Fxj = np.zeros([5, dim])
        Fxj[0:3, 0:3] = np.eye(3)
        Fxj[3, 2 * landmarkId + 1] = 1.
        Fxj[4, 2 * landmarkId + 2] = 1.
        Hi = (1 / q) * np.array(
            [[-np.sqrt(q) * delta[0], -np.sqrt(q) * delta[1], 0, np.sqrt(q) * delta[0], np.sqrt(q) * delta[1]],
             [delta[1], -delta[0], -q, -delta[1], delta[0]]])
        Hi = Hi @ Fxj

        # % Augment H with the new Hi
        if ii == 0:
            H = Hi
        else:
            H = np.vstack([H, Hi])

    # % Construct the sensor noise matrix Q
    sigma_r_squar, sigma_phi_squar = sigmot['sigma_r_squar'], sigmot['sigma_phi_squar']
    Q = np.diag([sigma_r_squar, sigma_phi_squar] * m)

    # % Compute the Kalman gain
    K = sigma @ H.T @ np.linalg.inv(H @ sigma @ H.T + Q)

    # % Compute the difference between the expected and recorded measurements.
    # % Remember to normalize the bearings after subtracting!
    # % (hint: use the normalize_all_bearings function available in tools)
    delta_Z = normalize_all_bearings(Z - expectedZ)

    # % Finish the correction step by computing the new mu and sigma.
    # % Normalize theta in the robot pose.
    mu = mu + K @ delta_Z
    mu[2] = normalize_angle(mu[2])
    sigma = (np.eye(dim) - K @ H) @ sigma
    return mu, sigma, observedLandmarks
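# normalize_all_bearings from tools is used above but not shown. A minimal
# sketch of the assumed behaviour: wrap every bearing component, i.e. every
# second entry of the stacked [range, bearing, ...] vector, into [-pi, pi).

import numpy as np

def normalize_all_bearings_sketch(z):
    """Normalize the bearing entries (odd indices) of a stacked measurement vector."""
    z = np.array(z, dtype=float)
    z[1::2] = (z[1::2] + np.pi) % (2.0 * np.pi) - np.pi
    return z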