def update_particle(particle, v_c, w_c, v_c_cov, w_c_cov, z, thk, motion_noise,
                    dt, true_pos, true_neg, alpha, beta, z_max, first_time,
                    num_particles):
    # sample the motion model
    x = np.array([[particle.x], [particle.y], [particle.theta]])
    x_next = dynamics.propogate_next_state(x, v_c + stat_filter.noise(v_c_cov),
                                           w_c + stat_filter.noise(w_c_cov), dt)
    particle.x = x_next[0][0]
    particle.y = x_next[1][0]
    particle.theta = dynamics.wrap_angle(x_next[2][0])

    # calculate weights from measurement model
    if first_time:
        particle.weight = 1.0 / num_particles
    else:
        particle.weight = ogc.likelihood_field_range_finder_model(
            z, x_next, thk, z_max, particle.occ_grid)

    # update the map for the particle
    X = [particle.x, particle.y, particle.theta]
    particle.occ_grid = ogc.occupancy_grid_mapping(particle.occ_grid, X, z, thk,
                                                   true_pos, true_neg, alpha,
                                                   beta, z_max)
    return particle
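# Usage sketch (illustrative, not from the source): one grid-based FastSLAM
# step that drives update_particle over the whole particle set and then
# resamples by weight. The control-noise model mirrors the alpha convention
# used above; the `params` container and its fields are hypothetical, and the
# repo's stat_filter module is assumed importable as used above.
def grid_slam_step(particles, v_c, w_c, z, thk, params, first_time):
    v_c_cov = params.alpha[0] * v_c ** 2 + params.alpha[1] * w_c ** 2
    w_c_cov = params.alpha[2] * v_c ** 2 + params.alpha[3] * w_c ** 2
    for p in particles:
        update_particle(p, v_c, w_c, v_c_cov, w_c_cov, z, thk,
                        params.motion_noise, params.dt, params.true_pos,
                        params.true_neg, params.alpha, params.beta,
                        params.z_max, first_time, len(particles))
    # resample in proportion to the measurement-model weights
    return stat_filter.low_variance_sampler(particles)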
def mcl_turtlebot(chi, v_c, w_c, ranges, bearings, landmark_pts, dt, alpha,
                  sigma_r, sigma_phi):
    num_points = len(chi)
    num_states = len(chi[0])
    chi_bar = np.zeros((num_points, num_states, 1))
    weights = []
    for i in range(0, num_points):
        # sample the motion model with control-dependent noise
        v_c_cov = alpha[0] * v_c * v_c + alpha[1] * w_c * w_c
        w_c_cov = alpha[2] * v_c * v_c + alpha[3] * w_c * w_c
        x_next = dynamics.propogate_next_state(
            chi[i], v_c + stat_filter.noise(v_c_cov),
            w_c + stat_filter.noise(w_c_cov), dt)
        for j in range(0, num_states):
            chi_bar[i][j] = x_next[j]

        # weight by the product of the landmark measurement likelihoods
        weight = 1
        for j in range(0, len(landmark_pts)):
            f = [ranges[j], bearings[j]]
            weight = weight * dynamics.landmark_measurement_model(
                f, chi_bar[i], landmark_pts[j], sigma_r, sigma_phi)
        weights.append(weight)

    weights = weights / np.sum(weights)
    chi = stat_filter.low_variance_sampler(chi_bar, weights)
    return chi
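# Usage sketch (illustrative): one MCL update on a uniformly initialized
# particle set. The landmark map, commanded velocities, measurements, and
# noise levels below are hypothetical values.
import numpy as np

landmark_pts = [[6.0, 4.0], [-7.0, 8.0], [6.0, -4.0]]
ranges = [7.2, 10.6, 7.2]          # simulated range measurements
bearings = [0.59, 2.29, -0.59]     # simulated bearing measurements
chi = np.random.uniform(-10, 10, (1000, 3, 1))   # 1000 pose hypotheses
chi = mcl_turtlebot(chi, 0.5, 0.1, ranges, bearings, landmark_pts,
                    dt=0.1, alpha=[0.1, 0.01, 0.01, 0.1],
                    sigma_r=0.1, sigma_phi=0.05)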
def ukf_turtlebot(mu, sigma, v_c, w_c, range_ldm, bearing_ldm, landmark_pts,
                  dt, Q, alpha):
    # control-noise covariance
    M = np.array([[alpha[0] * v_c * v_c + alpha[1] * w_c * w_c, 0.0],
                  [0.0, alpha[2] * v_c * v_c + alpha[3] * w_c * w_c]])

    # augmented state: [pose (3), control noise (2), measurement noise (2)]
    mu_aug = np.zeros((7, 1))
    sigma_aug = np.zeros((7, 7))
    for i in range(0, len(mu)):
        mu_aug[i][0] = mu[i][0]
    sigma_aug = fill_sigma_aug(sigma_aug, sigma, M, Q)

    sigma_points, wm, wc = generate_sigma_points_and_weights(mu_aug, sigma_aug)
    n = len(mu_aug)
    x_sigma_points = np.zeros((len(mu), (2 * n + 1)))
    Z_bar = np.zeros((len(Q), (2 * n + 1)))

    # pass sigma points through the motion model and predict observations
    for i in range(0, len(sigma_points[0])):
        x_sigma_points[:, [i]] = dynamics.propogate_next_state(
            sigma_points[0:3, [i]], v_c + sigma_points[3][i],
            w_c + sigma_points[4][i], dt)
        # note: this is not the same q as in the book (it is sqrt(q))
        q = dynamics.norm([landmark_pts[0] - x_sigma_points[0][i],
                           landmark_pts[1] - x_sigma_points[1][i]])
        Z_bar[:, [i]] = np.array(
            [[q],
             [np.arctan2(landmark_pts[1] - x_sigma_points[1][i],
                         landmark_pts[0] - x_sigma_points[0][i])
              - x_sigma_points[2][i]]]) + sigma_points[5:7, [i]]

    # compute Gaussian statistics
    mu_bar = np.zeros((len(mu), 1))
    sigma_bar = np.zeros((len(mu), len(mu)))
    z_hat = np.zeros((len(Q), 1))
    S = np.zeros((len(Q), len(Q)))
    sigma_xy = np.zeros((len(mu), len(Q)))
    for i in range(0, 2 * n + 1):
        mu_bar = mu_bar + wm[i] * x_sigma_points[:, [i]]
    for i in range(0, 2 * n + 1):
        sigma_bar = sigma_bar + wc[i] * np.dot(
            (x_sigma_points[:, [i]] - mu_bar),
            np.transpose((x_sigma_points[:, [i]] - mu_bar)))
    for i in range(0, 2 * n + 1):
        z_hat = z_hat + wm[i] * Z_bar[:, [i]]
    for i in range(0, 2 * n + 1):
        S = S + wc[i] * np.dot((Z_bar[:, [i]] - z_hat),
                               np.transpose((Z_bar[:, [i]] - z_hat)))
        sigma_xy = sigma_xy + wc[i] * np.dot(
            (x_sigma_points[:, [i]] - mu_bar),
            np.transpose((Z_bar[:, [i]] - z_hat)))

    # update mean and covariance
    K_plot = np.zeros((1, 1))
    K = np.dot(sigma_xy, np.linalg.inv(S))
    z = np.array([[range_ldm], [bearing_ldm]])
    mu = mu_bar + np.dot(K, (z - z_hat))
    sigma = sigma_bar - np.dot(np.dot(K, S), np.transpose(K))
    K_plot[0][0] = np.linalg.norm(K)
    return mu, sigma, K_plot
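# Sketch (assumption): fill_sigma_aug is called above but not shown here. In
# the standard augmented-state UKF, the augmented covariance is the
# block-diagonal matrix diag(sigma, M, Q); a minimal version consistent with
# the 7x7 layout used above would be:
def fill_sigma_aug(sigma_aug, sigma, M, Q):
    sigma_aug[0:3, 0:3] = sigma   # pose covariance
    sigma_aug[3:5, 3:5] = M       # control-noise covariance
    sigma_aug[5:7, 5:7] = Q       # measurement-noise covariance
    return sigma_aug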
x_err = []
y_err = []
theta_err = []
x_err.append(mu[0][0] - x[0][0])
y_err.append(mu[0][1] - x[0][1])
theta_err.append(mu[0][2] - x[0][2])
K_plot = [[], [], []]
t_plot = [[], [], []]

plt.ion()
for i in range(0, len(v_c) - 1):
    # propagate the true state and log it for plotting
    x_next = dynamics.propogate_next_state(x[i], v[i], w[i], dt)
    x.append(x_next)
    x_plot.append(x[i + 1][0])
    y_plot.append(x[i + 1][1])
    theta_plot.append(x[i + 1][2])

    # simulate measurements and run one filter update
    ranges, bearings = simulate_sensor_data(x_next, sigma_r, sigma_phi)
    mu_next, sigma, K_out = ekf.ekf_turtlebot(mu[i], sigma, v_c[i], w_c[i],
                                              ranges, bearings, landmark_pts,
                                              dt, Q, alpha)
    # landmark_idx = i % len(landmark_pts)
    # mu_next, sigma, K_out = ukf.ukf_turtlebot(mu[i], sigma, v_c[i], w_c[i],
    #     ranges[landmark_idx], bearings[landmark_idx],
    #     landmark_pts[landmark_idx], dt, Q, alpha)
    mu.append(mu_next)
    # K_plot[landmark_idx].append(K_out[0][0])
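    # Sketch (not from the source): the error lists initialized above are
    # presumably extended here, in the same style as their first entries, so
    # estimation error can be plotted after the run.
    x_err.append(mu[i + 1][0] - x[i + 1][0])
    y_err.append(mu[i + 1][1] - x[i + 1][1])
    theta_err.append(mu[i + 1][2] - x[i + 1][2])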
def ekf_slam_turtlebot(mu, sigma, v_c, w_c, ranges, bearings, correspondence,
                       dt, Q, alpha):
    # note: ekf_slam_turtlebot.seen must be initialized to [] before the first
    # call; it persists the list of landmark ids observed so far.
    num_states = len(mu)
    theta = mu[2][0]

    F = np.zeros((3, num_states))
    F[0][0] = 1
    F[1][1] = 1
    F[2][2] = 1

    mu[0:3] = dynamics.propogate_next_state(mu[0:3], v_c, w_c, dt)

    G = np.array([[0.0, 0.0, -v_c / w_c * np.cos(theta)
                   + v_c / w_c * np.cos(theta + w_c * dt)],
                  [0.0, 0.0, -v_c / w_c * np.sin(theta)
                   + v_c / w_c * np.sin(theta + w_c * dt)],
                  [0.0, 0.0, 0.0]])
    G = np.eye(num_states) + np.dot(F.T, np.dot(G, F))

    V = np.array([[(-np.sin(theta) + np.sin(theta + w_c * dt)) / w_c,
                   (v_c * (np.sin(theta) - np.sin(theta + w_c * dt)))
                   / (w_c * w_c)
                   + (v_c * np.cos(theta + w_c * dt) * dt) / w_c],
                  [(np.cos(theta) - np.cos(theta + w_c * dt)) / w_c,
                   (-v_c * (np.cos(theta) - np.cos(theta + w_c * dt)))
                   / (w_c * w_c)
                   + (v_c * np.sin(theta + w_c * dt) * dt) / w_c],
                  [0.0, dt]])
    M = np.array([[alpha[0] * v_c * v_c + alpha[1] * w_c * w_c, 0.0],
                  [0.0, alpha[2] * v_c * v_c + alpha[3] * w_c * w_c]])

    sigma_bar = np.dot(np.dot(G, sigma), G.T) + np.dot(
        F.T, np.dot(np.dot(np.dot(V, M), V.T), F))

    for i in range(0, len(ranges)):
        j = correspondence[i]
        if ekf_slam_turtlebot.seen.count(j) < 1:
            # first sighting: initialize the landmark estimate and grow the
            # state and covariance
            ekf_slam_turtlebot.seen.append(j)
            mu_j = mu[0:2] + np.array(
                [ranges[i] * np.cos(bearings[i] + mu[2][0]),
                 ranges[i] * np.sin(bearings[i] + mu[2][0])])
            mu = np.append(mu, mu_j, axis=0)
            sigma_bar = np.append(sigma_bar,
                                  np.zeros((2, len(sigma_bar))), axis=0)
            sigma_bar = np.append(sigma_bar,
                                  np.zeros((len(sigma_bar), 2)), axis=1)
            sigma_bar[len(sigma_bar) - 1][len(sigma_bar) - 1] = 10
            sigma_bar[len(sigma_bar) - 2][len(sigma_bar) - 2] = 10

        idx = ekf_slam_turtlebot.seen.index(j)
        delta = mu[3 + 2 * idx:3 + 2 * (idx + 1)] - mu[0:2]
        q = np.dot(delta.T, delta)[0][0]
        z_hat = np.array([[math.sqrt(q)],
                          [np.arctan2(delta[1][0], delta[0][0]) - mu[2][0]]])

        Fx_j = np.zeros((6, len(mu)))
        Fx_j[0][0] = 1
        Fx_j[1][1] = 1
        Fx_j[2][2] = 1
        Fx_j[3][3 + 2 * idx] = 1
        Fx_j[4][4 + 2 * idx] = 1

        H = [[-1.0 * math.sqrt(q) * delta[0][0],
              -1.0 * math.sqrt(q) * delta[1][0], 0.0,
              math.sqrt(q) * delta[0][0], math.sqrt(q) * delta[1][0], 0.0],
             [delta[1][0], -delta[0][0], -q,
              -delta[1][0], delta[0][0], 0.0]]
        H = 1 / q * np.dot(H, Fx_j)

        S = np.dot(np.dot(H, sigma_bar), H.T) + Q
        K = np.dot(np.dot(sigma_bar, H.T), np.linalg.inv(S))
        z = np.array([[ranges[i]], [bearings[i]]])
        z_int = z - z_hat
        z_int[1] = dynamics.wrap_angle(z_int[1])
        mu = mu + np.dot(K, z_int)
        sigma_bar = np.dot((np.eye(len(mu)) - np.dot(K, H)), sigma_bar)

    sigma = sigma_bar
    return mu, sigma
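# Usage sketch (illustrative): reset the function-attribute bookkeeping, then
# run one EKF-SLAM step. The initial state, noise values, and measurements
# below are hypothetical.
import numpy as np

ekf_slam_turtlebot.seen = []          # must be cleared before a new run
mu = np.zeros((3, 1))                 # robot pose only; landmarks get appended
sigma = np.eye(3) * 0.01
Q = np.diag([0.01, 0.0025])           # range / bearing measurement noise
mu, sigma = ekf_slam_turtlebot(mu, sigma, v_c=0.5, w_c=0.1,
                               ranges=[4.1], bearings=[0.3],
                               correspondence=[0], dt=0.1, Q=Q,
                               alpha=[0.1, 0.01, 0.01, 0.1])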
def ekf_turtlebot(mu, sigma, v_c, w_c, ranges, bearings, landmark_pts, dt, Q,
                  alpha):
    theta = mu[2][0]

    # Jacobians of the motion model w.r.t. the state (G) and the controls (V)
    G = np.array([[1.0, 0.0, -v_c / w_c * np.cos(theta)
                   + v_c / w_c * np.cos(theta + w_c * dt)],
                  [0.0, 1.0, -v_c / w_c * np.sin(theta)
                   + v_c / w_c * np.sin(theta + w_c * dt)],
                  [0.0, 0.0, 1.0]])
    V = np.array([[(-np.sin(theta) + np.sin(theta + w_c * dt)) / w_c,
                   (v_c * (np.sin(theta) - np.sin(theta + w_c * dt)))
                   / (w_c * w_c)
                   + (v_c * np.cos(theta + w_c * dt) * dt) / w_c],
                  [(np.cos(theta) - np.cos(theta + w_c * dt)) / w_c,
                   (-v_c * (np.cos(theta) - np.cos(theta + w_c * dt)))
                   / (w_c * w_c)
                   + (v_c * np.sin(theta + w_c * dt) * dt) / w_c],
                  [0.0, dt]])
    M = np.array([[alpha[0] * v_c * v_c + alpha[1] * w_c * w_c, 0.0],
                  [0.0, alpha[2] * v_c * v_c + alpha[3] * w_c * w_c]])

    # prediction step
    mu = dynamics.propogate_next_state(mu, v_c, w_c, dt)
    sigma_bar = np.dot(np.dot(G, sigma), np.transpose(G)) + np.dot(
        np.dot(V, M), np.transpose(V))

    # measurement update, one landmark at a time
    K_plot = np.full((len(landmark_pts), 1), 0.0)
    for j in range(0, len(landmark_pts)):
        # note: this is not the same q as in the book (it is sqrt(q))
        q = dynamics.norm([landmark_pts[j][0] - mu[0][0],
                           landmark_pts[j][1] - mu[1][0]])
        z_hat = np.array([[q],
                          [np.arctan2(landmark_pts[j][1] - mu[1][0],
                                      landmark_pts[j][0] - mu[0][0])
                           - mu[2][0]]])
        H = np.array([[-(landmark_pts[j][0] - mu[0][0]) / q,
                       -(landmark_pts[j][1] - mu[1][0]) / q, 0.0],
                      [(landmark_pts[j][1] - mu[1][0]) / (q * q),
                       -(landmark_pts[j][0] - mu[0][0]) / (q * q), -1.0]])
        S = np.dot(np.dot(H, sigma_bar), np.transpose(H)) + Q
        K = np.dot(np.dot(sigma_bar, np.transpose(H)), np.linalg.inv(S))
        z = np.array([[ranges[j]], [bearings[j]]])
        mu = mu + np.dot(K, (z - z_hat))
        sigma_bar = np.dot((np.eye(3) - np.dot(K, H)), sigma_bar)
        K_plot[j][0] = np.linalg.norm(K)

    sigma = sigma_bar
    return mu, sigma, K_plot
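# Usage sketch (illustrative): one EKF localization step; the map, commands,
# measurements, and noise levels below are hypothetical values.
import numpy as np

landmark_pts = [[6.0, 4.0], [-7.0, 8.0]]
mu = np.array([[0.0], [0.0], [0.0]])
sigma = np.eye(3) * 0.1
Q = np.diag([0.01, 0.0025])
mu, sigma, K_plot = ekf_turtlebot(mu, sigma, 0.5, 0.1,
                                  ranges=[7.2, 10.6], bearings=[0.59, 2.29],
                                  landmark_pts=landmark_pts, dt=0.1, Q=Q,
                                  alpha=[0.1, 0.01, 0.01, 0.1])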
def fast_slam_known_correspondence_turtlebot(v_c, w_c, ranges, bearings,
                                             correspondences, particles, dt, Q,
                                             alpha):
    # weight scale calculated from a fit over experiments
    exponent = -0.0556 * len(ranges) + 8.5556
    scale = 10 ** exponent

    for i in range(len(particles)):
        # sample the motion model
        v_c_cov = alpha[0] * v_c * v_c + alpha[1] * w_c * w_c
        w_c_cov = alpha[2] * v_c * v_c + alpha[3] * w_c * w_c
        x = [[particles[i].x], [particles[i].y], [particles[i].theta]]
        x_next = dynamics.propogate_next_state(
            x, v_c + stat_filter.noise(v_c_cov),
            w_c + stat_filter.noise(w_c_cov), dt)
        particles[i].x = x_next[0][0]
        particles[i].y = x_next[1][0]
        particles[i].theta = dynamics.wrap_angle(x_next[2][0])

        particles[i].weight = 1.0
        for k in range(len(ranges)):
            j = correspondences[k]
            if particles[i].seen.count(j) < 1:
                # first sighting: initialize the landmark from the measurement
                particles[i].seen.append(j)
                x = particles[i].x + ranges[k] * np.cos(bearings[k]
                                                        + particles[i].theta)
                y = particles[i].y + ranges[k] * np.sin(bearings[k]
                                                        + particles[i].theta)
                ldm = landmark.Landmark(j, x, y)
                particles[i].landmarks.append(ldm)
                H = calc_H(ldm, particles[i])[0]
                H_inv = np.linalg.inv(H)
                ldm.sigma = np.dot(np.dot(H_inv, Q), H_inv.T)
                ldm.weight = scale * 1.0 / len(particles)
                particles[i].weight *= ldm.weight
            else:
                # EKF update of the landmark estimate
                idx = particles[i].seen.index(j)
                ldm = particles[i].landmarks[idx]
                z_hat, H = calc_z_hat_and_H(ldm, particles[i])
                S = np.dot(np.dot(H, ldm.sigma), H.T) + Q
                K = np.dot(np.dot(ldm.sigma, H.T), np.linalg.inv(S))
                z = np.array([[ranges[k]], [bearings[k]]])
                res = np.array(z - z_hat)
                res[1][0] = dynamics.wrap_angle(res[1][0])
                mu = np.array([[ldm.x], [ldm.y]]) + np.dot(K, res)
                ldm.x = mu[0][0]
                ldm.y = mu[1][0]
                ldm.sigma = np.dot(np.eye(2) - np.dot(K, H), ldm.sigma)
                particles[i].landmarks[idx] = ldm
                # note: the textbook importance weight uses |2*pi*S|**-0.5;
                # this scaled variant was tuned empirically
                ldm.weight = scale * (np.linalg.det(2 * np.pi * S) * np.exp(
                    -.5 * np.dot(np.dot(res.T, np.linalg.inv(S)), res))[0][0])
                particles[i].weight *= ldm.weight

        print("Weights: ", particles[i].weight, " For: ", i)

    particles = fast_slam_particle.normalize_weights(particles)
    new_particles = stat_filter.low_variance_sampler(particles)
    return new_particles
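# Usage sketch (illustrative): one FastSLAM step with known correspondences.
# The particle constructor and its keyword arguments are assumptions inferred
# from the attributes used above (x, y, theta, weight, seen, landmarks); the
# commands, measurements, and noise values are hypothetical.
import numpy as np

Q = np.diag([0.01, 0.0025])
particles = [fast_slam_particle.Particle(x=0.0, y=0.0, theta=0.0)
             for _ in range(100)]
particles = fast_slam_known_correspondence_turtlebot(
    0.5, 0.1, ranges=[4.1, 7.7], bearings=[0.3, -1.2],
    correspondences=[0, 1], particles=particles, dt=0.1, Q=Q,
    alpha=[0.1, 0.01, 0.01, 0.1])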