def plot_correlation_covariance():
    """Plot the 1/2/3-sigma ellipses of a strongly correlated covariance.

    Draws the ellipses for cov = [[4, 3.9], [3.9, 4]] centered at (5, 10),
    with dashed crosshairs and a translucent marker at (7.5, 12.5) to
    illustrate how correlation constrains the joint estimate.
    """
    cov = [[4, 3.9],
           [3.9, 4]]
    plot_covariance_ellipse((5, 10), cov, edgecolor='k',
                            variance=[1, 2**2, 3**2])
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.gca().autoscale(tight=True)
    # dashed guide lines crossing at the highlighted point
    for guide, where in ((plt.axvline, 7.5), (plt.axhline, 12.5)):
        guide(where, ls='--', lw=1)
    plt.scatter(7.5, 12.5, s=2000, alpha=0.5)
    plt.title('|4.0 3.9|\n|3.9 4.0|')
    plt.show()
def plot_3_covariances():
    """Show three covariance ellipses side by side.

    Left: circular (uncorrelated, equal variance). Middle: axis-aligned
    but unequal variance. Right: correlated. All centered at (2, 7) with
    a shared y-range and equal aspect so the shapes compare directly.
    """
    # NOTE: per-subplot statement order (ellipse vs. ylim/aspect) follows
    # the original exactly, since it can affect autoscaling.
    plt.subplot(131)
    plot_covariance_ellipse((2, 7), cov=[[2, 0], [0, 2]], facecolor='g',
                            alpha=0.2, title='|2 0|\n|0 2|',
                            axis_equal=False)
    plt.ylim((4, 10))
    plt.gca().set_aspect('equal', adjustable='box')

    plt.subplot(132)
    plt.ylim((4, 10))
    plt.gca().set_aspect('equal', adjustable='box')
    plot_covariance_ellipse((2, 7), [[2, 0], [0, 9]], facecolor='g',
                            alpha=0.2, axis_equal=False,
                            title='|2 0|\n|0 9|')

    plt.subplot(133)
    plt.ylim((4, 10))
    plt.gca().set_aspect('equal', adjustable='box')
    plot_covariance_ellipse((2, 7), [[2, 1.2], [1.2, 2]], facecolor='g',
                            alpha=0.2, axis_equal=False,
                            title='|2 1.2|\n|1.2 2|')

    plt.tight_layout()
    plt.show()
# NOTE(review): interior fragment of a larger EKF localization routine.
# xp, u, v, w, a1..a4, V, ekf, m, sigma_r, sigma_h, H_of, Hx, residual all
# come from the enclosing (unseen) scope. Line breaks/indentation were
# reconstructed from a flattened source — confirm against the original.
# Motion-noise covariance in control space (velocity v, angular rate w).
M = array([[a1 * v**2 + a2 * w**2, 0],
           [0, a3 * v**2 + a4 * w**2]])
# Project control-space noise into state space through the Jacobian V.
ekf.Q = dot(V, M).dot(V.T)
ekf.predict(u=u)
for lmark in m:
    # Simulated noisy range to the landmark from the true pose xp.
    d = sqrt((lmark[0] - xp[0, 0])**2 + (lmark[1] - xp[1, 0])**2) + randn() * sigma_r
    # Simulated noisy bearing, relative to the robot heading xp[2].
    a = atan2(lmark[1] - xp[1, 0], lmark[0] - xp[0, 0]) - xp[2, 0] + randn() * sigma_h
    z = np.array([[d], [a]])
    ekf.update(z, HJacobian=H_of, Hx=Hx, residual=residual,
               args=(lmark), hx_args=(lmark))
# Posterior position uncertainty after fusing all landmark measurements.
plot_covariance_ellipse((ekf.x[0, 0], ekf.x[1, 0]), ekf.P[0:2, 0:2],
                        std=10, facecolor='g', alpha=0.3)
#plt.plot(ekf.x[0], ekf.x[1], 'x', color='r')
plt.axis('equal')
plt.show()
# NOTE(review): interior fragment of a UKF localization run. cmds, xp, dt,
# wheelbase, ukf, m, sigma_r, sigma_h come from the enclosing (unseen)
# scope. Indentation reconstructed from a flattened source — confirm.
cindex = 0
u = cmds[0]
track = []   # simulated ground-truth trajectory
std = 16
while cindex < len(cmds):
    u = cmds[cindex]
    xp = move(xp, u, dt, wheelbase)  # simulate robot
    track.append(xp)
    ukf.predict(fx_args=u)
    if cindex % 20 == 0:
        # Every 20th step, draw the predicted position uncertainty.
        plot_covariance_ellipse((ukf.x[0], ukf.x[1]), ukf.P[0:2, 0:2],
                                std=std, facecolor='b', alpha=0.58)
        #print(cindex, ukf.P.diagonal())
    #print(ukf.P.diagonal())
    z = []
    for lmark in m:
        # Noisy range and normalized bearing to each landmark.
        d = sqrt((lmark[0] - xp[0])**2 + (lmark[1] - xp[1])**2) + randn()*sigma_r
        bearing = atan2(lmark[1] - xp[1], lmark[0] - xp[0])
        a = normalize_angle(bearing - xp[2] + randn()*sigma_h)
        z.extend([d, a])
        #if cindex % 20 == 0:
        #    plt.plot([lmark[0], lmark[0] - d*cos(a+xp[2])], [lmark[1], lmark[1]-d*sin(a+xp[2])], color='r')
        # NOTE(review): d2/a2 are not defined in this fragment, and the
        # statement below is truncated mid-call (continues past this chunk).
        if cindex == 1197:
            plt.plot([lmark[0], lmark[0] - d2*cos(a2+xp[2])],
# NOTE(review): interior fragment of a UKF localization run (near-duplicate
# of another chunk in this file). cindex, cmds, xp, dt, wheelbase, ukf, m,
# sigma_r, sigma_h come from the enclosing (unseen) scope. Indentation
# reconstructed from a flattened source — confirm against the original.
u = cmds[0]
track = []   # simulated ground-truth trajectory
std = 16
while cindex < len(cmds):
    u = cmds[cindex]
    xp = move(xp, u, dt, wheelbase)  # simulate robot
    track.append(xp)
    ukf.predict(fx_args=u)
    if cindex % 20 == 0:
        # Every 20th step, draw the predicted position uncertainty.
        plot_covariance_ellipse((ukf.x[0], ukf.x[1]), ukf.P[0:2, 0:2],
                                std=std, facecolor='b', alpha=0.58)
        #print(cindex, ukf.P.diagonal())
    #print(ukf.P.diagonal())
    z = []
    for lmark in m:
        # Noisy range and normalized bearing to each landmark; the flat
        # measurement vector interleaves [d0, a0, d1, a1, ...].
        d = sqrt((lmark[0] - xp[0])**2 + (lmark[1] - xp[1])**2) + randn() * sigma_r
        bearing = atan2(lmark[1] - xp[1], lmark[0] - xp[0])
        a = normalize_angle(bearing - xp[2] + randn() * sigma_h)
        z.extend([d, a])
        #if cindex % 20 == 0:
        #    plt.plot([lmark[0], lmark[0] - d*cos(a+xp[2])], [lmark[1], lmark[1]-d*sin(a+xp[2])], color='r')
# NOTE(review): interior fragment of an EKF localization simulation.
# xp, P, u, x, c, m, sigma_r, sigma_h, ekfloc_predict, ekfloc2 come from
# the enclosing (unseen) scope. Indentation reconstructed — confirm.
dt = 0.1
np.random.seed(1234)   # reproducible measurement noise
for i in range(1000):
    xp, _ = ekfloc_predict(xp, P, u, dt)
    plt.plot(xp[0], xp[1], 'x', color='g', ms=20)
    if i % 10 == 0:
        # Collect one (range, bearing) measurement per landmark.
        zs = []
        for lmark in m:
            d = sqrt((lmark[0] - xp[0, 0])**2 + (lmark[1] - xp[1, 0])**2) + randn() * sigma_r
            a = atan2(lmark[1] - xp[1, 0], lmark[0] - xp[0, 0]) - xp[2, 0] + randn() * sigma_h
            zs.append(np.array([d, a]))
        # Full EKF cycle over the accumulated interval (10 prediction steps).
        x, P = ekfloc2(x, P, u, zs, c, m, dt * 10)
        # Skip plotting while the covariance is still huge (unconverged).
        if P[0, 0] < 10000:
            plot_covariance_ellipse((x[0, 0], x[1, 0]), P[0:2, 0:2],
                                    std=2, facecolor='g', alpha=0.3)
plt.plot(x[0], x[1], 'x', color='r')
plt.axis('equal')
plt.show()
V = array( [[(-sinh + sinhwdt)/w, v*(sin(h)-sinhwdt)/(w**2) + v*coshwdt*dt/w], [(cosh - coshwdt)/w, -v*(cosh-coshwdt)/(w**2) + v*sinhwdt*dt/w], [0, dt]]) # covariance of motion noise in control space M = array([[a1*v**2 + a2*w**2, 0], [0, a3*v**2 + a4*w**2]]) ekf.Q = dot(V, M).dot(V.T) ekf.predict(u=u) for lmark in m: d = sqrt((lmark[0] - xp[0, 0])**2 + (lmark[1] - xp[1, 0])**2) + randn()*sigma_r a = atan2(lmark[1] - xp[1, 0], lmark[0] - xp[0, 0]) - xp[2, 0] + randn()*sigma_h z = np.array([[d], [a]]) ekf.update(z, HJacobian=H_of, Hx=Hx, residual=residual, args=(lmark), hx_args=(lmark)) plot_covariance_ellipse((ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2], std=10, facecolor='g', alpha=0.3) #plt.plot(ekf.x[0], ekf.x[1], 'x', color='r') plt.axis('equal') plt.show()
# NOTE(review): interior fragment of an EKF localization demo. ekf, m, dt,
# sigma_r, sigma_h, H_of, Hx, residual come from the enclosing (unseen)
# scope. Indentation reconstructed from a flattened source — confirm.
u = array([1.1, .01])   # constant control: velocity, steering
xp = ekf.x.copy()       # simulated true pose, driven separately from the filter
plt.figure()
plt.scatter(m[:, 0], m[:, 1])   # landmark positions
for i in range(250):
    xp = ekf.move(xp, u, dt/10.)  # simulate robot
    plt.plot(xp[0], xp[1], ',', color='g')
    if i % 10 == 0:
        # Filter runs at 1/10th of the simulation rate.
        ekf.predict(u=u)
        # Prior (blue) uncertainty before the measurement update.
        plot_covariance_ellipse((ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],
                                std=3, facecolor='b', alpha=0.08)
        for lmark in m:
            # Simulated noisy range and bearing from the true pose xp.
            d = sqrt((lmark[0] - xp[0, 0])**2 + (lmark[1] - xp[1, 0])**2) + randn()*sigma_r
            a = atan2(lmark[1] - xp[1, 0], lmark[0] - xp[0, 0]) - xp[2, 0] + randn()*sigma_h
            z = np.array([[d], [a]])
            ekf.update(z, HJacobian=H_of, Hx=Hx, residual=residual,
                       args=(lmark), hx_args=(lmark))
        # Posterior (green) uncertainty after fusing all landmarks.
        plot_covariance_ellipse((ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],
                                std=3, facecolor='g', alpha=0.4)
#plt.plot(ekf.x[0], ekf.x[1], 'x', color='r')
plt.axis('equal')
# NOTE(review): interior fragment of an EKF localization simulation
# (near-duplicate of another chunk in this file). x, P, u, c, m, sigma_r,
# sigma_h, ekfloc_predict, ekfloc2 come from the enclosing (unseen) scope.
# Indentation reconstructed from a flattened source — confirm.
plt.plot(m[:, 0], m[:, 1], 'o')                 # landmarks
plt.plot(x[0], x[1], 'x', color='b', ms=20)     # initial estimate
xp = x.copy()   # simulated true pose, propagated separately from the filter
dt = 0.1
np.random.seed(1234)   # reproducible measurement noise
for i in range(1000):
    xp, _ = ekfloc_predict(xp, P, u, dt)
    plt.plot(xp[0], xp[1], 'x', color='g', ms=20)
    if i % 10 == 0:
        # Collect one (range, bearing) measurement per landmark.
        zs = []
        for lmark in m:
            d = sqrt((lmark[0] - xp[0, 0])**2 + (lmark[1] - xp[1, 0])**2) + randn()*sigma_r
            a = atan2(lmark[1] - xp[1, 0], lmark[0] - xp[0, 0]) - xp[2, 0] + randn()*sigma_h
            zs.append(np.array([d, a]))
        # Full EKF cycle over the accumulated interval (10 prediction steps).
        x, P = ekfloc2(x, P, u, zs, c, m, dt*10)
        # Skip plotting while the covariance is still huge (unconverged).
        if P[0,0] < 10000:
            plot_covariance_ellipse((x[0,0], x[1,0]), P[0:2, 0:2],
                                    std=2, facecolor='g', alpha=0.3)
plt.plot(x[0], x[1], 'x', color='r')
plt.axis('equal')
plt.show()