def propagate(T0, P, Upsilon, Q, method, dt, g, cholQ=0):
    """Propagate state for one time step"""
    Gamma = f_Gamma(g, dt)
    Phi = f_flux(T0, dt)
    # propagate the mean
    T = Gamma.mm(Phi).mm(Upsilon)

    # Jacobian for propagating prior along time
    F = torch.eye(9)
    F[6:9, 3:6] = torch.eye(3) * dt

    # compute Adjoint of right transformation mean
    AdUps = SE3_2.uAd(SE3_2.uinv(Upsilon))

    Pprime = axat(AdUps.mm(F), P)
    # compound the covariances based on the second-order method
    Pprop = Pprime + Q

    if method == 1:
        # add fourth-order method
        Pprop += four_order(Pprime, Q)

    elif method == 2:
        # Monte Carlo method
        n_tot_samples = 1000000
        nsamples = 50000
        N = int(n_tot_samples / nsamples) + 1
        tmp = torch.cholesky(P + 1e-20 * torch.eye(9))
        cholP = tmp.cuda().expand(nsamples, 9, 9)
        cholQ = cholQ.cuda().expand(nsamples, 9, 9)
        Pprop = torch.zeros(9, 9)
        Gamma = Gamma.cuda().expand(nsamples, 5, 5)
        Upsilon = Upsilon.cuda().expand(nsamples, 5, 5)
        T0 = T0.cuda().expand(nsamples, 5, 5)
        T_inv = T.inverse().cuda().expand(nsamples, 5, 5)
        for i in range(N):
            xi0 = bmv(cholP, torch.randn(nsamples, 9).cuda())
            w = bmv(cholQ, torch.randn(nsamples, 9).cuda())
            T0_i = T0.bmm(SE3_2.exp(xi0))
            Phi = f_flux(T0_i, dt)
            Upsilon_i = Upsilon.bmm(SE3_2.exp(w))
            T_i = Gamma.bmm(Phi).bmm(Upsilon_i)
            xi = SE3_2.log(T_inv.bmm(T_i))
            xi_mean = xi.mean(dim=0)
            Pprop += bouter(xi - xi_mean, xi - xi_mean).sum(dim=0).cpu()
        Pprop = Pprop / (N * nsamples + 1)

    Pprop = (Pprop + Pprop.t()) / 2  # enforce symmetry
    return T, Pprop
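
# The helpers axat, bmv and bouter used above are not defined in this listing.
# Below is a minimal sketch with plausible definitions, inferred from how they
# are called (axat(A, X) = A X A^T, batched matrix-vector product, batched
# outer product); the repository's actual utility module may differ.

import torch


def axat(A, X):
    # congruence transform A X A^T, used to push a covariance through a Jacobian
    return A.mm(X).mm(A.t())


def bmv(mats, vecs):
    # batched matrix-vector product: (n, d, d) x (n, d) -> (n, d)
    return torch.einsum('nij,nj->ni', mats, vecs)


def bouter(a, b):
    # batched outer product: (n, d) x (n, d) -> (n, d, d)
    return torch.einsum('ni,nj->nij', a, b)
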
def compute_results(T, i_max, T_est, Sigma_est, SigmaSO3, Sigma_est_mc):
    results = torch.zeros(3)
    # Methods on SE_2(3)
    chi_diff = SE3_2.uinv(T_est[-1]).expand(i_max, 5, 5).bmm(T[:, -1])
    xi = SE3_2.log(chi_diff.cuda()).cpu()
    s_nees = compute_nees(Sigma_est, xi)
    mc_nees = compute_nees(Sigma_est_mc, xi)
    results[0] = s_nees
    results[1] = mc_nees
    # Method on SO(3)
    xi = SE3_2.boxminus(T_est[-1].expand(i_max, 5, 5).cuda(), T[:, -1].cuda()).cpu()
    s_nees = compute_nees(SigmaSO3, xi)
    results[2] = s_nees
    return results
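
# compute_nees is assumed here as well. The sketch below averages the normalized
# estimation error squared xi^T Sigma^{-1} xi over the Monte Carlo runs and
# divides by the state dimension; whether the repository applies that final
# normalization is an assumption.

def compute_nees(Sigma, xi):
    # xi: (i_max, 9) error samples, Sigma: (9, 9) estimated covariance
    Sigma_inv = torch.inverse(Sigma)
    nees = torch.einsum('ni,ij,nj->n', xi, Sigma_inv, xi)
    return nees.mean() / 9  # averaged NEES, normalized by the state dimension
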
def propagate(T0, Sigma, Upsilon, Q, method, dt, g):
    """Propagate state for one time step"""
    Gamma = f_Gamma(g, dt)
    Phi = f_flux(T0, dt)
    # propagate the mean
    T = Gamma.mm(Phi).mm(Upsilon)

    # Jacobian for propagating prior along time
    F = torch.eye(9)
    F[6:9, 3:6] = torch.eye(3) * dt

    # compute Adjoint of right transformation mean
    AdUps = SE3_2.uAd(SE3_2.uinv(Upsilon))

    Sigma_tmp = axat(AdUps.mm(F), Sigma)
    # compound the covariances based on the second-order method
    Sigma_prop = Sigma_tmp + Q

    if method == 1:
        # add fourth-order method
        Sigma_prop += four_order(Sigma_tmp, Q)

    elif method == 2:
        # baseline method on SO(3) x R^6
        wedge_acc = SO3.uwedge(Upsilon[:3, 3])  # already multiplied by dt
        F = torch.eye(9)
        # velocity error w.r.t. rotation error: -R (a dt)^
        F[3:6, :3] = -T0[:3, :3].mm(wedge_acc)
        F[6:9, :3] = F[3:6, :3] * dt / 2
        F[6:9, 3:6] = dt * torch.eye(3)
        G = torch.zeros(9, 6)
        G[:3, :3] = dt * T0[:3, :3].t()
        G[3:6, 3:6] = T0[:3, :3] * dt
        G[6:9, 3:6] = 1 / 2 * T0[:3, :3] * (dt**2)
        Sigma_prop = axat(F, Sigma) + axat(G, Q[:6, :6] / (dt**2))

    Sigma_prop = (Sigma_prop + Sigma_prop.t()) / 2  # enforce symmetry
    return T, Sigma_prop
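
# f_Gamma and f_flux implement the discrete-time kinematics behind the mean
# update T = Gamma . Phi(T0) . Upsilon. The sketch below follows the standard
# model (gravity increment in Gamma, position flux p <- p + v*dt in Phi); it is
# an assumption about these helpers, not a verbatim copy of the repository.

def f_Gamma(g, dt):
    # gravity factor: adds g*dt to the velocity and 0.5*g*dt^2 to the position
    Gamma = torch.eye(5)
    Gamma[:3, 3] = g * dt
    Gamma[:3, 4] = 0.5 * g * (dt**2)
    return Gamma


def f_flux(T, dt):
    # flux: propagates the position with the current velocity, p <- p + v*dt
    # (works for a single (5, 5) pose or a batch of (n, 5, 5) poses)
    Phi = T.clone()
    Phi[..., :3, 4] = T[..., :3, 4] + dt * T[..., :3, 3]
    return Phi
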
def propagate(T0, Sigma, Upsilon, Q, method, dt, g, cholQ=0):
    """Propagate state for one time step"""
    Gamma = f_Gamma(g, dt)
    Phi = f_flux(T0, dt)
    # propagate the mean
    T = Gamma.mm(Phi).mm(Upsilon)

    # Jacobian for propagating prior along time
    F = torch.eye(9)
    F[6:9, 3:6] = torch.eye(3) * dt

    # compute Adjoint of right transformation mean
    AdUps = SE3_2.uAd(SE3_2.uinv(Upsilon))

    Sigma_tmp = axat(AdUps.mm(F), Sigma)
    # compound the covariances based on the second-order method
    Sigma_prop = Sigma_tmp + Q

    if method == 1:
        # add fourth-order method
        Sigma_prop += four_order(Sigma_tmp, Q)

    Sigma_prop = (Sigma_prop + Sigma_prop.t()) / 2  # enforce symmetry
    return T, Sigma_prop
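
# Minimal usage sketch for the propagation above (method=1 adds the fourth-order
# correction). All numerical values are illustrative, and the call assumes torch
# and the module helpers (SE3_2, f_Gamma, f_flux, axat, four_order) are available
# as in the rest of the code.

dt = 0.05                              # integration step (s)
g = torch.Tensor([0, 0, -9.81])        # gravity vector
T0 = torch.eye(5)                      # initial extended pose in SE_2(3)
Sigma0 = 1e-4 * torch.eye(9)           # initial covariance
Q = 1e-6 * torch.eye(9)                # propagation noise covariance

# hypothetical IMU increment: no rotation, accelerometer reading a
a = torch.Tensor([0.1, 0.0, 9.81])
Upsilon = torch.eye(5)
Upsilon[:3, 3] = a * dt                # velocity increment
Upsilon[:3, 4] = 0.5 * a * (dt**2)     # position increment

T1, Sigma1 = propagate(T0, Sigma0, Upsilon, Q, method=1, dt=dt, g=g)
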
def compound(T0, Sigma, Upsilon, Q, method, dt, g, cholQ=0):
    """Compound the state and its covariance with one increment"""
    Gamma = f_Gamma(g, dt)
    Phi = f_flux(T0, dt)
    # compound the mean
    T = Gamma.mm(Phi).mm(Upsilon)

    # Jacobian for propagating prior along time
    F = torch.eye(9)
    F[6:9, 3:6] = torch.eye(3) * dt

    # compute Adjoint of right transformation mean
    AdUps = SE3_2.uAd(SE3_2.uinv(Upsilon))

    Sigma_tmp = axat(AdUps.mm(F), Sigma)
    # compound the covariances based on the second-order method
    Sigma_prop = Sigma_tmp + Q

    if method == 3:
        # baseline SO(3) x R^6
        wedge_acc = SO3.uwedge(Upsilon[:3, 3])  # already multiplied by dt
        F = torch.eye(9)
        # velocity error w.r.t. rotation error: -R (a dt)^
        F[3:6, :3] = -T0[:3, :3].mm(wedge_acc)
        F[6:9, :3] = F[3:6, :3] * dt / 2
        F[6:9, 3:6] = dt * torch.eye(3)
        G = torch.zeros(9, 6)
        G[:3, :3] = T0[:3, :3].t()
        G[3:6, 3:6] = T0[:3, :3]
        G[6:9, 3:6] = 1 / 2 * T0[:3, :3] * dt
        Sigma_prop = axat(F, Sigma) + axat(G, Q[:6, :6])

    elif method == 4:
        # Monte Carlo method
        n_tot_samples = 100000
        nsamples = 50000
        N = int(n_tot_samples / nsamples) + 1
        # sample prior perturbations from the input covariance
        tmp = torch.cholesky(Sigma + 1e-16 * torch.eye(9))
        cholP = tmp.cuda().expand(nsamples, 9, 9)
        cholQ = cholQ.cuda().expand(nsamples, 9, 9)
        Sigma_prop = torch.zeros(9, 9)
        Gamma = Gamma.cuda().expand(nsamples, 5, 5)
        Upsilon = Upsilon.cuda().expand(nsamples, 5, 5)
        T0 = T0.cuda().expand(nsamples, 5, 5)
        T_inv = T.inverse().cuda().expand(nsamples, 5, 5)
        for i in range(N):
            xi0 = bmv(cholP, torch.randn(nsamples, 9).cuda())
            w = bmv(cholQ, torch.randn(nsamples, 9).cuda())
            T0_i = T0.bmm(SE3_2.exp(xi0))
            Phi = f_flux(T0_i, dt)
            Upsilon_i = Upsilon.bmm(SE3_2.exp(w))
            T_i = Gamma.bmm(Phi).bmm(Upsilon_i)
            xi = SE3_2.log(T_inv.bmm(T_i))
            xi_mean = xi.mean(dim=0)
            Sigma_prop += bouter(xi - xi_mean, xi - xi_mean).sum(dim=0).cpu()
        Sigma_prop = Sigma_prop / (N * nsamples + 1)

    Sigma_prop = (Sigma_prop + Sigma_prop.t()) / 2  # enforce symmetry
    return T, Sigma_prop
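
# Illustrative comparison of the second-order SE_2(3) compounding (default
# branch above, e.g. method=0) with the SO(3) x R^6 baseline (method=3). The
# values are arbitrary; torch and the module helpers are assumed to be imported.

dt = 0.05
g = torch.Tensor([0, 0, -9.81])
T0 = torch.eye(5)
Sigma0 = 1e-3 * torch.eye(9)
Q = 1e-5 * torch.eye(9)
Upsilon = torch.eye(5)
Upsilon[:3, 3] = torch.Tensor([0.0, 0.0, 9.81]) * dt

_, Sigma_se23 = compound(T0, Sigma0, Upsilon, Q, method=0, dt=dt, g=g)
_, Sigma_so3 = compound(T0, Sigma0, Upsilon, Q, method=3, dt=dt, g=g)
print(torch.norm(Sigma_se23 - Sigma_so3))  # Frobenius distance between the two covariances
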