Example #1
def propagate(T0, P, Upsilon, Q, method, dt, g, cholQ=0):
    """Propagate state for one time step"""
    Gamma = f_Gamma(g, dt)
    Phi = f_flux(T0, dt)
    # propagate the mean
    T = Gamma.mm(Phi).mm(Upsilon)

    # Jacobian for propagating prior along time
    F = torch.eye(9)
    F[6:9, 3:6] = torch.eye(3) * dt

    # compute Adjoint of right transformation mean
    AdUps = SE3_2.uAd(SE3_2.uinv(Upsilon))

    Pprime = axat(AdUps.mm(F), P)
    # compound the covariances based on the second-order method
    Pprop = Pprime + Q

    if method == 1:
        # add the fourth-order correction terms
        Pprop += four_order(Pprime, Q)

    elif method == 2:
        # Monte Carlo method
        n_tot_samples = 1000000
        nsamples = 50000
        N = int(n_tot_samples / nsamples) + 1

        tmp = torch.cholesky(P + 1e-20 * torch.eye(9))
        cholP = tmp.cuda().expand(nsamples, 9, 9)
        cholQ = cholQ.cuda().expand(nsamples, 9, 9)

        Pprop = torch.zeros(9, 9)

        Gamma = Gamma.cuda().expand(nsamples, 5, 5)
        Upsilon = Upsilon.cuda().expand(nsamples, 5, 5)
        T0 = T0.cuda().expand(nsamples, 5, 5)
        T_inv = T.inverse().cuda().expand(nsamples, 5, 5)
        for i in range(N):
            xi0 = bmv(cholP, torch.randn(nsamples, 9).cuda())
            w = bmv(cholQ, torch.randn(nsamples, 9).cuda())
            T0_i = T0.bmm(SE3_2.exp(xi0))
            Phi = f_flux(T0_i, dt)
            Upsilon_i = Upsilon.bmm(SE3_2.exp(w))
            T_i = Gamma.bmm(Phi).bmm(Upsilon_i)

            xi = SE3_2.log(T_inv.bmm(T_i))
            xi_mean = xi.mean(dim=0)
            Pprop += bouter(xi - xi_mean, xi - xi_mean).sum(dim=0).cpu()

        Pprop = Pprop / (N * nsamples + 1)

    Pprop = (Pprop + Pprop.t()) / 2  # symmetric
    return T, Pprop
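A note on the covariance update above (a reading of the code, assuming `axat(A, P)` computes $A P A^\top$ and `SE3_2.uAd` is the $SE_2(3)$ adjoint): the second-order branch appears to implement

$$\Sigma' \approx \mathrm{Ad}_{\Upsilon^{-1}} F \,\Sigma\, F^\top \mathrm{Ad}_{\Upsilon^{-1}}^\top + Q,$$

with method 1 adding the correction terms returned by `four_order`, and method 2 replacing the analytic update with a Monte-Carlo estimate.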
Example #2
def correctPIM(DeltaRs, DeltaVs, DeltaPs, DUpsilonDb, biases, methode):
    """
    Correct preintegration measurement of $SE_2(3)$ and $SO(3)xR^6$ distributions.
    """
    N = DeltaRs.shape[0]
    delta_pim = bmv(DUpsilonDb.expand(N, 9, 6), biases)

    if methode == 1:
        # Correct preintegration measurement of SE_2(3) distribution.
        Upsilon = torch.eye(5)
        Upsilon[:3, :3] = DeltaRs[0]
        Upsilon[:3, 3] = DeltaVs[0]
        Upsilon[:3, 4] = DeltaPs[0]

        Upsilon_cor = Upsilon.expand(N, 5, 5).bmm(
            SE3_2.exp(delta_pim.cuda()).cpu())
        DeltaRs_cor = Upsilon_cor[:, :3, :3]
        DeltaVs_cor = Upsilon_cor[:, :3, 3]
        DeltaPs_cor = Upsilon_cor[:, :3, 4]
    else:
        # Correct preintegration measurement of SO(3)xR^6 distribution.
        DeltaRs_cor = DeltaRs[0].expand(N, 3, 3).bmm(
            SO3.exp(delta_pim[:, :3].cuda()).cpu())
        DeltaVs_cor = DeltaVs[0].expand(N, 3) + bmv(DeltaRs[0].expand(N, 3, 3),
                                                    delta_pim[:, 3:6])
        DeltaPs_cor = DeltaPs[0].expand(N, 3) + bmv(DeltaRs[0].expand(N, 3, 3),
                                                    delta_pim[:, 6:9])

    return DeltaRs_cor, DeltaVs_cor, DeltaPs_cor
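As a reading of the two branches (assuming `DUpsilonDb` is the Jacobian of the preintegrated measurement with respect to the biases, and writing $\delta = \frac{\partial\Upsilon}{\partial \mathbf{b}}\,\mathbf{b}$): the $SE_2(3)$ branch corrects on the group, while the $SO(3)\times\mathbb{R}^6$ branch corrects each block separately,

$$\Upsilon_{\mathrm{cor}} = \Upsilon \exp(\delta), \qquad \Delta R_{\mathrm{cor}} = \Delta R\,\exp_{SO(3)}(\delta_{1:3}),\quad \Delta v_{\mathrm{cor}} = \Delta v + \Delta R\,\delta_{4:6},\quad \Delta p_{\mathrm{cor}} = \Delta p + \Delta R\,\delta_{7:9}.$$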
Example #3
def main(i_max, k_max, T0, Upsilons, Q, cholQ, dt, g):
    # Generate some random samples
    # NOTE: initial covariance is zero
    T = torch.zeros(i_max, k_max, 5, 5).cuda()
    T[:, 0] = T0.cuda().repeat(i_max, 1, 1)
    Gamma = f_Gamma(g, dt).cuda().expand(i_max, 5, 5)
    tmp = cholQ.cuda().expand(i_max, 9, 9)
    for k in range(1, k_max):
        T_k = SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda()))
        Phi = f_flux(T[:, k-1], dt)
        tmp2 = Upsilons[k].cuda().expand(i_max, 5, 5)
        T[:, k] = Gamma.bmm(Phi).bmm(T_k).bmm(tmp2)
    T = T.cpu()

    # Propagate the uncertainty using the second-order, SO(3) x R^6, and Monte-Carlo methods
    T_est = torch.zeros(k_max, 5, 5)
    Sigma_est = torch.zeros(k_max, 9, 9) # covariance
    SigmaSO3 = torch.zeros(k_max, 9, 9) # SO(3) x R^6 covariance
    Sigma_est_mc = torch.zeros(k_max, 9, 9) # Monte-Carlo covariance on SE_2(3)

    T_est[0] = T0
    for k in range(1, k_max):
        # Second-order method
        T_est[k], Sigma_est[k] = compound(T_est[k-1], Sigma_est[k-1], Upsilons[k], Q, 1, dt, g)
        # baseline method
        _, SigmaSO3[k] = compound(T_est[k-1], SigmaSO3[k-1], Upsilons[k], Q, 3, dt, g)
        # Monte-Carlo method
        _, Sigma_est_mc[k] = compound(T_est[k-1], Sigma_est_mc[k-1], Upsilons[k], Q, 4, dt, g, cholQ)


    results = compute_results(T, i_max, T_est, Sigma_est, SigmaSO3, Sigma_est_mc)
    return results
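For reference, the sampling loop above appears to draw noisy trajectories according to (this is a reading of the code, with `cholQ` assumed to be a Cholesky factor of $Q$)

$$T_k^{(i)} = \Gamma\,\Phi\big(T_{k-1}^{(i)}\big)\,\exp\big(\mathbf{w}_k^{(i)}\big)\,\Upsilon_k, \qquad \mathbf{w}_k^{(i)} \sim \mathcal{N}(\mathbf{0}, Q),$$

where $\Gamma$ is the gravity factor `f_Gamma(g, dt)` and $\Phi$ the flux `f_flux` of the noise-free dynamics.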
Example #4
def plot_se23_helper(T_est, P_est, color, i1, i2, i_max, path):
    P_est_chol = torch.cholesky(P_est)
    r = bmv(P_est_chol.expand(i_max, 9, 9), torch.randn(i_max, 9))
    # keep only perturbations whose norm is below a fixed bound
    r = r[r.norm(dim=1) < 8.1682]
    Ttemp = T_est.expand(r.shape[0], 5, 5).bmm(SE3_2.exp(r.cuda()).cpu())
    p_est = Ttemp[:, :3, 4]
    plt.scatter(p_est[:, i1], p_est[:, i2], color=color, s=3)
    if i1 == 0 and i2 == 1:
        np.savetxt(path, p_est.numpy(), header="x y z", comments='')
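This helper seems to visualize the estimated $SE_2(3)$ distribution by sampling perturbations through a Cholesky factor $L$ of $P$ and mapping them through the mean pose (a reading of the code, assuming `bmv` is a batched matrix-vector product):

$$\boldsymbol{\xi}^{(i)} = L\,\mathbf{n}^{(i)},\quad \mathbf{n}^{(i)} \sim \mathcal{N}(\mathbf{0}, I_9), \qquad \mathbf{p}^{(i)} = \text{position column of } \hat{T}\exp(\boldsymbol{\xi}^{(i)}),$$

keeping only the perturbations whose norm is below the fixed bound before scattering the positions.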
Example #5
def main(i_max, k_max, T0, P0, Upsilon, Q, cholQ, dt, g):
    # Generate some random samples
    T = torch.zeros(i_max, k_max, 5, 5).cuda()
    T[:, 0] = T0.cuda().repeat(i_max, 1, 1)
    tmp = P0.sqrt().cuda().expand(i_max, 9, 9)  # P0 assumed diagonal!
    T[:, 0] = T[:, 0].bmm(SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda())))
    Gamma = f_Gamma(g, dt).cuda().expand(i_max, 5, 5)
    tmp = cholQ.cuda().expand(i_max, 9, 9)
    tmp2 = Upsilon.cuda().expand(i_max, 5, 5)
    for k in range(1, k_max):
        T_k = SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda()))
        Phi = f_flux(T[:, k - 1], dt)
        T[:, k] = Gamma.bmm(Phi).bmm(tmp2).bmm(T_k)
    T = T.cpu()

    # Propagate the uncertainty using second- and fourth-order methods and a Monte-Carlo baseline
    T_est = torch.zeros(k_max, 5, 5)
    P_est1 = torch.zeros(k_max, 9, 9)  # second order covariance
    P_est2 = torch.zeros(k_max, 9, 9)  # fourth order covariance
    P_est_mc = torch.zeros(k_max, 9, 9)  # Monte-Carlo covariance
    T_est[0] = T0
    P_est1[0] = P0.clone()
    P_est2[0] = P0.clone()
    P_est_mc[0] = P0.clone()
    for k in range(1, k_max):
        # Second-order method
        T_est[k], P_est1[k] = propagate(T_est[k - 1], P_est1[k - 1], Upsilon,
                                        Q, 0, dt, g)
        # Fourth-order method
        _, P_est2[k] = propagate(T_est[k - 1], P_est2[k - 1], Upsilon, Q, 1,
                                 dt, g)
        # Monte-Carlo (baseline) method
        _, P_est_mc[k] = propagate(T_est[k - 1], P_est_mc[k - 1], Upsilon, Q,
                                   2, dt, g, cholQ)

    res = torch.zeros(3)
    res[1] = fro_norm(P_est_mc[-1], P_est1[-1])
    res[2] = fro_norm(P_est_mc[-1], P_est2[-1])
    print(fro_norm(P_est1[-1], P_est2[-1]))
    return res
Example #6
def main(i_max, k_max, T0, Sigma0, Upsilon, Q, cholQ, dt, g):
    # Generate some random samples
    T = torch.zeros(i_max, k_max, 5, 5).cuda()
    T[:, 0] = T0.cuda().repeat(i_max, 1, 1)
    tmp = Sigma0.sqrt().cuda().expand(i_max, 9, 9)  # Sigma0 assumed diagonal!
    T[:, 0] = T[:, 0].bmm(SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda())))
    Gamma = f_Gamma(g, dt).cuda().expand(i_max, 5, 5)
    tmp = cholQ.cuda().expand(i_max, 9, 9)
    tmp2 = Upsilon.cuda().expand(i_max, 5, 5)
    for k in range(1, k_max):
        T_k = SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda()))
        Phi = f_flux(T[:, k - 1], dt)
        T[:, k] = Gamma.bmm(Phi).bmm(tmp2).bmm(T_k)
    T = T.cpu()

    # Propagate the uncertainty using second- and fourth-order methods
    T_est = torch.zeros(k_max, 5, 5)
    Sigma2th = torch.zeros(k_max, 9, 9)  # second order covariance
    Sigma4th = torch.zeros(k_max, 9, 9)  # fourth order covariance

    T_est[0] = T0
    Sigma2th[0] = Sigma0.clone()
    Sigma4th[0] = Sigma0.clone()
    for k in range(1, k_max):
        # Second-order method
        T_est[k], Sigma2th[k] = propagate(T_est[k - 1], Sigma2th[k - 1],
                                          Upsilon, Q, 0, dt, g)
        # Fourth-order method
        _, Sigma4th[k] = propagate(T_est[k - 1], Sigma4th[k - 1], Upsilon, Q,
                                   1, dt, g)

    xi = SE3_2.log(
        (T_est[-1].inverse().expand(i_max, 5, 5).bmm(T[:, -1])).cuda())
    P_est_mc = bouter(xi, xi).sum(dim=0).cpu() / (i_max - 1)
    res = torch.zeros(3)
    res[1] = fro_norm(P_est_mc, Sigma2th[-1])
    res[2] = fro_norm(P_est_mc, Sigma4th[-1])
    return res
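The final block appears to estimate the empirical covariance of the error between the propagated mean and the samples at the last step,

$$\boldsymbol{\xi}^{(i)} = \log\big(\hat{T}_K^{-1} T_K^{(i)}\big), \qquad P_{\mathrm{mc}} = \frac{1}{i_{\max}-1}\sum_{i} \boldsymbol{\xi}^{(i)}\boldsymbol{\xi}^{(i)\top},$$

which is then compared against the second- and fourth-order covariances with `fro_norm`.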
Example #7
def main(i_max, k_max, T0, Sigma0, Upsilon, Q, cholQ, dt, g, sigma, m_max,
         paths):
    # Generate some random samples
    T = torch.zeros(i_max, k_max, 5, 5).cuda()
    T[:, 0] = T0.cuda().repeat(i_max, 1, 1)
    # NOTE: no initial uncertainty
    Gamma = f_Gamma(g, dt).cuda().expand(i_max, 5, 5)
    tmp = cholQ.cuda().expand(i_max, 9, 9)
    tmp2 = Upsilon.cuda().expand(i_max, 5, 5)
    for k in range(1, k_max):
        T_k = SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda()))
        Phi = f_flux(T[:, k - 1], dt)
        T[:, k] = Gamma.bmm(Phi).bmm(T_k).bmm(tmp2)
    T = T.cpu()

    # Propagate the uncertainty with both methods
    T_est = torch.zeros(k_max, 5, 5)
    P_est = torch.zeros(k_max, 9, 9)
    SigmaSO3 = torch.zeros(k_max, 9, 9)  # SO(3) x R^6 covariance
    T_est[0] = T0
    P_est[0] = Sigma0.clone()
    SigmaSO3[0] = Sigma0.clone()
    for k in range(1, k_max):
        T_est[k], P_est[k] = propagate(T_est[k - 1], P_est[k - 1], Upsilon, Q,
                                       1, dt, g)
        # baseline method
        _, SigmaSO3[k] = propagate(T_est[k - 1], SigmaSO3[k - 1], Upsilon, Q,
                                   2, dt, g)

    # Now plot the transformations
    labels = ['x (m)', 'y (m)', 'z (m)']
    for i1, i2 in ((0, 1), (0, 2), (1, 2)):
        plt.figure()
        # Plot the covariance of the samples
        plot_so3_helper(T_est, SigmaSO3[-1], "red", i1, i2, i_max, paths[1])
        # Plot the propagated covariance projected onto i1, i2
        plot_se23_helper(T_est, P_est[-1], 'green', i1, i2, i_max, paths[2])
        # Plot the random samples' positions in the selected plane
        plt.scatter(T[:, -1, i1, 4],
                    T[:, -1, i2, 4],
                    s=1,
                    color='black',
                    alpha=0.5)
        plt.scatter(T_est[-1, i1, 4], T_est[-1, i2, 4], color='yellow', s=30)
        plt.xlabel(labels[i1])
        plt.ylabel(labels[i2])
        plt.legend([r"$SO(3) \times \mathbb{R}^6$", r"$SE_2(3)$"])
    # np.savetxt(paths[0], T[:, -1, :3, 4].numpy(), header="x y z", comments='')
    plt.show()
Example #8
def plot_se23_helper(T_est, P_est, v, color, i1, i2):
    """
    Draw ellipse based on the 3 more important directions of the covariance
    """
    D, V = torch.eig(P_est, eigenvectors=True)
    Y, I = torch.sort(D[:, 0], descending=True)
    a = sigma * D[I[0], 0].sqrt() * V[:, I[0]]
    b = sigma * D[I[1], 0].sqrt() * V[:, I[1]]
    c = sigma * D[I[2], 0].sqrt() * V[:, I[2]]
    for n in range(3):
        if n == 0:
            xi = a * v.sin() + b * v.cos()
        elif n == 1:
            xi = b * v.sin() + c * v.cos()
        elif n == 2:
            xi = a * v.sin() + c * v.cos()
        Ttemp = T_est[-1].expand(m_max, 5, 5).bmm(SE3_2.exp(xi.cuda()).cpu())
        clines = Ttemp[:, :3, 4]
        plt.plot(clines[:, i1], clines[:, i2], color=color)
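Each plotted curve appears to trace an ellipse spanned by two of the three dominant eigen-pairs $(\lambda_j, \mathbf{v}_j)$ of $P$, mapped through the mean pose (with `sigma` and `m_max` assumed to come from the enclosing scope):

$$\boldsymbol{\xi}(t) = \sigma\sqrt{\lambda_j}\,\mathbf{v}_j\sin t + \sigma\sqrt{\lambda_l}\,\mathbf{v}_l\cos t, \qquad \mathbf{p}(t) = \text{position column of } \hat{T}\exp(\boldsymbol{\xi}(t)).$$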
Example #9
def plot_se23_helper(T_est, P_est, color, i1, i2, i_max, path):
    P_est_chol = torch.cholesky(P_est + torch.eye(9) * 1e-16)
    r = bmv(P_est_chol.expand(i_max, 9, 9), torch.randn(i_max, 9))
    Ttemp = T_est[-1].expand(i_max, 5, 5).bmm(SE3_2.exp(r.cuda()).cpu())
    p_est = Ttemp[:, :3, 4]
    plt.scatter(p_est[:, i1], p_est[:, i2], color=color, s=3)
Example #10
def compound(T0, Sigma, Upsilon, Q, method, dt, g, cholQ=0):
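    """Compound the state mean and covariance over one time step."""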
    Gamma = f_Gamma(g, dt)
    Phi = f_flux(T0, dt)
    # compound the mean
    T = Gamma.mm(Phi).mm(Upsilon)

    # Jacobian for propagating prior along time
    F = torch.eye(9)
    F[6:9, 3:6] = torch.eye(3) * dt

    # compute Adjoint of right transformation mean
    AdUps = SE3_2.uAd(SE3_2.uinv(Upsilon))
    Sigma_tmp = axat(AdUps.mm(F), Sigma)
    # compound the covariances based on the second-order method
    Sigma_prop = Sigma_tmp + Q

    if method == 3:
        # baseline SO(3) x R^6
        wedge_acc = SO3.uwedge(Upsilon[:3, 3])  # already multiplied by dt
        F = torch.eye(9)
        F[3:6, :3] = -T0[:3, :3].mm(wedge_acc)
        F[6:9, :3] = F[3:6, :3] * dt / 2
        F[6:9, 3:6] = dt * torch.eye(3)

        G = torch.zeros(9, 6)
        G[:3, :3] = T0[:3, :3].t()
        G[3:6, 3:6] = T0[:3, :3]
        G[6:9, 3:6] = 1 / 2 * T0[:3, :3] * dt
        Sigma_prop = axat(F, Sigma) + axat(G, Q[:6, :6])

    elif method == 4:
        # Monte Carlo method
        n_tot_samples = 100000
        nsamples = 50000
        N = int(n_tot_samples / nsamples) + 1

        tmp = torch.cholesky(Sigma_prop + 1e-16 * torch.eye(9))
        cholP = tmp.cuda().expand(nsamples, 9, 9)
        cholQ = cholQ.cuda().expand(nsamples, 9, 9)

        Sigma_prop = torch.zeros(9, 9)

        Gamma = Gamma.cuda().expand(nsamples, 5, 5)
        Upsilon = Upsilon.cuda().expand(nsamples, 5, 5)
        T0 = T0.cuda().expand(nsamples, 5, 5)
        T_inv = T.inverse().cuda().expand(nsamples, 5, 5)
        for i in range(N):
            xi0 = bmv(cholP, torch.randn(nsamples, 9).cuda())
            w = bmv(cholQ, torch.randn(nsamples, 9).cuda())
            T0_i = T0.bmm(SE3_2.exp(xi0))
            Phi = f_flux(T0_i, dt)
            Upsilon_i = Upsilon.bmm(SE3_2.exp(w))
            T_i = Gamma.bmm(Phi).bmm(Upsilon_i)
            xi = SE3_2.log(T_inv.bmm(T_i))
            xi_mean = xi.mean(dim=0)
            Sigma_prop += bouter(xi - xi_mean, xi - xi_mean).sum(dim=0).cpu()

        Sigma_prop = Sigma_prop / (N * nsamples + 1)

    Sigma_prop = (Sigma_prop + Sigma_prop.t()) / 2
    return T, Sigma_prop
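For the $SO(3)\times\mathbb{R}^6$ baseline (method 3), the covariance appears to be propagated with the state Jacobian $F$ and a noise Jacobian $G$ acting on the first six noise components (again assuming `axat(A, X)` computes $A X A^\top$):

$$\Sigma' = F\,\Sigma\,F^\top + G\,Q_{1:6,\,1:6}\,G^\top,$$

while method 4 replaces the analytic update with the same Monte-Carlo estimate as in `propagate`.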
Example #11
def main(i_max, k_max, T0, Sigma0, Upsilon, Q, cholQ, dt, g, sigma, m_max):
    # Generate some random samples
    T = torch.zeros(i_max, k_max, 5, 5).cuda()
    T[:, 0] = T0.cuda().repeat(i_max, 1, 1)
    tmp = Sigma0.sqrt().cuda().expand(i_max, 9, 9)  # Sigma0 assumed diagonal!
    T[:, 0] = T[:, 0].bmm(SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda())))
    Gamma = f_Gamma(g, dt).cuda().expand(i_max, 5, 5)
    tmp = cholQ.cuda().expand(i_max, 9, 9)
    tmp2 = Upsilon.cuda().expand(i_max, 5, 5)
    for k in range(1, k_max):
        T_k = SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda()))
        Phi = f_flux(T[:, k - 1], dt)
        T[:, k] = Gamma.bmm(Phi).bmm(tmp2).bmm(T_k)
    T = T.cpu()

    # Propagate the uncertainty using second- and fourth-order methods and the SO(3) x R^6 baseline
    T_est = torch.zeros(k_max, 5, 5)
    Sigma2th = torch.zeros(k_max, 9, 9)  # second order covariance
    Sigma4th = torch.zeros(k_max, 9, 9)  # fourth order covariance
    SigmaSO3 = torch.zeros(k_max, 9, 9)  # SO(3) x R^6 covariance
    T_est[0] = T0
    Sigma2th[0] = Sigma0.clone()
    Sigma4th[0] = Sigma0.clone()
    SigmaSO3[0] = Sigma0.clone()
    for k in range(1, k_max):
        # Second-order method
        T_est[k], Sigma2th[k] = propagate(T_est[k - 1], Sigma2th[k - 1],
                                          Upsilon, Q, 0, dt, g)
        # Fourth-order method
        _, Sigma4th[k] = propagate(T_est[k - 1], Sigma4th[k - 1], Upsilon, Q,
                                   1, dt, g)
        # baseline method
        _, SigmaSO3[k] = propagate(T_est[k - 1], SigmaSO3[k - 1], Upsilon, Q,
                                   2, dt, g)

    ## Numerical check of paper formulas
    # Sigma_K = Sigma2th[-1]
    # sigma = Q[2, 2].sqrt()
    # K = k_max-1
    # a = 1
    # Deltat = 0.05

    # Sigma_phiphi = K * sigma * sigma
    # print(Sigma_phiphi, Sigma_K[2, 2])

    # Sigma_phiv = -(K-1)/2 * a * Deltat * Sigma_phiphi
    # print(Sigma_phiv, Sigma_K[2, 4])

    # Sigma_phip = (K-1)*(2*K-1)/12 * a * (Deltat**2) * Sigma_phiphi
    # print(Sigma_phip, Sigma_K[2, 7])

    # Sigma_vv = (K-1)*(2*K-1)/6 * ((a * Deltat)**2) * Sigma_phiphi
    # print(Sigma_vv, Sigma_K[4, 4])

    # Sigma_vp = (K-1)**2 * (K)**2 * 1/(8*K) * ((a**2) * (Deltat**3)) * Sigma_phiphi
    # print(Sigma_vp, Sigma_K[4, 7])

    # Sigma_pp = (K-1)*(2*K-1)*(3*(K-1)**2 + 3*K -4)/120 * ((a**2) * (Deltat**4)) * Sigma_phiphi
    # print(Sigma_pp, Sigma_K[7, 7])

    # Plot the random samples' trajectory lines
    for i in range(i_max):
        plt.plot(T[i, :, 0, 4], T[i, :, 1, 4], color='gray', alpha=0.1)

    v = (2 * np.pi * torch.arange(m_max) / (m_max - 1) - np.pi).unsqueeze(1)
    x = T[:, -1, :3, 4]
    xmean = torch.mean(x, dim=0)
    vSigma = bouter(x - xmean, x - xmean).sum(dim=0) / (i_max - 1)

    # Plot blue dots for random samples
    plt.scatter(T[:, -1, 0, 4], T[:, -1, 1, 4], s=2, color='black')
    # Plot the mean of the samples
    plt.scatter(xmean[0], xmean[1], label='mean', color='orange')
    # Plot the covariance of the samples
    T_est2 = T_est.clone()
    T_est2[-1, :3, 4] = xmean
    plot_so3_helper(T_est2, vSigma, v, "orange", 0, 1)
    plt.scatter(T_est[-1, 0, 4],
                T_est[-1, 1, 4],
                label='estimation',
                color='green')
    plot_se23_helper(T_est, Sigma2th[-1], v, 'green', 0, 1)
    plot_so3_helper(T_est, SigmaSO3[-1, 6:9, 6:9], v, 'red', 0, 1)
    plot_se23_helper(T_est, Sigma4th[-1], v, 'cyan', 0, 1)
    plt.xlabel('x')
    plt.xlim(left=0)
    plt.ylabel('y')
    plt.show()