def run_epipolar_finite_differences():
    np.random.seed(0)

    bezier_order = 4
    pos_controls = np.random.randn(bezier_order, 3)
    ti, tj = np.random.randn(2)
    zi, zj = np.random.randn(2, 3)
    Ri = SO3.exp(np.random.randn(3))
    Rj = SO3.exp(np.random.randn(3))

    def r(delta):
        assert len(delta) == bezier_order * 3
        return epipolar_residual(pos_controls + delta.reshape((bezier_order, 3)),
                                 ti, tj, zi, zj, Ri, Rj)

    J_numeric = np.squeeze(numdifftools.Jacobian(r)(np.zeros(bezier_order*3)))
    J_analytic = epipolar_jacobian(bezier_order, ti, tj, zi, zj, Ri, Rj)

    print '\nNumeric:'
    print J_numeric

    print '\nAnalytic:'
    print J_analytic

    np.testing.assert_array_almost_equal(J_numeric, J_analytic, decimal=8)
def run_accel_finite_differences():
    np.random.seed(0)

    bezier_order = 4
    pos_controls = np.random.randn(bezier_order, 3)
    accel_bias = np.random.randn(3)
    gravity = np.random.randn(3)
    a = np.random.randn(3)
    R = SO3.exp(np.random.randn(3))
    t = .5

    def r(delta):
        k = bezier_order * 3
        assert len(delta) == k + 6
        return accel_residual(pos_controls + delta[:k].reshape((bezier_order, 3)),
                              accel_bias + delta[k:k+3],
                              gravity + delta[k+3:k+6],
                              t,
                              a,
                              R)

    J_numeric = numdifftools.Jacobian(r)(np.zeros(bezier_order*3+6))
    J_analytic = accel_jacobian(bezier_order, t, R)

    print '\nNumeric:'
    print J_numeric

    print '\nAnalytic:'
    print J_analytic

    np.testing.assert_array_almost_equal(J_numeric, J_analytic, decimal=8)
def cayley_inv(r):
    w = SO3.log(r)
    theta = np.linalg.norm(w)
    if theta < 1e-8:
        return w
    else:
        return w * np.tan(theta / 2.) / theta
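# A minimal usage sketch for cayley_inv (hypothetical check, not part of the
# original code; assumes SO3.exp/log and the `normalized` helper used elsewhere
# in these examples). For a rotation of angle theta about a unit axis, the
# returned Cayley vector points along that axis with length tan(theta/2);
# `cayley`, defined elsewhere in this code base, is assumed to be the inverse
# mapping.
def check_cayley_inv():
    axis = normalized(np.array([1., 2., -1.]))
    theta = .3
    w = cayley_inv(SO3.exp(axis * theta))
    assert np.allclose(w, axis * np.tan(theta / 2.))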
def create_planar_bundle(noise=.1):
    NUM_CAMERAS        = 4
    NUM_POINTS         = 12
    POINT_CLOUD_RADIUS = 5.
    MISSING_FRAC       = 0      # fraction of measurements that are missing
    OUTLIER_FRAC       = 0      # fraction of measurements that are outliers
    OUTLIER_RANGE      = 10.    # bounds of uniform distribution from which outliers are sampled
    CAUCHY_PARAM       = .05    # determines the width of the robustifier

    # Setup some cameras
    np.random.seed(1111)  # for repeatability
    K = np.array([[  2.,  0., -.5  ],
                  [ .05,  3.,  .1  ],
                  [  0.,  0.,   1. ]])

    # Random rotations; zero translation
    Rs = [ SO3.exp(np.random.rand(3)*.1) for i in range(NUM_CAMERAS) ]
    ts = np.zeros((NUM_CAMERAS, 3))

    # Sample 3D points
    r = POINT_CLOUD_RADIUS
    pts = np.random.uniform(-r, r, (NUM_POINTS, 3))
    pts[:,2] += 10   # ensure points are not close to focal plane

    # Compute ideal projections and add noise
    msm = np.array([[ bundle.project(K, R, t, pt) for pt in pts ]
                    for (R,t) in zip(Rs,ts) ])
    msm += np.random.randn(*msm.shape) * noise

    # Mark some measurements as missing
    np.random.seed(4309)  # for repeatability
    msm_mask = np.ones(msm.shape[:2], bool)
    nmissing = int(MISSING_FRAC * NUM_POINTS)
    if nmissing > 0:
        for i in range(NUM_CAMERAS):
            missing_inds = np.random.permutation(NUM_POINTS)[:nmissing]
            msm_mask[i, missing_inds] = False

    # Generate some outliers by replacing measurements with random data
    np.random.seed(101)  # for repeatability
    outlier_mask = np.zeros((NUM_CAMERAS, NUM_POINTS), bool)
    noutliers = int(OUTLIER_FRAC * NUM_POINTS)
    if noutliers > 0:
        for i in range(NUM_CAMERAS):
            outlier_inds = np.random.permutation(NUM_POINTS)[:noutliers]
            outlier_mask[i,outlier_inds] = True
            for j in outlier_inds:
                msm[i,j] = np.random.uniform(-OUTLIER_RANGE, OUTLIER_RANGE, 2)

    # Create the bundle
    b = bundle.Bundle.FromArrays(K, Rs, ts, pts, msm, msm_mask)

    # Attach robustifier
    b.sensor_model = sensor_model.GaussianModel(.1)
    #b.sensor_model = sensor_model.CauchyModel(CAUCHY_PARAM)

    # Store this for visualization later
    b.outlier_mask = outlier_mask
    return b
def main():
    orientation_data = np.loadtxt('out/frame_orientations.txt')
    accel_data = np.loadtxt('out/accelerometer.txt')

    frame_timestamps = orientation_data[:,0]
    frame_orientations = orientation_data[:, 1:].reshape((-1, 3, 3))

    accel_timestamps = accel_data[:,0]
    accel_readings = accel_data[:,1:]

    accel_orientations = []
    for accel_time in accel_timestamps:
        frame_index = bisect.bisect_left(frame_timestamps, accel_time)
        assert 0 < frame_index < len(frame_timestamps), 't=%f' % accel_time
        t0 = frame_timestamps[frame_index-1]
        r0 = frame_orientations[frame_index-1]
        t1 = frame_timestamps[frame_index]
        r1 = frame_orientations[frame_index]
        w01 = SO3.log(np.dot(r1, r0.T))
        a = (accel_time - t0) / (t1 - t0)
        assert 0 <= a <= 1
        accel_orientations.append(np.dot(SO3.exp(a*w01), r0))

    frame_ws = []
    accel_ws = []
    rbase = frame_orientations[0]
    for r in frame_orientations:
        frame_ws.append(SO3.log(np.dot(r, rbase.T)))
    for r in accel_orientations:
        accel_ws.append(SO3.log(np.dot(r, rbase.T)))

    np.savetxt('out/accel_orientations.txt',
               np.hstack((accel_timestamps[:,None], np.reshape(accel_orientations, (-1, 9)))))

    plt.clf()
    plt.hold('on')
    plt.plot(frame_timestamps, np.asarray(frame_orientations).reshape((-1, 9)))
    plt.plot(accel_timestamps, np.asarray(accel_orientations).reshape((-1, 9)), '.')
    #plt.plot(frame_timestamps, map(np.linalg.norm, frame_ws))
    #plt.plot(accel_timestamps, map(np.linalg.norm, accel_ws), '.')
    plt.show()
def main():
    # Solves min ||a*x - b|| subject to ||x|| = norm_x via the SVD of a and
    # the secular equation in the Lagrange multiplier k.
    a = SO3.exp(np.random.rand(3))
    b = np.array((2, 2, 2))

    norm_x = 1
    true_x = np.dot(a.T, normalized(b))

    u, s, vt = np.linalg.svd(a)
    v = vt.T
    btilde = np.dot(u.T, b)

    def secular(k):
        return np.sum(np.square(s*btilde / (s*s + k))) - norm_x*norm_x

    k = scipy.optimize.fsolve(secular, 1.)

    estimated_x = np.dot(v, s*btilde / (s*s + k))
    print estimated_x
    print np.dot(a, estimated_x)
def run_reprojection_finite_differences():
    np.random.seed(0)

    bezier_order = 4
    pos_controls = np.random.randn(bezier_order, 3)
    landmark = np.random.randn(3)
    depth = np.random.randn()
    timestamp = .5
    feature = np.random.randn(3)
    orientation = SO3.exp(np.random.randn(3))

    position_offset = 0
    accel_bias_offset = position_offset + bezier_order * 3
    gravity_offset = accel_bias_offset + 3
    landmark_offset = gravity_offset + 3
    depth_offset = landmark_offset + 3
    size = depth_offset + 1

    def r(delta):
        return reprojection_residual(
            pos_controls + delta[:accel_bias_offset].reshape(pos_controls.shape),
            landmark + delta[landmark_offset:depth_offset],
            depth + delta[depth_offset],
            timestamp,
            feature,
            orientation)

    J_numeric = numdifftools.Jacobian(r)(np.zeros(size))

    J_wrt_p, J_wrt_x, J_wrt_k = reprojection_jacobian(bezier_order, timestamp, feature, orientation)
    print J_wrt_p.shape, J_wrt_x.shape, J_wrt_k[:,None].shape
    J_analytic = np.hstack((J_wrt_p, np.zeros((3, 6)), J_wrt_x, J_wrt_k[:, None]))

    print '\nNumeric:'
    print J_numeric

    print '\nAnalytic:'
    print J_analytic

    np.testing.assert_array_almost_equal(J_numeric, J_analytic, decimal=8)
def run_with_synthetic_data():
    R_true, t_true, xs0, xs1 = setup_test_problem()
    #savetxt('standalone/essential_matrix_data/true_pose.txt',
    #        hstack((R_true, t_true[:,newaxis])), fmt='%10f')

    PERTURB = .1

    random.seed(73)
    R_init = dot(R_true, SO3.exp(random.randn(3)*PERTURB))
    t_init = t_true + random.randn(3)*PERTURB

    # savetxt('standalone/essential_matrix_data/init_pose.txt',
    #         hstack((R_init, t_init[:,newaxis])),
    #         fmt='%10f')
    # savetxt('standalone/essential_matrix_data/100_correspondences_tenpercent_noise.txt',
    #         hstack((xs0, xs1)),
    #         fmt='%10f')
    
    R_opt, t_opt = optimize_fmatrix(xs0, xs1, R_init, t_init)

    print '\nTrue [R t]:'
    print hstack((R_true, t_true[:,newaxis]))
    print '\nInitial [R t]:'
    print hstack((R_init, t_init[:,newaxis]))
    print '\nFinal [R t]:'
    print hstack((R_opt, t_opt[:,newaxis]))

    print '\nError in R:'
    print dot(R_opt.T, R_true) - eye(3)
    print '\nError in t:'
    print abs(t_opt - t_true)

    print '\nAlgebraic error at true:'
    print '  ',algebraic_error_forwards(K, R_true, t_true, xs0, xs1)
    print '\nAlgebraic error at initial:'
    print '  ',algebraic_error_forwards(K, R_init, t_init, xs0, xs1)
    print '\nAlgebraic error at final:'
    print '  ',algebraic_error_forwards(K, R_opt, t_opt, xs0, xs1)
def angular_velocity_right(f, t, step=1e-8):
    return SO3.log(np.dot(f(t).T, f(t + step))) / step
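# A minimal sketch (hypothetical check, not part of the original code): for a
# constant-angular-velocity trajectory f(t) = SO3.exp(t * w), the body-frame
# finite difference above recovers w at any t, because f(t).T.dot(f(t + dt))
# equals SO3.exp(dt * w).
def check_angular_velocity_right():
    w = np.array([.1, -.2, .3])
    f = lambda t: SO3.exp(t * w)
    assert np.allclose(angular_velocity_right(f, .7, step=1e-6), w, atol=1e-5)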
def optimize_fmatrix(xs0, xs1, R_init, t_init):
    num_steps = 0
    converged = False
    damping = INIT_DAMPING
    R_cur = R_init.copy()
    t_cur = t_init.copy()
    t_cur /= norm(t_cur)

    xs0 = unpr(xs0)   # convert a list of 2-vectors to a list of 3-vectors with the last element set to 1
    xs1 = unpr(xs1)
    cost_cur = cost_robust(K, R_cur, t_cur, xs0, xs1)

    # Create a function that returns a new function that linearizes costfunc about (R,t)
    # This is used for debugging only
    #flocal = lambda R,t: lambda v: costfunc(K, dot(R, SO3.exp(v[:3])), t+v[3:], xs0, xs1)

    while num_steps < MAX_STEPS and damping < 1e+8 and not converged:
        print 'Step %d:   cost=%-10f damping=%f' % (num_steps, cost_cur, damping)

        # Compute normal equations
        JTJ,JTr = compute_normal_equations(K, R_cur, t_cur, xs0, xs1)

        # Pick a step length
        while not converged:
            # Check for failure
            if damping > 1e+8:
                print 'FAILED TO CONVERGE'
                break

            # Apply Levenberg-Marquardt damping
            b = JTr.copy()
            A = JTJ.copy()
            for i in range(A.shape[0]):
                A[i,i] *= (1. + damping)

            # Freeze one translation parameter
            if FREEZE_LARGEST:
                param_to_freeze = 3 + np.argmax(t_cur)
                mask = (arange(6) != param_to_freeze)
                A = A[mask].T[mask].T
                b = b[mask]
    
            # Solve normal equations: 6x6
            try:
                update = -solve(A, b)
            except LinAlgError:
                # Badly conditioned: increase damping and try again
                damping *= 10.
                continue

            # Expand the update to length 6
            if FREEZE_LARGEST:
                update = unmask(update, mask)

            # Take gradient step
            R_next = dot(R_cur, SO3.exp(update[:3]))
            t_next = t_cur + update[3:]

            # Normalize to unit length
            t_next /= norm(t_next)

            # Compute new cost
            cost_next = cost_robust(K, R_next, t_next, xs0, xs1)
            if cost_next < cost_cur:
                # Cost decreased: accept the update
                if cost_cur - cost_next < CONVERGENCE_THRESH:
                    print 'Converged due to small improvement'
                    converged = True

                # Do not decrease damping if we're in danger of hitting machine epsilon
                if damping > 1e-15:
                    damping *= .1

                R_cur = R_next
                t_cur = t_next
                cost_cur = cost_next
                num_steps += 1
                break

            else:
                # Cost increased: reject the update, try another step length
                damping *= 10.

    if converged:
        print 'CONVERGED AFTER %d STEPS' % num_steps

    if DEBUG:
        JTJ,JTr = compute_normal_equations(K, R_cur, t_cur, xs0, xs1, residualfunc, Jresidualfunc)
        print 'Gradient magnitude at convergence:',norm(2.*JTr)

    return R_cur, t_cur
def run_simulation_nonsymbolic():
    np.random.seed(1)

    #
    # Construct ground truth
    #
    num_frames = 5
    num_landmarks = 50
    num_imu_readings = 80
    bezier_degree = 4
    out = 'out/position_only_bezier3'

    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num IMU readings:', num_imu_readings
    print 'Bezier curve degree:', bezier_degree

    if not os.path.isdir(out):
        os.mkdir(out)

    # Both splines should start at 0,0,0
    frame_times = np.linspace(0, .9, num_frames)
    accel_timestamps = np.linspace(0, 1, num_imu_readings)

    true_rot_controls = np.random.randn(bezier_degree-1, 3)
    true_pos_controls = np.random.randn(bezier_degree-1, 3)

    true_landmarks = np.random.randn(num_landmarks, 3)

    true_frame_cayleys = np.array([bezier.zero_offset_bezier(true_rot_controls, t) for t in frame_times])
    true_frame_orientations = np.array(map(cayley, true_frame_cayleys))
    true_frame_positions = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in frame_times])

    true_imu_cayleys = np.array([bezier.zero_offset_bezier(true_rot_controls, t) for t in accel_timestamps])
    true_imu_orientations = np.array(map(cayley, true_imu_cayleys))

    true_gravity_magnitude = 9.8
    true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude
    true_accel_bias = np.random.randn(3)
    true_global_accels = np.array([bezier.zero_offset_bezier_second_deriv(true_pos_controls, t) for t in accel_timestamps])
    true_accels = np.array([np.dot(R, a + true_gravity) + true_accel_bias
                            for R, a in zip(true_imu_orientations, true_global_accels)])

    true_features = np.array([[normalized(np.dot(R, x-p)) for x in true_landmarks]
                              for R, p in zip(true_frame_orientations, true_frame_positions)])

    print np.min(true_features.reshape((-1, 3)), axis=0)
    print np.max(true_features.reshape((-1, 3)), axis=0)

    #
    # Add sensor noise
    #

    accel_noise = 0  # 0.001
    feature_noise = 0  # 0.01
    orientation_noise = 0.01

    observed_frame_orientations = true_frame_orientations.copy()
    observed_imu_orientations = true_imu_orientations.copy()
    observed_features = true_features.copy()
    observed_accels = true_accels.copy()

    if orientation_noise > 0:
        for i, R in enumerate(observed_frame_orientations):
            R_noise = SO3.exp(np.random.randn(3)*orientation_noise)
            observed_frame_orientations[i] = np.dot(R_noise, R)
        for i, R in enumerate(observed_imu_orientations):
            R_noise = SO3.exp(np.random.randn(3)*orientation_noise)
            observed_imu_orientations[i] = np.dot(R_noise, R)

    if accel_noise > 0:
        observed_accels += np.random.randn(*observed_accels.shape) * accel_noise

    if feature_noise > 0:
        observed_features += np.random.rand(*observed_features.shape) * feature_noise

    position_offs = 0
    accel_bias_offset = position_offs + (bezier_degree-1)*3
    gravity_offset = accel_bias_offset + 3

    true_vars = np.hstack((true_pos_controls.flatten(), true_accel_bias, true_gravity))

    #
    # Compute system non-symbolically
    #

    accel_r = evaluate_accel_residuals(np.zeros((bezier_degree-1, 3)), np.zeros(3), np.zeros(3),
                                       accel_timestamps, observed_accels, observed_imu_orientations)
    accel_j = evaluate_accel_jacobians(bezier_degree-1, accel_timestamps, observed_imu_orientations)

    epipolar_r = evaluate_epipolar_residuals(np.zeros((bezier_degree-1, 3)), frame_times,
                                             observed_frame_orientations, observed_features)
    epipolar_j = evaluate_epipolar_jacobians(bezier_degree-1, frame_times,
                                             observed_frame_orientations, observed_features)
    epipolar_j = np.hstack((epipolar_j, np.zeros((epipolar_j.shape[0], 6))))

    residual = np.hstack((accel_r, epipolar_r))
    jacobian = np.vstack((accel_j, epipolar_j))

    #
    # Solve
    #

    JtJ = np.dot(jacobian.T, jacobian)
    Jtr = np.dot(jacobian.T, residual)
    estimated_vars = np.squeeze(np.linalg.solve(JtJ, -Jtr))

    #
    # Unpack result and compute error
    #

    estimated_pos_controls = np.reshape(estimated_vars[position_offs:position_offs+(bezier_degree-1)*3], (bezier_degree-1, 3))
    estimated_positions = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t) for t in frame_times])
    estimated_accel_bias = np.asarray(estimated_vars[accel_bias_offset:accel_bias_offset+3])
    estimated_gravity = np.asarray(estimated_vars[gravity_offset:gravity_offset+3])
    re_estimated_gravity = normalized(estimated_gravity) * true_gravity_magnitude

    print '\nEstimated:'
    print estimated_vars

    print '\nGround truth:'
    print true_vars

    print '\nTotal Error:', np.linalg.norm(estimated_vars - true_vars)
    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print '  True gravity:', true_gravity
    print '  Estimated gravity:', estimated_gravity
    print '  Estimated gravity magnitude:', np.linalg.norm(estimated_gravity)
    print '  Re-normalized gravity error: ', np.linalg.norm(re_estimated_gravity - true_gravity)
    for i in range(num_frames):
        print 'Frame %d error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i]))

    fig = plt.figure(figsize=(14,6))
    ax = fig.add_subplot(1, 2, 1, projection='3d')

    ts = np.linspace(0, 1, 100)
    true_ps = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in ts])
    estimated_ps = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t) for t in ts])

    ax.plot(true_ps[:, 0], true_ps[:, 1], true_ps[:, 2], '-b')
    ax.plot(estimated_ps[:, 0], estimated_ps[:, 1], estimated_ps[:, 2], '-r')

    plt.show()
def orientation_residuals(bezier_params, observed_timestamps, observed_orientations):
    return np.hstack([SO3.log(np.dot(predict_orientation(bezier_params, t).T, r))
                      for t, r in zip(observed_timestamps, observed_orientations)])
def se3_chart(Rt, v):
    return ( dot(Rt[0], SO3.exp(v[:3])), Rt[1] + v[3:] )
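# A minimal usage sketch (hypothetical check, not part of the original code):
# se3_chart acts as a local chart/retraction on SE(3), so a zero perturbation
# returns the original pose unchanged.
def check_se3_chart():
    Rt = (SO3.exp(np.random.randn(3)), np.random.randn(3))
    R2, t2 = se3_chart(Rt, np.zeros(6))
    assert np.allclose(R2, Rt[0]) and np.allclose(t2, Rt[1])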
def run_simulation():
    np.random.seed(1)

    #
    # Construct ground truth
    #
    num_frames = 5
    num_landmarks = 50
    num_imu_readings = 80
    bezier_degree = 4
    out = 'out/position_only_bezier3'

    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num IMU readings:', num_imu_readings
    print 'Bezier curve degree:', bezier_degree

    if not os.path.isdir(out):
        os.mkdir(out)

    # Both splines should start at 0,0,0
    frame_times = np.linspace(0, .9, num_frames)
    imu_times = np.linspace(0, 1, num_imu_readings)

    true_rot_controls = np.random.randn(bezier_degree-1, 3)
    true_pos_controls = np.random.randn(bezier_degree-1, 3)

    true_landmarks = np.random.randn(num_landmarks, 3)

    true_frame_cayleys = np.array([bezier.zero_offset_bezier(true_rot_controls, t) for t in frame_times])
    true_frame_orientations = np.array(map(cayley, true_frame_cayleys))
    true_frame_positions = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in frame_times])

    true_imu_cayleys = np.array([bezier.zero_offset_bezier(true_rot_controls, t) for t in imu_times])
    true_imu_orientations = np.array(map(cayley, true_imu_cayleys))

    true_gravity_magnitude = 9.8
    true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude
    true_accel_bias = np.random.randn(3)
    true_global_accels = np.array([bezier.zero_offset_bezier_second_deriv(true_pos_controls, t) for t in imu_times])
    true_accels = np.array([np.dot(R, a + true_gravity) + true_accel_bias
                            for R, a in zip(true_imu_orientations, true_global_accels)])

    true_features = np.array([[normalized(np.dot(R, x-p)) for x in true_landmarks]
                              for R, p in zip(true_frame_orientations, true_frame_positions)])

    print np.min(true_features.reshape((-1, 3)), axis=0)
    print np.max(true_features.reshape((-1, 3)), axis=0)

    #
    # Add sensor noise
    #

    accel_noise = 0  # 0.001
    feature_noise = 0  # 0.01
    orientation_noise = 0.01

    observed_frame_orientations = true_frame_orientations.copy()
    observed_imu_orientations = true_imu_orientations.copy()
    observed_features = true_features.copy()
    observed_accels = true_accels.copy()

    if orientation_noise > 0:
        for i, R in enumerate(observed_frame_orientations):
            R_noise = SO3.exp(np.random.randn(3)*orientation_noise)
            observed_frame_orientations[i] = np.dot(R_noise, R)
        for i, R in enumerate(observed_imu_orientations):
            R_noise = SO3.exp(np.random.randn(3)*orientation_noise)
            observed_imu_orientations[i] = np.dot(R_noise, R)

    if accel_noise > 0:
        observed_accels += np.random.randn(*observed_accels.shape) * accel_noise

    if feature_noise > 0:
        observed_features += np.random.rand(*observed_features.shape) * feature_noise

    #
    # Construct symbolic versions of the above
    #
    position_offs = 0
    accel_bias_offset = position_offs + (bezier_degree-1)*3
    gravity_offset = accel_bias_offset + 3
    num_vars = gravity_offset + 3

    sym_vars = [Polynomial.coordinate(i, num_vars, Fraction) for i in range(num_vars)]
    sym_pos_controls = np.reshape(sym_vars[position_offs:position_offs+(bezier_degree-1)*3], (bezier_degree-1, 3))
    sym_accel_bias = np.asarray(sym_vars[accel_bias_offset:accel_bias_offset+3])
    sym_gravity = np.asarray(sym_vars[gravity_offset:gravity_offset+3])

    true_vars = np.hstack((true_pos_controls.flatten(), true_accel_bias, true_gravity))
    assert len(true_vars) == len(sym_vars)

    #
    # Compute residuals
    #

    epipolar_residuals = evaluate_epipolar_residuals(sym_pos_controls, frame_times,
                                                     observed_frame_orientations, observed_features)
    accel_residuals = evaluate_accel_residuals(sym_pos_controls, sym_accel_bias, sym_gravity,
                                               imu_times, observed_accels, observed_imu_orientations)

    residuals = np.hstack((accel_residuals, epipolar_residuals))

    print '\nNum vars:', num_vars
    print 'Num residuals:', len(residuals)

    print '\nResiduals:', len(residuals)
    cost = Polynomial(num_vars)
    for r in residuals:
        cost += r*r
        print '  %f   (degree=%d, length=%d)' % (r(*true_vars), r.total_degree, len(r))

    print '\nCost:'
    print '  Num terms: %d' % len(cost)
    print '  Degree: %d' % cost.total_degree

    # Solve
    A, b, k = quadratic_form(cost)
    estimated_vars = np.squeeze(np.linalg.solve(A*2, -b))
    estimated_pos_controls = np.reshape(estimated_vars[position_offs:position_offs+(bezier_degree-1)*3], (bezier_degree-1, 3))
    estimated_positions = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t) for t in frame_times])
    estimated_accel_bias = np.asarray(estimated_vars[accel_bias_offset:accel_bias_offset+3])
    estimated_gravity = np.asarray(estimated_vars[gravity_offset:gravity_offset+3])
    re_estimated_gravity = normalized(estimated_gravity) * true_gravity_magnitude

    print '\nEstimated:'
    print estimated_vars

    print '\nGround truth:'
    print true_vars

    print '\nTotal Error:', np.linalg.norm(estimated_vars - true_vars)
    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print '  True gravity:', true_gravity
    print '  Estimated gravity:', estimated_gravity
    print '  Estimated gravity magnitude:', np.linalg.norm(estimated_gravity)
    print '  Re-normalized gravity error: ', np.linalg.norm(re_estimated_gravity - true_gravity)
    for i in range(num_frames):
        print 'Frame %d error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i]))

    fig = plt.figure(figsize=(14,6))
    ax = fig.add_subplot(1, 2, 1, projection='3d')

    ts = np.linspace(0, 1, 100)
    true_ps = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in ts])
    estimated_ps = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t) for t in ts])

    ax.plot(true_ps[:, 0], true_ps[:, 1], true_ps[:, 2], '-b')
    ax.plot(estimated_ps[:, 0], estimated_ps[:, 1], estimated_ps[:, 2], '-r')

    plt.show()
def generate_rotation(noise):
    return SO3.exp(np.random.randn(3) * noise)
def run_position_estimation():
    #
    # Parameters
    #

    bezier_degree = 4
    num_frames = 8
    num_landmarks = 120
    num_accel_readings = 50
    num_gyro_readings = 60

    gyro_timestamp_noise = 0
    gyro_reading_noise = 1e-3

    accel_timestamp_noise = 0
    accel_reading_noise = 1e-3

    frame_timestamp_noise = 0
    frame_orientation_noise = 1e-3
    feature_noise = 1e-4

    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num accel readings:', num_accel_readings
    print 'Num gyro readings:', num_gyro_readings
    print 'Bezier curve degree:', bezier_degree

    #
    # Construct ground truth
    #

    true_frame_timestamps = np.linspace(0, 1, num_frames)
    true_accel_timestamps = np.linspace(0, 1, num_accel_readings)

    true_gyro_bias = np.random.rand(3)
    true_accel_bias = np.random.randn(3)
    true_gravity_magnitude = 9.8
    true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude

    true_rot_controls = np.random.randn(bezier_degree-1, 3)
    true_pos_controls = np.random.randn(bezier_degree-1, 3)

    true_landmarks = np.random.randn(num_landmarks, 3) * 5
    true_landmarks[:, 2] += 20

    true_frame_orientations = np.array([cayley(zero_offset_bezier(true_rot_controls, t)) for t in true_frame_timestamps])
    true_frame_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in true_frame_timestamps])

    true_accel_readings = np.array([predict_accel(true_pos_controls, true_rot_controls, true_accel_bias, true_gravity, t)
                                    for t in true_accel_timestamps])

    true_features = np.array([[predict_feature(true_pos_controls, true_rot_controls, x, t) for x in true_landmarks]
                              for t in true_frame_timestamps])

    true_gyro_timestamps = np.linspace(0, 1, num_gyro_readings)
    true_gyro_readings = np.array([predict_gyro(true_rot_controls, true_gyro_bias, t)
                                   for t in true_gyro_timestamps])

    #
    # Add sensor noise
    #

    observed_gyro_timestamps = add_white_noise(true_gyro_timestamps, gyro_timestamp_noise)
    observed_gyro_readings = add_white_noise(true_gyro_readings, gyro_reading_noise)

    observed_accel_timestamps = add_white_noise(true_accel_timestamps, accel_timestamp_noise)
    observed_accel_readings = add_white_noise(true_accel_readings, accel_reading_noise)

    observed_frame_timestamps = add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = add_orientation_noise(true_frame_orientations, frame_orientation_noise)
    observed_frame_orientations[0] = true_frame_orientations[0]  # do not add noise to first frame

    observed_features = add_white_noise(true_features, feature_noise)

    #
    # Plot features
    #
    plt.clf()
    plot_tracks(true_features, 'x-g', limit=10, alpha=.4)
    plot_tracks(observed_features, 'o-r', limit=10, alpha=.4)
    plt.show()

    #
    #  Solve for orientation and gyro bias
    #

    print 'Estimating orientation...'
    estimated_gyro_bias, estimated_rot_controls = estimate_orientation(
        bezier_degree,
        observed_gyro_timestamps,
        observed_gyro_readings,
        observed_frame_timestamps,
        observed_frame_orientations)

    estimated_accel_orientations = np.array([predict_orientation(estimated_rot_controls, t)
                                             for t in observed_accel_timestamps])

    #
    # Solve for position, accel bias, and gravity
    #

    print 'Estimating position...'
    estimated_pos_controls, estimated_accel_bias, estimated_gravity = estimate_position(
        bezier_degree,
        observed_accel_timestamps,
        estimated_accel_orientations,
        observed_accel_readings,
        observed_frame_timestamps,
        observed_frame_orientations,
        observed_features)

    estimated_positions = np.array([zero_offset_bezier(estimated_pos_controls, t) for t in true_frame_timestamps])

    estimated_pfeatures = np.array([[pr(predict_feature(estimated_pos_controls, true_rot_controls, x, t))
                                     for x in true_landmarks]
                                    for t in true_frame_timestamps])
    true_pfeatures = pr(true_features)
    observed_pfeatures = pr(observed_features)

    #
    # Report
    #

    print 'Gyro bias error:', np.linalg.norm(estimated_gyro_bias - true_gyro_bias)
    print '  True gyro bias:', true_gyro_bias
    print '  Estimated gyro bias:', estimated_gyro_bias

    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print '  True accel bias:', true_accel_bias
    print '  Estimated accel bias:', estimated_accel_bias

    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print '  True gravity:', true_gravity
    print '  Estimated gravity:', estimated_gravity
    print '  Estimated gravity magnitude:', np.linalg.norm(estimated_gravity)
    for i in range(num_frames):
        print 'Frame %d position error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i]))

    #
    # Plot orientation results
    #

    plot_timestamps = np.linspace(0, 1, 50)
    estimated_gyro_readings = np.array([predict_gyro(estimated_rot_controls, true_gyro_bias, t)
                                        for t in plot_timestamps])

    true_orientations = np.array([SO3.log(predict_orientation(true_rot_controls, t))
                                  for t in plot_timestamps])
    estimated_orientations = np.array([SO3.log(predict_orientation(estimated_rot_controls, t))
                                       for t in plot_timestamps])
    observed_orientations = np.array(map(SO3.log, observed_frame_orientations))

    plt.figure(figsize=(14, 6))
    plt.subplot(1, 2, 1)
    plt.title('Gyro readings')
    plt.plot(plot_timestamps, estimated_gyro_readings, ':', label='estimated', alpha=1)
    plt.plot(true_gyro_timestamps, true_gyro_readings, '-', label='true', alpha=.3)
    plt.plot(true_gyro_timestamps, observed_gyro_readings, 'x', label='observed')
    plt.xlim(-.1, 1.5)
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Orientation')
    plt.plot(plot_timestamps, estimated_orientations, ':', label='estimated', alpha=1)
    plt.plot(plot_timestamps, true_orientations, '-', label='true', alpha=.3)
    plt.plot(true_frame_timestamps, observed_orientations, 'x', label='observed')
    plt.xlim(-.1, 1.5)
    plt.legend()

    #
    # Plot position results
    #

    plot_timestamps = np.linspace(0, 1, 100)

    true_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in plot_timestamps])
    estimated_positions = np.array([zero_offset_bezier(estimated_pos_controls, t) for t in plot_timestamps])

    true_accels = np.array([predict_accel(true_pos_controls, true_rot_controls, true_accel_bias, true_gravity, t)
                            for t in plot_timestamps])
    estimated_accels = np.array([predict_accel(estimated_pos_controls, true_rot_controls, estimated_accel_bias, estimated_gravity, t)
                                 for t in plot_timestamps])

    fig = plt.figure(figsize=(14, 10))
    ax = fig.add_subplot(2, 2, 1, projection='3d')
    ax.plot(true_positions[:, 0], true_positions[:, 1], true_positions[:, 2], '-b')
    ax.plot(estimated_positions[:, 0], estimated_positions[:, 1], estimated_positions[:, 2], '-r')
    #ax.plot(true_landmarks[:,0], true_landmarks[:,1], true_landmarks[:,2], '.k', alpha=.2)

    ax = fig.add_subplot(2, 2, 2)
    ax.plot(plot_timestamps, estimated_accels, ':', label='estimated', alpha=1)
    ax.plot(plot_timestamps, true_accels, '-', label='true', alpha=.3)
    ax.plot(observed_accel_timestamps, observed_accel_readings, 'x', label='observed')
    ax.legend()
    ax.set_xlim(-.1, 1.5)

    ax = fig.add_subplot(2, 2, 3)
    ax.plot(true_pfeatures[1, :, 0], true_pfeatures[1, :, 1], 'x', label='true', alpha=.8)
    ax.plot(estimated_pfeatures[1, :, 0], estimated_pfeatures[1, :, 1], 'o', label='estimated', alpha=.4)

    ax = fig.add_subplot(2, 2, 4)
    ax.plot(true_pfeatures[-1, :, 0], true_pfeatures[-1, :, 1], '.', label='true', alpha=.8)
    ax.plot(observed_pfeatures[-1, :, 0], observed_pfeatures[-1, :, 1], 'x', label='observed', alpha=.8)
    ax.plot(estimated_pfeatures[-1, :, 0], estimated_pfeatures[-1, :, 1], 'o', label='estimated', alpha=.4)

    plt.show()
def angular_velocity_left(f, t, step=1e-8):
    return SO3.log(np.dot(f(t + step), f(t).T)) / step
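# A minimal sketch (hypothetical check, not part of the original code): for a
# constant-angular-velocity trajectory f(t) = SO3.exp(t * w) the world-frame
# finite difference above agrees with angular_velocity_right, since
# f(t + dt).dot(f(t).T) also equals SO3.exp(dt * w).
def check_angular_velocity_left():
    w = np.array([.1, -.2, .3])
    f = lambda t: SO3.exp(t * w)
    assert np.allclose(angular_velocity_left(f, .7, step=1e-6),
                       angular_velocity_right(f, .7, step=1e-6))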
def perturb_normalized(R, t, delta):
    # Note: u and v are assumed to be module-level basis vectors spanning the
    # plane orthogonal to t, so the perturbed translation stays on the unit
    # sphere after normalization.
    assert len(delta) == 5
    return np.dot(SO3.exp(delta[:3]), R), normalized(t + delta[3]*u + delta[4]*v)
def perturb_rotation(R, noise):
    return dots(R, SO3.exp(np.random.randn(3) * noise))
def add_orientation_noise(x, sigma):
    x = np.atleast_3d(x)
    return np.array([np.dot(xi, SO3.exp(np.random.randn(3)*sigma)) for xi in x])
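# A minimal sanity check (hypothetical, not part of the original code): with
# sigma = 0 the rotations are returned unchanged, since SO3.exp(0) is the
# identity.
def check_add_orientation_noise():
    Rs = np.array([SO3.exp(np.random.randn(3)) for _ in range(3)])
    assert np.allclose(add_orientation_noise(Rs, 0.), Rs)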
def run_optimize():
    bezier_order = 3
    num_gyro_readings = 50
    num_frames = 5

    frame_timestamp_noise = 1e-3
    frame_orientation_noise = .02
    gyro_timestamp_noise = 1e-3
    gyro_noise = .01

    #path = os.path.expanduser('~/Data/Initialization/closed_flat/gyro.txt')
    #gyro_data = np.loadtxt(path)
    #gyro_timestamps = gyro_data[:,0]
    #gyro_readings = gyro_data[:,1:]

    true_gyro_timestamps = np.linspace(0, 1, num_gyro_readings)
    true_params = np.random.rand(bezier_order, 3)
    true_gyro_bias = np.random.rand(3)
    true_gyro_readings = np.array([predict_gyro(true_params, true_gyro_bias, t)
                                   for t in true_gyro_timestamps])

    true_frame_timestamps = np.linspace(0, 1, num_frames)
    true_frame_orientations = np.array([predict_orientation(true_params, t) for t in true_frame_timestamps])

    observed_gyro_timestamps = add_white_noise(true_gyro_timestamps, gyro_timestamp_noise)
    observed_gyro_readings = add_white_noise(true_gyro_readings, gyro_noise)
    observed_frame_timestamps = add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = add_orientation_noise(true_frame_orientations, frame_orientation_noise)

    estimated_gyro_bias, estimated_params = estimate_orientation(bezier_order,
                                                                 observed_gyro_timestamps,
                                                                 observed_gyro_readings,
                                                                 observed_frame_timestamps,
                                                                 observed_frame_orientations)

    print '\nTrue params:'
    print true_params

    print '\nEstimated params:'
    print estimated_params

    print '\nTrue gyro bias:'
    print true_gyro_bias

    print '\nEstimated gyro bias:'
    print estimated_gyro_bias

    plot_timestamps = np.linspace(0, 1, 50)

    estimated_gyro_readings = np.array([predict_gyro(estimated_params, true_gyro_bias, t)
                                        for t in plot_timestamps])

    true_orientations = np.array([SO3.log(predict_orientation(true_params, t))
                                  for t in plot_timestamps])
    observed_orientations = np.array(map(SO3.log, observed_frame_orientations))
    estimated_orientations = np.array([SO3.log(predict_orientation(estimated_params, t))
                                       for t in plot_timestamps])

    plt.figure(1)
    plt.plot(true_gyro_timestamps, true_gyro_readings, '-', label='true')
    plt.plot(true_gyro_timestamps, observed_gyro_readings, 'x', label='observed')
    plt.plot(plot_timestamps, estimated_gyro_readings, ':', label='estimated')
    plt.xlim(-.1, 1.5)
    plt.legend()

    plt.figure(2)
    plt.plot(plot_timestamps, true_orientations, '-', label='true')
    plt.plot(true_frame_timestamps, observed_orientations, 'x', label='observed')
    plt.plot(plot_timestamps, estimated_orientations, ':', label='estimated')
    plt.xlim(-.1, 1.5)
    plt.legend()

    plt.show()