Example #1
    def maha_test(self, x, P, kind, z, R, extra_args=[], maha_thresh=0.95):
        # init vars
        z = z.reshape((-1, 1))
        h = np.zeros(z.shape, dtype=np.float64)
        H = np.zeros((z.shape[0], self.dim_x), dtype=np.float64)

        # C functions
        self.hs[kind](x, extra_args, h)
        self.Hs[kind](x, extra_args, H)

        # y is the innovation (measurement residual)
        y = z - h

        # map the measurement Jacobian onto the error state (H_mod is identity unless running an ESKF)
        H_mod = np.zeros((x.shape[0], P.shape[0]), dtype=np.float64)
        self.H_mod(x, H_mod)
        H = H.dot(H_mod)

        a = np.linalg.inv(H.dot(P).dot(H.T) + R)
        maha_dist = y.T.dot(a.dot(y))
        return bool(maha_dist <= chi2_ppf(maha_thresh, y.shape[0]))
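The gate above compares the squared Mahalanobis distance of the innovation against an inverse chi-squared CDF value (chi2_ppf is presumably such a helper). A minimal standalone sketch of the same test, using scipy.stats.chi2 as an assumed stand-in for that helper:

import numpy as np
from scipy.stats import chi2


def maha_gate(y, S, thresh=0.95):
    # y: innovation (n x 1), S: innovation covariance (n x n)
    maha_dist = (y.T @ np.linalg.solve(S, y)).item()
    # accept the measurement only if it falls inside the chi-squared gate
    return maha_dist <= chi2.ppf(thresh, df=y.shape[0])


# e.g. a 2-D innovation well inside the 95% gate (chi2.ppf(0.95, 2) ~= 5.99)
y = np.array([[0.5], [1.0]])
S = np.eye(2)
print(maha_gate(y, S))  # True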
Example #2
    def _update_python(self, x, P, kind, z, R, extra_args=[]):
        # init vars
        z = z.reshape((-1, 1))
        h = np.zeros(z.shape, dtype=np.float64)
        H = np.zeros((z.shape[0], self.dim_x), dtype=np.float64)

        # C functions
        self.hs[kind](x, extra_args, h)
        self.Hs[kind](x, extra_args, H)

        # y is the innovation (measurement residual)
        y = z - h

        # *** same above this line ***

        if self.msckf and kind in self.Hes:
            # project the residual onto the left null space of He so it no
            # longer depends on the feature position error (MSCKF update)
            He = np.zeros((z.shape[0], len(extra_args)), dtype=np.float64)
            self.Hes[kind](x, extra_args, He)

            # TODO: Don't call a function here, do projection locally
            A = null(He.T)

            y = A.T.dot(y)
            H = A.T.dot(H)
            R = A.T.dot(R.dot(A))

            # TODO If nullspace isn't the dimension we want
            if A.shape[1] + He.shape[1] != A.shape[0]:
                print(
                    'Warning: null space projection failed, measurement ignored'
                )
                return x, P, np.zeros(A.shape[0] - He.shape[1])

        # map the measurement Jacobian onto the error state (H_mod is identity unless running an ESKF)
        H_mod = np.zeros((x.shape[0], P.shape[0]), dtype=np.float64)
        self.H_mod(x, H_mod)
        H = H.dot(H_mod)

        # Do Mahalanobis distance test
        # currently just runs on msckf observations
        # could run on anything if needed
        if self.msckf and kind in self.maha_test_kinds:
            a = np.linalg.inv(H.dot(P).dot(H.T) + R)
            maha_dist = y.T.dot(a.dot(y))
            if maha_dist > chi2_ppf(0.95, y.shape[0]):
                # outlier: inflate R so this measurement has effectively no weight
                R = 10e16 * R

        # *** same below this line ***

        # Outlier resilient weighting as described in:
        # "A Kalman Filter for Robust Outlier Detection - Jo-Anne Ting, ..."
        weight = 1  # (1.5)/(1 + np.sum(y**2)/np.sum(R))

        # innovation covariance and Kalman gain
        S = dot(dot(H, P), H.T) + R / weight
        K = solve(S, dot(H, P.T)).T
        I_KH = np.eye(P.shape[0]) - dot(K, H)

        # update actual state
        delta_x = dot(K, y)
        # Joseph form covariance update: keeps P symmetric and positive semi-definite
        P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)

        # inject observed error into state
        x_new = np.zeros(x.shape, dtype=np.float64)
        self.err_function(x, delta_x, x_new)
        return x_new, P, y.flatten()
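The null helper used in the MSCKF branch is not shown in this snippet; a common implementation returns an orthonormal basis for the null space from the SVD. A sketch under that assumption (not necessarily the exact helper this code imports):

import numpy as np


def null(A, rcond=1e-13):
    # Orthonormal basis for the null space of A: returns N with A @ N ~= 0.
    _, s, vh = np.linalg.svd(A)
    rank = int(np.sum(s > rcond * s[0]))
    return vh[rank:].conj().T


# In the update above, He has shape (z_dim, len(extra_args)), so A = null(He.T)
# spans the directions of the residual that do not depend on the feature error.
He_T = np.array([[1.0, 0.0, 0.0]])  # toy 1x3 stand-in for He.T
A = null(He_T)
print(A.shape, np.allclose(He_T @ A, 0))  # (3, 2) True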
Example #3
def gen_code(name,
             f_sym,
             dt_sym,
             x_sym,
             obs_eqs,
             dim_x,
             dim_err,
             eskf_params=None,
             msckf_params=None,
             maha_test_kinds=[]):
    # eskf_params optionally provides the state transition matrix, H modifier
    # and err_function needed when an error-state Kalman filter (ESKF)
    # is desired. Best described in "Quaternion kinematics for the
    # error-state Kalman filter" by Joan Sola.

    if eskf_params:
        err_eqs = eskf_params[0]
        inv_err_eqs = eskf_params[1]
        H_mod_sym = eskf_params[2]
        f_err_sym = eskf_params[3]
        x_err_sym = eskf_params[4]
    else:
        nom_x = sp.MatrixSymbol('nom_x', dim_x, 1)
        true_x = sp.MatrixSymbol('true_x', dim_x, 1)
        delta_x = sp.MatrixSymbol('delta_x', dim_x, 1)
        err_function_sym = sp.Matrix(nom_x + delta_x)
        inv_err_function_sym = sp.Matrix(true_x - nom_x)
        err_eqs = [err_function_sym, nom_x, delta_x]
        inv_err_eqs = [inv_err_function_sym, nom_x, true_x]

        H_mod_sym = sp.Matrix(np.eye(dim_x))
        f_err_sym = f_sym
        x_err_sym = x_sym

    # This configures the multi-state augmentation
    # needed for EKF-SLAM with MSCKF (Mourikis et al 2007)
    if msckf_params:
        msckf = True
        dim_main = msckf_params[0]  # size of the main state
        dim_augment = msckf_params[1]  # size of one augment state chunk
        dim_main_err = msckf_params[2]
        dim_augment_err = msckf_params[3]
        N = msckf_params[4]
        feature_track_kinds = msckf_params[5]
        assert dim_main + dim_augment * N == dim_x
        assert dim_main_err + dim_augment_err * N == dim_err
    else:
        msckf = False
        dim_main = dim_x
        dim_augment = 0
        dim_main_err = dim_err
        dim_augment_err = 0
        N = 0

    # linearize with jacobians
    F_sym = f_err_sym.jacobian(x_err_sym)

    if eskf_params:
        for sym in x_err_sym:
            F_sym = F_sym.subs(sym, 0)

    assert dt_sym in F_sym.free_symbols

    for i in range(len(obs_eqs)):
        obs_eqs[i].append(obs_eqs[i][0].jacobian(x_sym))
        if msckf and obs_eqs[i][1] in feature_track_kinds:
            obs_eqs[i].append(obs_eqs[i][0].jacobian(obs_eqs[i][2]))
        else:
            obs_eqs[i].append(None)

    # collect sympy functions
    sympy_functions = []

    # error functions
    sympy_functions.append(('err_fun', err_eqs[0], [err_eqs[1], err_eqs[2]]))
    sympy_functions.append(
        ('inv_err_fun', inv_err_eqs[0], [inv_err_eqs[1], inv_err_eqs[2]]))

    # H modifier for ESKF updates
    sympy_functions.append(('H_mod_fun', H_mod_sym, [x_sym]))

    # state propagation function
    sympy_functions.append(('f_fun', f_sym, [x_sym, dt_sym]))
    sympy_functions.append(('F_fun', F_sym, [x_sym, dt_sym]))

    # observation functions
    for h_sym, kind, ea_sym, H_sym, He_sym in obs_eqs:
        sympy_functions.append(('h_%d' % kind, h_sym, [x_sym, ea_sym]))
        sympy_functions.append(('H_%d' % kind, H_sym, [x_sym, ea_sym]))
        if msckf and kind in feature_track_kinds:
            sympy_functions.append(('He_%d' % kind, He_sym, [x_sym, ea_sym]))

    # Generate and wrap all the C code
    header, code = sympy_into_c(sympy_functions)
    extra_header = "#define DIM %d\n" % dim_x
    extra_header += "#define EDIM %d\n" % dim_err
    extra_header += "#define MEDIM %d\n" % dim_main_err
    extra_header += "typedef void (*Hfun)(double *, double *, double *);\n"

    extra_header += "\nvoid predict(double *x, double *P, double *Q, double dt);"

    extra_post = ""

    for h_sym, kind, ea_sym, H_sym, He_sym in obs_eqs:
        if msckf and kind in feature_track_kinds:
            He_str = 'He_%d' % kind
            # ea_dim = ea_sym.shape[0]
        else:
            He_str = 'NULL'
            # ea_dim = 1 # not really dim of ea but makes c function work
        # Mahalanobis distance threshold for outlier detection
        maha_thresh = chi2_ppf(0.95, int(h_sym.shape[0]))
        maha_test = kind in maha_test_kinds
        extra_post += """
      void update_%d(double *in_x, double *in_P, double *in_z, double *in_R, double *in_ea) {
        update<%d,%d,%d>(in_x, in_P, h_%d, H_%d, %s, in_z, in_R, in_ea, MAHA_THRESH_%d);
      }
    """ % (kind, h_sym.shape[0], 3, maha_test, kind, kind, He_str, kind)
        extra_header += "\nconst static double MAHA_THRESH_%d = %f;" % (
            kind, maha_thresh)
        extra_header += "\nvoid update_%d(double *, double *, double *, double *, double *);" % kind

    code += '\nextern "C"{\n' + extra_header + "\n}\n"
    code += "\n" + open(os.path.join(TEMPLATE_DIR, "ekf_c.c")).read()
    code += '\nextern "C"{\n' + extra_post + "\n}\n"
    header += "\n" + extra_header

    write_code(name, code, header)
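gen_code expects the models as sympy expressions and linearizes them with .jacobian() (the F_sym computation above). A toy sketch of that step for a two-state constant-velocity model; the symbols below are illustrative and not taken from any particular caller of gen_code:

import sympy as sp

dim_x = 2
dt_sym = sp.Symbol('dt')
x_sym = sp.MatrixSymbol('x', dim_x, 1)

# constant-velocity model: position integrates velocity, velocity is constant
f_sym = sp.Matrix([x_sym[0, 0] + dt_sym * x_sym[1, 0],
                   x_sym[1, 0]])

# same linearization gen_code performs: F_sym = f_err_sym.jacobian(x_err_sym)
F_sym = f_sym.jacobian(sp.Matrix(x_sym))
print(F_sym)                          # Matrix([[1, dt], [0, 1]])
assert dt_sym in F_sym.free_symbols   # the same sanity check gen_code makes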