Example No. 1
def test_saver_kf():
    kf = kinematic_kf(3, 2, dt=0.1, dim_z=3)
    s = Saver(kf)

    for i in range(10):
        kf.predict()
        kf.update([i, i, i])
        s.save()

    # to_array() will raise if the Saver did not record the state correctly
    s.to_array()
    assert len(s.x) == 10
    assert len(s.y) == 10
    assert len(s.K) == 10
    assert (len(s) == len(s.x))

    # Force an exception to occur by malforming K
    kf = kinematic_kf(3, 2, dt=0.1, dim_z=3)
    kf.K = 0.
    s2 = Saver(kf)

    for i in range(10):
        kf.predict()
        kf.update([i, i, i])
        s2.save()

    # this should raise an exception because K is malformed
    try:
        s2.to_array()
        assert False, "Should not have been able to convert s.K into an array"
    except Exception:
        pass
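
The examples on this page all follow the same workflow: wrap a filter object in a Saver, call save() after each predict/update step, and finally call to_array() so every recorded attribute becomes a NumPy array indexed by time step. The snippet below is a minimal, self-contained sketch of that pattern; it is illustrative only and assumes filterpy's kinematic_kf helper, as used in Example No. 1.

from filterpy.common import Saver, kinematic_kf

kf = kinematic_kf(dim=1, order=1, dt=0.1)  # small 1D constant-velocity filter
s = Saver(kf)

for z in range(10):
    kf.predict()
    kf.update([float(z)])
    s.save()          # snapshot every attribute of kf at this step

s.to_array()          # convert the per-step lists into NumPy arrays
print(len(s.x))       # one saved state vector per call to save(), here 10
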
Example No. 2
def test_1d_array():
    f1 = GHFilter (0, 0, 1, .8, .2)
    f2 = GHFilter (array([0]), array([0]), 1, .8, .2)

    str(f1)
    str(f2)

    # test both give same answers, and that we can
    # use a scalar for the measurement
    for i in range(1,10):
        f1.update(i)
        f2.update(i)

        assert f1.x == f2.x[0]
        assert f1.dx == f2.dx[0]

        assert f1.VRF() == f2.VRF()

    # test using an array for the measurement
    s1 = Saver(f1)
    s2 = Saver(f2)

    for i in range(1,10):
        f1.update(i)
        f2.update(array([i]))

        s1.save()
        s2.save()

        assert f1.x == f2.x[0]
        assert f1.dx == f2.dx[0]

        assert f1.VRF() == f2.VRF()
    s1.to_array()
    s2.to_array()
Example No. 3
def test_save_properties():
    global f, s

    class Foo(object):
        aa = 3

        def __init__(self):
            self.x = 7.
            self.a = None

        @property
        def ll(self):
            self.a = Foo.aa
            Foo.aa += 1
            return self.a

    f = Foo()
    assert f.a is None
    s = Saver(f)
    s.save()  # try to trigger property writing to Foo.a

    assert s.a[0] == f.a
    assert s.ll[0] == f.a
    assert f.a == 3

    s.save()
    assert s.a[1] == f.a
    assert s.ll[1] == f.a
    assert f.a == 4
Example No. 4
def run_filter(list_args: List[float],
               data: pd.DataFrame) -> Tuple[KalmanFilter, Saver, np.ndarray]:

    if isinstance(data, tuple):
        #  data is the first *args
        data = data[0]

    # unpack the parameters from list_args
    s_noise_Q00, r_noise_Q11, q_meas_error_R00, r_meas_error_R11 = list_args

    kf = init_filter(
        r0=0.0,
        S0=5.74,
        s_variance_P00=1,
        r_variance_P11=100,
        s_noise_Q00=s_noise_Q00,
        r_noise_Q11=r_noise_Q11,
        q_meas_error_R00=q_meas_error_R00,
        r_meas_error_R11=r_meas_error_R11,
    )

    s = Saver(kf)
    lls = []

    for z in np.vstack([data["q_obs"], data["r_obs"]]).T:
        kf.predict()
        kf.update(z)
        log_likelihood = kf.log_likelihood_of(z)
        lls.append(log_likelihood)
        s.save()

    s.to_array()
    lls = np.array(lls)

    return kf, s, lls
Example No. 5
def myKalmanFilter(Kalman_filter=None, zs=None):
    s = Saver(Kalman_filter)
    Kalman_filter.batch_filter(zs, saver=s)
    s.to_array()
    zs_filtered = s.x[:, 0]  # first state component (e.g. position estimate)
    err = s.x[:, 1]          # second state component (e.g. velocity estimate)
    return zs_filtered, err
Example No. 6
def test_saver_UKF():
    def fx(x, dt):
        F = np.array(
            [[1, dt, 0, 0], [0, 1, 0, 0], [0, 0, 1, dt], [0, 0, 0, 1]],
            dtype=float)

        return np.dot(F, x)

    def hx(x):
        return np.array([x[0], x[2]])

    dt = 0.1
    points = MerweScaledSigmaPoints(4, .1, 2., -1)
    kf = UnscentedKalmanFilter(dim_x=4,
                               dim_z=2,
                               dt=dt,
                               fx=fx,
                               hx=hx,
                               points=points)

    z_std = 0.1
    kf.R = np.diag([z_std**2, z_std**2])  # measurement noise (std dev z_std)
    kf.x = np.array([-1., 1., -1., 1])
    kf.P *= 1.

    zs = [[i, i] for i in range(40)]
    s = Saver(kf, skip_private=False, skip_callable=False, ignore=['z_mean'])
    for z in zs:
        kf.predict()
        kf.update(z)
        #print(kf.x, kf.log_likelihood, kf.P.diagonal())
        s.save()
    s.to_array()
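
Example No. 6 above passes extra keyword arguments to the Saver constructor. As a rough guide based on how they are used here: skip_private and skip_callable control whether attributes whose names start with an underscore, and callable attributes, are also recorded, while ignore lists attribute names that should never be saved ('z_mean' in the example). A minimal sketch, assuming the same kinematic_kf helper:

from filterpy.common import Saver, kinematic_kf

kf = kinematic_kf(3, 2, dt=0.1, dim_z=3)
# also record private and callable attributes, but never 'z_mean'
s = Saver(kf, skip_private=False, skip_callable=False, ignore=['z_mean'])
kf.predict()
kf.update([1., 1., 1.])
s.save()
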
Example No. 7
def _test_log_likelihood():

    from filterpy.common import Saver

    def fx(x, dt):
        F = np.array(
            [[1, dt, 0, 0], [0, 1, 0, 0], [0, 0, 1, dt], [0, 0, 0, 1]],
            dtype=float)

        return np.dot(F, x)

    def hx(x):
        return np.array([x[0], x[2]])

    dt = 0.1
    points = MerweScaledSigmaPoints(4, .1, 2., -1)
    kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)

    z_std = 0.1
    kf.R = np.diag([z_std**2, z_std**2])  # measurement noise (std dev z_std)
    kf.Q = Q_discrete_white_noise(dim=2, dt=dt, var=1.1**2, block_size=2)

    kf.x = np.array([-1., 1., -1., 1])
    kf.P *= 1.

    zs = [[i + randn() * z_std, i + randn() * z_std] for i in range(40)]
    s = Saver(kf)
    for z in zs:
        kf.predict()
        kf.update(z)
        print(kf.x, kf.log_likelihood, kf.P.diagonal())
        s.save()
    s.to_array()
    plt.plot(s.x[:, 0], s.x[:, 2])
Example No. 8
def test_noisy_1d():
    f = KalmanFilter(dim_x=2, dim_z=1)

    f.x = np.array([[2.],
                    [0.]])       # initial state (location and velocity)

    f.F = np.array([[1., 1.],
                    [0., 1.]])    # state transition matrix

    f.H = np.array([[1., 0.]])    # Measurement function
    f.P *= 1000.                  # covariance matrix
    f.R = 5                       # measurement uncertainty
    f.Q = 0.0001                  # process uncertainty

    measurements = []
    results = []

    zs = []
    for t in range(100):
        # create measurement = t plus white noise
        z = t + random.randn()*20
        zs.append(z)

        # perform kalman filtering
        f.update(z)
        f.predict()

        # save data
        results.append(f.x[0, 0])
        measurements.append(z)

        # test mahalanobis
        a = np.zeros(f.y.shape)
        maha = scipy_mahalanobis(a, f.y, f.SI)
        assert f.mahalanobis == approx(maha)


    # now do a batch run with the stored z values so we can test that
    # it is working the same as the recursive implementation.
    # give slightly different P so result is slightly different
    f.x = np.array([[2., 0]]).T
    f.P = np.eye(2) * 100.
    s = Saver(f)
    m, c, _, _ = f.batch_filter(zs, update_first=False, saver=s)
    s.to_array()
    assert len(s.x) == len(zs)
    assert len(s.x) == len(s)

    # plot data
    if DO_PLOT:
        p1, = plt.plot(measurements, 'r', alpha=0.5)
        p2, = plt.plot(results, 'b')
        p4, = plt.plot(m[:, 0], 'm')
        p3, = plt.plot([0, 100], [0, 100], 'g')  # perfect result
        plt.legend([p1, p2, p3, p4],
                   ["noisy measurement", "KF output", "ideal", "batch"], loc=4)
        plt.show()
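
Examples No. 5 and No. 8 also show that batch_filter() accepts a saver argument, in which case the filter calls save() for you on every step of the batch. A short sketch of that variant, again assuming the kinematic_kf helper:

from filterpy.common import Saver, kinematic_kf

kf = kinematic_kf(dim=1, order=1, dt=1.0)
zs = [[float(i)] for i in range(20)]

s = Saver(kf)
kf.batch_filter(zs, saver=s)   # save() is called internally on each step
s.to_array()
assert len(s.x) == len(zs)
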
Example No. 9
def nu4_builder(Qvar, Rvar, dt):
    F4 = calc_F(Am4, dt)
    f_var = numpy.diag(Qvar)**2  # (kg/h)**2 Will be larger for
    Q4 = numpy.atleast_2d(F4[:, 2:]) @ f_var @ numpy.atleast_2d(F4[:, 2:]).T
    R4 = numpy.eye(2) * Rvar ** 2
    four_states = OwnKF(4, 2)
    four_states.assign_discrete(F4, Q4, H4, R4, P4, numpy.append(lin_m0, [0, 0]))
    sav = Saver(four_states)
    return four_states, sav, null_func
Example No. 10
def nu3_builder(Qvar, Rvar, dt):
    F3 = calc_F(Am3, dt)
    f_var = numpy.array([[Qvar**2]])  # (kg/h)**2 Will be larger for
    Q3 = numpy.atleast_2d(F3[:, 2]).T @ f_var @ numpy.atleast_2d(F3[:, 2])
    R3 = numpy.eye(2) * Rvar ** 2
    three_state = OwnKF(3, 2)
    three_state.assign_discrete(F3, Q3, H3, R3, P3, numpy.append(lin_m0, 0))
    sav = Saver(three_state)
    return three_state, sav, null_func
Example No. 11
def test_1d():
    f = KalmanFilter(dim_x=2, dim_z=1)
    inf = InformationFilter(dim_x=2, dim_z=1)

    # ensure __repr__ doesn't raise
    str(inf)


    f.x = np.array([[2.],
                    [0.]])       # initial state (location and velocity)

    inf.x = f.x.copy()
    f.F = (np.array([[1.,1.],
                     [0.,1.]]))    # state transition matrix

    inf.F = f.F.copy()
    f.H = np.array([[1.,0.]])      # Measurement function
    inf.H = np.array([[1.,0.]])    # Measurement function
    f.R *= 5                       # measurement uncertainty
    inf.R_inv *= 1./5              # measurement uncertainty (inverse form)
    f.Q *= 0.0001                  # process uncertainty
    inf.Q *= 0.0001


    m = []
    r = []
    r2 = []
    zs = []
    s = Saver(inf)
    for t in range (100):
        # create measurement = t plus white noise
        z = t + random.randn()*20
        zs.append(z)

        # perform kalman filtering
        f.update(z)
        f.predict()

        inf.update(z)
        inf.predict()

        # save data
        r.append (f.x[0,0])
        r2.append (inf.x[0,0])
        m.append(z)
        print(inf.y)
        print(inf.SI)
        s.save()

        assert abs(f.x[0,0] - inf.x[0,0]) < 1.e-12


    if DO_PLOT:
        plt.plot(m)
        plt.plot(r)
        plt.plot(r2)
Example No. 12
def nu2_builder(Qvar, Rvar, dt):
    F1 = calc_F(Am1, dt)
    F3 = calc_F(Am3, dt)
    f_var = numpy.array([[Qvar ** 2]])
    Q1 = numpy.atleast_2d(F3[:2, 2]).T @ f_var @ numpy.atleast_2d(F3[:2, 2])
    R1 = numpy.eye(2) * Rvar ** 2
    two_no_u = OwnKF(2, 2)
    two_no_u.assign_discrete(F1, Q1, H1, R1, P1, lin_m0)
    sav = Saver(two_no_u)
    return two_no_u, sav, null_func
Example No. 13
    def _fitTheModel(self, saveDirectory):

        '''Apply the Kalman Filter to estimate the hidden state at time t for t = [0...ntimesteps-1] given observations up to and including time t'''

        # Initialize the parameters:
        self._defineModelParameters()

        # Create Saver object
        self.saverObject = Saver(self.kalmanFilter)

        # Loop the portfolio dict:
        for eachAssetName, eachAssetDataFrame in self.PORTFOLIO._portfolioDict.items():

            # Create a container for the looping:
            self.means = []

            # Re-initialize the parameters:
            self._defineModelParameters()

            # Loop each dataframe row:
            for eachIndex, eachRow in eachAssetDataFrame.iterrows():

                # Just perform the standard predict/update loop
                self.kalmanFilter.predict()
                self.kalmanFilter.update(eachRow['close'])

                # Append to list:
                self.means.append(self.kalmanFilter.x[0])
            
            # Create the new column in the dataframe: 
            eachAssetDataFrame['KFValue'] = self.means

            # NOTE: Create the slope of the KF:
            eachAssetDataFrame['KFValue_Slope'] = eachAssetDataFrame['KFValue'].pct_change()
            eachAssetDataFrame['KFValue_Binary'] = (eachAssetDataFrame['KFValue_Slope'] > 0).astype(int)
            eachAssetDataFrame['KFValue_PlotCol'] = eachAssetDataFrame['KFValue']
            #print(eachAssetDataFrame['KFValue_Slope'].head())
            #print(eachAssetDataFrame['KFValue_Binary'].head())

            # NOTE: Create a moving average to see the difference:
            # SMA
            eachAssetDataFrame['MA'] = eachAssetDataFrame['close'].rolling(window=25).mean()
            # WMA
            #eachAssetDataFrame['MA'] = tb.WMA(eachAssetDataFrame['close'].values, timeperiod=23)
            # EMA
            #eachAssetDataFrame['MA'] = tb.EMA(eachAssetDataFrame['close'].values, timeperiod=23)
            eachAssetDataFrame.dropna(how='any', inplace=True)

            # Save the filter values for further reference:
            self._saveModel(eachAssetName, saveDirectory)  

        # Print:
        logger.warning(self.PORTFOLIO._portfolioDict['WS30'])
Example No. 14
def test_MMAE2():
    dt = 0.1
    pos, zs = generate_data(120, noise_factor=0.6)
    z_xs = zs[:, 0]

    dt = 0.1
    ca = make_ca_filter(dt, noise_factor=0.6)
    cv = make_ca_filter(dt, noise_factor=0.6)
    cv.F[:, 2] = 0  # remove acceleration term
    cv.P[2, 2] = 0
    cv.Q[2, 2] = 0

    filters = [cv, ca]

    bank = MMAEFilterBank(filters, (0.5, 0.5), dim_x=3, H=ca.H)

    xs, probs = [], []
    cvxs, caxs = [], []
    s = Saver(bank)
    for i, z in enumerate(z_xs):
        bank.predict()
        bank.update(z)
        xs.append(bank.x[0])
        cvxs.append(cv.x[0])
        caxs.append(ca.x[0])
        print(i, cv.likelihood, ca.likelihood, bank.p)
        s.save()
        probs.append(bank.p[0] / bank.p[1])
    s.to_array()

    if DO_PLOT:
        plt.subplot(121)
        plt.plot(xs)
        plt.plot(pos[:, 0])
        plt.subplot(122)
        plt.plot(probs)
        plt.title('probability ratio p(cv)/p(ca)')

        plt.figure()
        plt.plot(cvxs, label='CV')
        plt.plot(caxs, label='CA')
        plt.plot(pos[:, 0])
        plt.legend()

        plt.figure()
        plt.plot(xs)
        plt.plot(pos[:, 0])

    return bank
Example No. 15
def test_1d_const_vel():

    def hx(x):
        return np.array([x[0]])

    F = np.array([[1., 1.],[0., 1.]])
    def fx(x, dt):
        return np.dot(F, x)

    x = np.array([0., 1.])
    P = np.eye(2)* 100.
    f = EnKF(x=x, P=P, dim_z=1, dt=1., N=8, hx=hx, fx=fx)

    std_noise = 10.

    f.R *= std_noise**2
    f.Q = Q_discrete_white_noise(2, 1., .001)

    measurements = []
    results = []
    ps = []
    zs = []
    s = Saver(f)
    for t in range (0,100):
        # create measurement = t plus white noise
        z = t + randn()*std_noise
        zs.append(z)

        f.predict()
        f.update(np.asarray([z]))

        # save data
        results.append (f.x[0])
        measurements.append(z)
        ps.append(3*(f.P[0,0]**.5))
        s.save()
    s.to_array()

    results = np.asarray(results)
    ps = np.asarray(ps)

    if DO_PLOT:
        plt.plot(results, label='EnKF')
        plt.plot(measurements, c='r', label='z')
        plt.plot(results - ps, c='k', linestyle='--', label=r'3$\sigma$')
        plt.plot(results+ps, c='k', linestyle='--')
        plt.legend(loc='best')
        #print(ps)
    return f
Example No. 16
def wu2_builder(Qvar, Rvar, dt):
    F1 = calc_F(Am1, dt)
    F3 = calc_F(Am3, dt)
    f_var = numpy.array([[Qvar ** 2]])
    Q1 = numpy.atleast_2d(F3[:2, 2]).T @ f_var @ numpy.atleast_2d(F3[:2, 2])
    R1 = numpy.eye(2) * Rvar ** 2
    G2 = calc_G(Am1, B2, dt)

    two_with_u = OwnKF(2, 2, 2)
    two_with_u.assign_discrete(F1, Q1, H1, R1, P1, lin_m0, G2)
    sav = Saver(two_with_u)

    def ufunc(in_flows):
        return ((numpy.sum(in_flows, axis=1)-nom_F)*numpy.atleast_2d(split).T).T

    return two_with_u, sav, ufunc
Example No. 17
def test_1d():
    def fx(x, dt):
        F = np.array([[1., dt], [0, 1]])

        return np.dot(F, x)

    def hx(x):
        return x[0:1]

    ckf = CKF(dim_x=2, dim_z=1, dt=0.1, hx=hx, fx=fx)

    ckf.x = np.array([[1.], [2.]])
    ckf.P = np.array([[1, 1.1], [1.1, 3]])

    ckf.R = np.eye(1) * .05
    ckf.Q = np.array([[0., 0], [0., .001]])

    dt = 0.1
    points = MerweScaledSigmaPoints(2, .1, 2., -1)
    kf = UKF(dim_x=2, dim_z=1, dt=dt, fx=fx, hx=hx, points=points)

    kf.x = np.array([1, 2])
    kf.P = np.array([[1, 1.1], [1.1, 3]])
    kf.R *= 0.05
    kf.Q = np.array([[0., 0], [0., .001]])

    s = Saver(kf)
    for i in range(50):
        z = np.array([[i + randn() * 0.1]])
        ckf.predict()
        ckf.update(z)
        kf.predict()
        kf.update(z[0])
        assert abs(ckf.x[0] - kf.x[0]) < 1e-10
        assert abs(ckf.x[1] - kf.x[1]) < 1e-10
        s.save()

        # test mahalanobis
        a = np.zeros(kf.y.shape)
        maha = scipy_mahalanobis(a, kf.y, kf.SI)
        assert kf.mahalanobis == approx(maha)

    s.to_array()
Example No. 18
def test_saver_ekf():
    def HJ(x):
        return np.array([[1, 1]])

    def hx(x):
        return np.array([x[0]])

    kf = ExtendedKalmanFilter(2, 1)
    s = Saver(kf)

    for i in range(3):
        kf.predict()
        kf.update([[i]], HJ, hx)
        s.save()

    # to_array() will raise if the Saver did not record the state correctly
    s.to_array()
    assert len(s.x) == 3
    assert len(s.y) == 3
    assert len(s.K) == 3
Example No. 19
def test_1d():
    def fx(x, dt):
        F = np.array([[1., dt], [0, 1]])

        return np.dot(F, x)

    def hx(x):
        return x[0:1]

    ckf = CKF(dim_x=2, dim_z=1, dt=0.1, hx=hx, fx=fx)

    ckf.x = np.array([[1.], [2.]])
    ckf.P = np.array([[1, 1.1], [1.1, 3]])

    ckf.R = np.eye(1) * .05
    ckf.Q = np.array([[0., 0], [0., .001]])

    dt = 0.1
    points = MerweScaledSigmaPoints(2, .1, 2., -1)
    kf = UKF(dim_x=2, dim_z=1, dt=dt, fx=fx, hx=hx, points=points)

    kf.x = np.array([1, 2])
    kf.P = np.array([[1, 1.1], [1.1, 3]])
    kf.R *= 0.05
    kf.Q = np.array([[0., 0], [0., .001]])

    s = Saver(kf)
    for i in range(50):
        z = np.array([[i + randn() * 0.1]])
        #xx, pp, Sx = predict(f, x, P, Q)
        #x, P = update(h, z, xx, pp, R)
        ckf.predict()
        ckf.update(z)
        kf.predict()
        kf.update(z[0])
        assert abs(ckf.x[0] - kf.x[0]) < 1e-10
        assert abs(ckf.x[1] - kf.x[1]) < 1e-10
        s.save()
    s.to_array()
Example No. 20
def perfect_builder(Qvar, Rvar, dt):
    F1 = calc_F(Am1, dt)
    F3 = calc_F(Am3, dt)
    f_var = numpy.diag(Qvar) ** 2
    # f_var = numpy.array([[Qvar ** 2]])
    # Q1 = numpy.atleast_2d(F3[:2, 2]).T @ f_var @ numpy.atleast_2d(F3[:2, 2])

    R1 = numpy.eye(2) * Rvar ** 2
    G2 = calc_G(Am1, B2, dt)
    Q1 = G2 @ f_var @ G2.T

    perfect = OwnKF(2, 2, 2)
    perfect.assign_discrete(F1, Q1, H1, R1, P1, lin_m0, G2)
    sav = Saver(perfect)

    def ufunc(in_flows):
        """The ufunc in the filter should return the deviation variable form of the inputs of the given run's
        input func. Recieves inputs in tall array (time varies by row and inputs vary by column) must return in
        wide format."""
        return in_flows - nom_F # works with time by row arrays in and out

    return perfect, sav, ufunc
Example No. 21
    def _fitTheModel(self, saveDirectory):
        '''Apply the Kalman Filter to estimate the hidden state at time t for t = [0...ntimesteps-1] given observations up to and including time t'''

        # Initialize the parameters:
        self._defineModelParameters()

        # Create Saver object
        self.saverObject = Saver(self.kalmanFilter)

        # Loop the portfolio dict:
        for eachAssetName, eachAssetDataFrame in self.PORTFOLIO._portfolioDict.items(
        ):

            # Create a container for the looping:
            self.means = []

            # Re-initialize the parameters:
            self._defineModelParameters()

            # Loop each dataframe row:
            for eachIndex, eachRow in eachAssetDataFrame.iterrows():

                # Just perform the standard predict/update loop
                self.kalmanFilter.predict()
                self.kalmanFilter.update(eachRow['Returns'])

                # Append to list:
                self.means.append(self.kalmanFilter.x[0])

            # Create the new column in the dataframe:
            eachAssetDataFrame['KFValue'] = self.means
            #newDataFrame = eachAssetDataFrame.copy()

            # Save the filter values for further reference:
            self._saveModel(eachAssetName, saveDirectory)

        # Print:
        logger.warning(self.PORTFOLIO._portfolioDict['WS30'])
Example No. 22
    def smooth(self, t, z):

        dt = t[1:] - t[:-1]
        if np.any(dt < 0):
            raise ValueError('Times must be sorted in ascending order.')

        t = t.copy()
        z = z.copy()

        if self.kf.x is None:
            self.kf.x = self.init_x(z[0])

        s = Saver(self.kf)
        #s.save()

        for i, ti in enumerate(t[1:]):
            self.kf.predict(dt=dt[i])
            self.kf.update(z[i + 1], epoch=ti)
            s.save()

        s.to_array()
        xs, _, _ = self.kf.rts_smoother(s.x, s.P)
        return xs[:, :6]
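
Example No. 22 feeds the arrays produced by the Saver straight into an RTS smoother. The same pattern with a plain KalmanFilter would look roughly like the sketch below; note that KalmanFilter.rts_smoother returns four values, while the unscented variant used in some of these examples returns three.

from filterpy.common import Saver, kinematic_kf

kf = kinematic_kf(dim=1, order=1, dt=1.0)
s = Saver(kf)

for z in range(30):
    kf.predict()
    kf.update([float(z)])
    s.save()

s.to_array()
# smooth the saved posterior means and covariances
xs_smooth, Ps_smooth, _, _ = kf.rts_smoother(s.x, s.P)
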
Example No. 23
def test_linear_rts():
    """ for a linear model the Kalman filter and UKF should produce nearly
    identical results.

    Test code mostly due to user gboehl as reported in GitHub issue #97, though
    I converted it from an AR(1) process to constant velocity kinematic
    model.
    """
    dt = 1.0
    F = np.array([[1., dt], [.0, 1]])
    H = np.array([[1., .0]])

    def t_func(x, dt):
        F = np.array([[1., dt], [.0, 1]])
        return np.dot(F, x)

    def o_func(x):
        return np.dot(H, x)

    sig_t = .1  # process noise
    sig_o = .00000001  # measurement noise

    N = 50
    X_true, X_obs = [], []

    for i in range(N):
        X_true.append([i + 1, 1.])
        X_obs.append(i + 1 + np.random.normal(scale=sig_o))

    X_true = np.array(X_true)
    X_obs = np.array(X_obs)

    oc = np.ones((1, 1)) * sig_o**2
    tc = np.zeros((2, 2))
    tc[1, 1] = sig_t**2

    tc = Q_discrete_white_noise(dim=2, dt=dt, var=sig_t**2)
    points = MerweScaledSigmaPoints(n=2, alpha=.1, beta=2., kappa=1)

    ukf = UKF(dim_x=2, dim_z=1, dt=dt, hx=o_func, fx=t_func, points=points)
    ukf.x = np.array([0., 1.])
    ukf.R = oc[:]
    ukf.Q = tc[:]
    s = Saver(ukf)
    s.save()
    s.to_array()

    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.x = np.array([[0., 1]]).T
    kf.R = oc[:]
    kf.Q = tc[:]
    kf.H = H[:]
    kf.F = F[:]

    mu_ukf, cov_ukf = ukf.batch_filter(X_obs)
    x_ukf, _, _ = ukf.rts_smoother(mu_ukf, cov_ukf)

    mu_kf, cov_kf, _, _ = kf.batch_filter(X_obs)
    x_kf, _, _, _ = kf.rts_smoother(mu_kf, cov_kf)

    # check results of filtering are correct
    kfx = mu_kf[:, 0, 0]
    ukfx = mu_ukf[:, 0]
    kfxx = mu_kf[:, 1, 0]
    ukfxx = mu_ukf[:, 1]

    dx = kfx - ukfx
    dxx = kfxx - ukfxx

    # error in position should be smaller than error in velocity, hence
    # atol is different for the two tests.
    assert np.allclose(dx, 0, atol=1e-7)
    assert np.allclose(dxx, 0, atol=1e-6)

    # now ensure the RTS smoothers gave nearly identical results
    kfx = x_kf[:, 0, 0]
    ukfx = x_ukf[:, 0]
    kfxx = x_kf[:, 1, 0]
    ukfxx = x_ukf[:, 1]

    dx = kfx - ukfx
    dxx = kfxx - ukfxx

    assert np.allclose(dx, 0, atol=1e-7)
    assert np.allclose(dxx, 0, atol=1e-6)
    return ukf
Example No. 24
    data["q_prior"], data["S_prior"] = abc_simulate(data["r_obs"])

    # --- INIT FILTER --- #
    ukf = init_filter(
        S0=5.74,
        S_var=1,
        Q00_s_noise=Q00_s_noise,
        R=R,
        h=hx,
        f=abc,
        params=parameters,
        alpha=alpha,
        beta=beta,
        kappa=kappa,
    )
    s = Saver(ukf)

    # --- RUN FILTER --- #
    # TODO: how to include the measurement accuracy of rainfall?
    for z, r in np.vstack([data["q_obs"], data["r_obs"]]).T:
        fx_args = hx_args = {"r": r}  #  rainfall inputs to the model

        # predict
        ukf.predict(**fx_args)

        # update
        ukf.update(z, **hx_args)

        s.save()

    # save the output data
Example No. 25
    R: float = 0.01,
    S0: float = 5.74,
    observe_every: int = 1,
) -> Tuple[KalmanFilter, Saver, pd.DataFrame]:
    # ------ INIT FILTER ------
    kf = init_filter(
        r0=data["r_obs"][0],
        S0=S0,
        s_variance=s_variance,
        r_variance=r_variance,
        s_noise=s_noise,
        r_noise=r_noise,
        R=R,
    )

    s = Saver(kf)

    # ------ RUN FILTER ------
    if observe_every > 1:
        data.loc[data.index % observe_every != 0, "q_obs"] = np.nan

    # Iterate over the Kalman Filter
    for time_ix, z in enumerate(np.vstack([data["q_obs"], data["r_obs"]]).T):
        kf.predict()
        # only make update steps every n timesteps
        if time_ix % observe_every == 0:
            kf.update(z)
        s.save()

    s.to_array()
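
Note that save() is called on every time step even when the update is skipped (observe_every), so the saved arrays stay aligned with the time index of the input data.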
Example No. 26
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randn
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise, Saver

dt = 1.
r_std, q_std = 2., 0.003
cv = KalmanFilter(dim_x=2, dim_z=1)
cv.x = np.array([[0.], [1.]])  # position, velocity (column vector)
cv.F = np.array([[1., dt], [0., 1.]])
cv.R = np.array([[r_std*r_std]])
cv.H = np.array([[1., 0.]])
cv.P = np.diag([0.1*0.1, 0.03*0.03])
cv.Q = Q_discrete_white_noise(2, dt, q_std**2)
saver = Saver(cv)
for z in range(100):
    cv.predict()
    cv.update([z + randn() * r_std])
    saver.save() # save the filter's state
saver.to_array()
plt.plot(saver.x[:, 0])
# plot all of the priors
plt.plot(saver.x_prior[:, 0])
# plot mahalanobis distance
plt.figure()
plt.plot(saver.mahalanobis)
Example No. 27
def test_ekf():
    def H_of(x):
        """ compute Jacobian of H matrix for state x """

        horiz_dist = x[0]
        altitude = x[2]

        denom = sqrt(horiz_dist**2 + altitude**2)
        return array([[horiz_dist / denom, 0., altitude / denom]])

    def hx(x):
        """ takes a state variable and returns the measurement that would
        correspond to that state.
        """

        return sqrt(x[0]**2 + x[2]**2)

    dt = 0.05
    process_error = 0.05

    rk = ExtendedKalmanFilter(dim_x=3, dim_z=1)

    rk.F = eye(3) + array([[0, 1, 0], [0, 0, 0], [0, 0, 0]]) * dt

    def fx(x, dt):
        return np.dot(rk.F, x)

    rk.x = array([-10., 90., 1100.])
    rk.R *= 10
    rk.Q = array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]) * 0.001

    rk.P *= 50

    rs = []
    xs = []
    radar = RadarSim(dt)
    ps = []
    pos = []

    s = Saver(rk)
    for i in range(int(20 / dt)):
        z = radar.get_range(process_error)
        pos.append(radar.pos)

        rk.update(asarray([z]), H_of, hx, R=hx(rk.x) * process_error)
        ps.append(rk.P)
        rk.predict()

        xs.append(rk.x)
        rs.append(z)
        s.save()

        # test mahalanobis
        a = np.zeros(rk.y.shape)
        maha = scipy_mahalanobis(a, rk.y, rk.SI)
        assert rk.mahalanobis == approx(maha)

    s.to_array()

    xs = asarray(xs)
    ps = asarray(ps)
    rs = asarray(rs)

    p_pos = ps[:, 0, 0]
    p_vel = ps[:, 1, 1]
    p_alt = ps[:, 2, 2]
    pos = asarray(pos)

    if DO_PLOT:
        plt.subplot(311)
        plt.plot(xs[:, 0])
        plt.ylabel('position')

        plt.subplot(312)
        plt.plot(xs[:, 1])
        plt.ylabel('velocity')

        plt.subplot(313)
        #plt.plot(xs[:,2])
        #plt.ylabel('altitude')

        plt.plot(p_pos)
        plt.plot(-p_pos)
        plt.plot(xs[:, 0] - pos)
Example No. 28
def test_noisy_1d():
    f = KalmanFilter (dim_x=2, dim_z=1)

    f.x = np.array([[2.],
                    [0.]])       # initial state (location and velocity)

    f.F = np.array([[1.,1.],
                    [0.,1.]])    # state transition matrix

    f.H = np.array([[1.,0.]])    # Measurement function
    f.P *= 1000.                  # covariance matrix
    f.R *= 5                       # measurement uncertainty
    f.Q *= 0.0001                 # process uncertainty

    fsq = SquareRootKalmanFilter (dim_x=2, dim_z=1)

    fsq.x = np.array([[2.],
                      [0.]])     # initial state (location and velocity)

    fsq.F = np.array([[1.,1.],
                      [0.,1.]])  # state transition matrix

    fsq.H = np.array([[1.,0.]])  # Measurement function
    fsq.P = np.eye(2) * 1000.    # covariance matrix
    fsq.R *= 5                    # measurement uncertainty
    fsq.Q *= 0.0001               # process uncertainty


    # does __repr__ work?
    str(fsq)

    measurements = []
    results = []

    zs = []
    s = Saver(fsq)
    for t in range (100):
        # create measurement = t plus white noise
        z = t + random.randn()*20
        zs.append(z)

        # perform kalman filtering
        f.update(z)
        f.predict()

        fsq.update(z)
        fsq.predict()

        assert abs(f.x[0,0] - fsq.x[0,0]) < 1.e-12
        assert abs(f.x[1,0] - fsq.x[1,0]) < 1.e-12

        # save data
        results.append (f.x[0,0])
        measurements.append(z)
        s.save()
    s.to_array()

    for i in range(f.P.shape[0]):
        assert abs(f.P[i,i] - fsq.P[i,i]) < 0.01


    # now do a batch run with the stored z values so we can test that
    # it is working the same as the recursive implementation.
    # give slightly different P so result is slightly different
    f.x = np.array([[2.,0]]).T
    f.P = np.eye(2)*100.
    m,c,_,_ = f.batch_filter(zs,update_first=False)

    # plot data
    if DO_PLOT:
        p1, = plt.plot(measurements,'r', alpha=0.5)
        p2, = plt.plot (results,'b')
        p4, = plt.plot(m[:,0], 'm')
        p3, = plt.plot ([0,100],[0,100], 'g') # perfect result
        plt.legend([p1,p2, p3, p4],
                   ["noisy measurement", "KF output", "ideal", "batch"], loc=4)


        plt.show()
Example No. 29
def run_ukf(
    data: pd.DataFrame,
    S0_est: float,
    s_variance: float,
    r_variance: Optional[float],
    Q00_s_noise: float,
    Q11_r_noise: Optional[float],
    R: float,
    params: Dict[str, float],
    alpha: float,
    beta: float,
    kappa: float,
    dimension: int = 2,
    observe_every: int = 1,
) -> Tuple[UKF.UnscentedKalmanFilter, pd.DataFrame, Saver]:
    # --- INIT FILTER --- #
    if dimension == 1:
        ukf = init_filter(
            S0=S0_est,
            s_variance=s_variance,
            Q00_s_noise=Q00_s_noise,
            R=R,
            h=hx,
            f=abc,
            params=params,  #  dict(a=a_est, b=b_est, c=c_est)
            alpha=alpha,
            beta=beta,
            kappa=kappa,
        )
    elif dimension == 2:
        ukf = init_2D_filter(
            S0=S0_est,
            r0=data["r_obs"][0],
            s_variance=s_variance,
            r_variance=r_variance,
            Q00_s_noise=Q00_s_noise,
            Q11_r_noise=Q11_r_noise,
            R=R,
            params=params,
            alpha=alpha,
            beta=beta,
            kappa=kappa,
        )

    s = Saver(ukf)

    # --- RUN FILTER --- #
    for time_ix, (q, r) in enumerate(np.vstack([data["q_obs"], data["r_obs"]]).T):
        # NOTE: z is the discharge (q)
        if dimension == 1:
            # TODO: how to include the measurement accuracy of rainfall?
            fx_args = hx_args = {"r": r}  #  rainfall inputs to the model

            # predict
            ukf.predict(**fx_args)

            # update
            if time_ix % observe_every == 0:
                ukf.update(q, **hx_args)

        elif dimension == 2:
            z2d = np.array([q, r])
            ukf.predict()

            if time_ix % observe_every == 0:
                ukf.update(z2d)

        else:
            assert False, "Have only implemented [1, 2] dimensions"

        s.save()

    # save the output data
    s.to_array()
    data_ukf = update_data_columns(data.copy(), s, dimension=dimension)

    return ukf, data_ukf, s
Example No. 30
def test_imm():
    """ this test is drawn from Crassidis [1], example 4.6.

    ** References**

    [1] Crassidis. "Optimal Estimation of Dynamic Systems", CRC Press,
    Second edition.
    """

    r = 1.
    dt = 1.
    phi_sim = np.array([[1, dt, 0, 0], [0, 1, 0, 0], [0, 0, 1, dt],
                        [0, 0, 0, 1]])

    gam = np.array([[dt**2 / 2, 0], [dt, 0], [0, dt**2 / 2], [0, dt]])

    x = np.array([[2000, 0, 10000, -15.]]).T

    simxs = []
    N = 600
    for i in range(N):
        x = np.dot(phi_sim, x)
        if i >= 400:
            x += np.dot(gam, np.array([[.075, .075]]).T)
        simxs.append(x)
    simxs = np.array(simxs)
    #x = np.genfromtxt('c:/users/rlabbe/dropbox/Crassidis/mycode/x.csv', delimiter=',')

    zs = np.zeros((N, 2))
    for i in range(len(zs)):
        zs[i, 0] = simxs[i, 0] + randn() * r
        zs[i, 1] = simxs[i, 2] + randn() * r

    try:
        #data to test against crassidis' IMM matlab code
        zs_tmp = np.genfromtxt(
            'c:/users/rlabbe/dropbox/Crassidis/mycode/xx.csv',
            delimiter=',')[:-1]
        zs = zs_tmp
    except:
        pass
    ca = KalmanFilter(6, 2)
    cano = KalmanFilter(6, 2)
    dt2 = (dt**2) / 2
    ca.F = np.array([[1, dt, dt2, 0, 0, 0], [0, 1, dt, 0, 0, 0],
                     [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, dt, dt2],
                     [0, 0, 0, 0, 1, dt], [0, 0, 0, 0, 0, 1]])
    cano.F = ca.F.copy()

    ca.x = np.array([[2000., 0, 0, 10000, -15, 0]]).T
    cano.x = ca.x.copy()

    ca.P *= 1.e-12
    cano.P *= 1.e-12
    ca.R *= r**2
    cano.R *= r**2
    cano.Q *= 0
    q = np.array([[.05, .125, 1 / 6], [.125, 1 / 3, .5], [1 / 6, .5, 1]
                  ]) * 1.e-3

    ca.Q[0:3, 0:3] = q
    ca.Q[3:6, 3:6] = q

    ca.H = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]])
    cano.H = ca.H.copy()

    filters = [ca, cano]

    trans = np.array([[0.97, 0.03], [0.03, 0.97]])

    bank = IMMEstimator(filters, (0.5, 0.5), trans)

    # ensure __repr__ doesn't have problems
    str(bank)

    xs, probs = [], []
    cvxs, caxs = [], []
    s = Saver(bank)
    for i, z in enumerate(zs[0:10]):
        z = np.array([z]).T
        bank.update(z)
        #print(ca.likelihood, cano.likelihood)
        #print(ca.x.T)
        xs.append(bank.x.copy())
        cvxs.append(ca.x.copy())
        caxs.append(cano.x.copy())
        #print(i, ca.likelihood, cano.likelihood, bank.w)

        #print('p', bank.p)
        probs.append(bank.mu.copy())
        s.save()
    s.to_array()

    if DO_PLOT:
        xs = np.array(xs)
        cvxs = np.array(cvxs)
        caxs = np.array(caxs)
        probs = np.array(probs)
        plt.subplot(121)
        plt.plot(xs[:, 0], xs[:, 3], 'k')
        #plt.plot(cvxs[:, 0], caxs[:, 3])
        #plt.plot(simxs[:, 0], simxs[:, 2], 'g')
        plt.scatter(zs[:, 0], zs[:, 1], marker='+', alpha=0.2)

        plt.subplot(122)
        plt.plot(probs[:, 0])
        plt.plot(probs[:, 1])
        plt.ylim(-1.5, 1.5)
        plt.title('probability ratio p(cv)/p(ca)')
        '''plt.figure()
        plt.plot(cvxs, label='CV')
        plt.plot(caxs, label='CA')
        plt.plot(xs[:, 0], label='GT')
        plt.legend()

        plt.figure()
        plt.plot(xs)
        plt.plot(xs[:, 0])'''

    return bank