Example no. 1
def run_filter(list_args: List[float],
               data: pd.DataFrame) -> Tuple[KalmanFilter, Saver, np.ndarray]:

    if isinstance(data, tuple):
        # data was passed through *args; take the first element
        data = data[0]

    # unpack the parameters from list_args
    s_noise_Q00, r_noise_Q11, q_meas_error_R00, r_meas_error_R11 = list_args

    kf = init_filter(
        r0=0.0,
        S0=5.74,
        s_variance_P00=1,
        r_variance_P11=100,
        s_noise_Q00=s_noise_Q00,
        r_noise_Q11=r_noise_Q11,
        q_meas_error_R00=q_meas_error_R00,
        r_meas_error_R11=r_meas_error_R11,
    )

    s = Saver(kf)
    lls = []

    for z in np.vstack([data["q_obs"], data["r_obs"]]).T:
        kf.predict()
        kf.update(z)
        log_likelihood = kf.log_likelihood_of(z)
        lls.append(log_likelihood)
        s.save()

    s.to_array()
    lls = np.array(lls)

    return kf, s, lls
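`init_filter` is not shown in this example. A minimal sketch of what such a helper might look like, assuming a two-state `filterpy` `KalmanFilter` whose P, Q and R diagonals come from the keyword arguments above; the state layout, `F` and `H` below are placeholders, not taken from the original code:

import numpy as np
from filterpy.kalman import KalmanFilter


def init_filter(r0, S0, s_variance_P00, r_variance_P11,
                s_noise_Q00, r_noise_Q11,
                q_meas_error_R00, r_meas_error_R11):
    """Hypothetical helper: build a 2-state, 2-measurement KalmanFilter."""
    kf = KalmanFilter(dim_x=2, dim_z=2)
    kf.x = np.array([[S0], [r0]])                         # initial state (layout assumed)
    kf.F = np.eye(2)                                      # placeholder transition model
    kf.H = np.eye(2)                                      # placeholder measurement model
    kf.P = np.diag([s_variance_P00, r_variance_P11])      # initial state covariance
    kf.Q = np.diag([s_noise_Q00, r_noise_Q11])            # process noise
    kf.R = np.diag([q_meas_error_R00, r_meas_error_R11])  # measurement noise
    return kf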
Example no. 2
def test_functions():

    x, P = predict(x=10., P=3., u=1., Q=2.**2)
    x, P = update(x=x, P=P, z=12., R=3.5**2)

    x, P = predict(x=np.array([10.]), P=np.array([[3.]]), Q=2.**2)
    x, P = update(x=x, P=P, z=12., H=np.array([[1.]]), R=np.array([[3.5**2]]))

    x = np.array([1., 0])
    P = np.diag([1., 1])
    Q = np.diag([0., 0])
    H = np.array([[1., 0]])

    x, P = predict(x=x, P=P, Q=Q)

    assert x.shape == (2,)
    assert P.shape == (2, 2)

    x, P = update(x, P, z=[1], R=np.array([[1.]]), H=H)

    assert x[0] == 1 and x[1] == 0

    # test velocity predictions
    x, P = predict(x=x, P=P, Q=Q)
    assert x[0] == 1 and x[1] == 0

    x[1] = 1.
    F = np.array([[1., 1], [0, 1]])

    x, P = predict(x=x, F=F, P=P, Q=Q)
    assert x[0] == 2 and x[1] == 1

    x, P = predict(x=x, F=F, P=P, Q=Q)
    assert x[0] == 3 and x[1] == 1
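The procedural `predict`/`update` calls tested above have a class-based counterpart in `filterpy.kalman.KalmanFilter`; a minimal sketch of the same kind of constant-velocity step in class form (the numbers are illustrative only):

import numpy as np
from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([[1.], [0.]])           # position, velocity
kf.F = np.array([[1., 1.], [0., 1.]])   # constant-velocity transition
kf.H = np.array([[1., 0.]])             # measure position only
kf.P = np.diag([1., 1.])
kf.Q = np.diag([0., 0.])
kf.R = np.array([[1.]])

kf.predict()
kf.update(1.0)                          # same predict/update cycle as the functions above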
Example no. 4
    def get_predictions(self):
        """
        :description
            Starting from the current position, make predictions and propagate using the Kalman transition matrix (defined
            by our model) to make predictions on futur positions.
        :return:
            ([x1, y1], [x2, y2], ...)  predictions     -- a list of size NUMBER_PREDICTIONS containing the
                                                           predicted positions of the target beeing tracked
        """
        predictions = []
        # return no predictions if there is a large gap between the last two tracked frames
        if (not len(self.kalman_memory) < 2) and (
                self.kalman_memory[-2][2] - self.kalman_memory[-1][2] > 3):
            return predictions

        current_state = self.filter.x
        current_P = self.filter.P
        dt = 0.4
        try:
            for _ in range(NUMBER_PREDICTIONS):
                new_state, new_P = predict(current_state, current_P,
                                           self.filter_model.model_F(dt),
                                           self.filter.Q)
                predictions.append((new_state, new_P))
                current_state, current_P = update(
                    new_state, new_P,
                    new_state[0:KALMAN_MODEL_MEASUREMENT_DIM], self.filter.R,
                    self.filter.H)
        except ValueError:
            print("error in prediction")
            import sys
            print(sys.exc_info())
            print("current_state: ", current_state)

        return predictions
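The same look-ahead idea, rolling the procedural `predict` forward without measurements, can be sketched outside the class with a plain constant-velocity model; `F`, `Q`, `dt` and the step count below are illustrative and not the values used by `self.filter_model`:

import numpy as np
from filterpy.kalman import predict

dt = 0.4
F = np.array([[1., dt], [0., 1.]])      # illustrative constant-velocity transition
Q = np.diag([0.01, 0.01])

x = np.array([[0.], [1.]])              # position, velocity
P = np.eye(2)

lookahead = []
for _ in range(5):                      # e.g. five steps ahead
    x, P = predict(x, P, F, Q)          # propagate the state, no update step
    lookahead.append(x[0, 0])           # collect predicted positions

Unlike `get_predictions` above, this sketch skips the pseudo-update on the predicted measurement; propagating `predict` alone is the usual way to look ahead when no future measurements exist.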
Example no. 5
def update_mixture(xs, Ps, ws, z, R, H, Pd, lamc):
    '''
    xs - list of state means
    Ps - list of state covariance matrices
    ws - list of weights
    z - list of measurements
    R - measurement noise matrix
    H - measurement matrix
    Pd - probability of detection
    lamc - clutter intensity function handle
    '''
    hypotheses = range(0, len(z) + 1)
    ws_u, xs_u, Ps_u = [], [], []
    for x_prev, P_prev, w_prev in zip(xs, Ps, ws):
        for h in hypotheses:
            if h == 0:
                weight = w_prev * (1 - Pd)
                ws_u.append(weight)
                xs_u.append(x_prev)
                Ps_u.append(P_prev)
            else:
                zt = z[h - 1]
                weight = w_prev * Pd * predict_measurement(x_prev, P_prev, zt, R, H) / lamc(zt)
                x_u, P_u = update(x_prev, P_prev, zt, R, H)
                ws_u.append(weight)
                xs_u.append(x_u)
                Ps_u.append(P_u)

    # normalize weights
    ws_u = np.asarray(ws_u) / np.sum(ws_u)
    return xs_u, Ps_u, ws_u
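`predict_measurement` is not defined in this snippet; in a Gaussian-mixture update of this kind it usually evaluates the measurement likelihood N(z; Hx, HPHᵀ + R). A sketch under that assumption:

import numpy as np
from scipy.stats import multivariate_normal


def predict_measurement(x, P, z, R, H):
    """Assumed role: Gaussian likelihood of measurement z given the state (x, P)."""
    z_pred = H @ x                       # predicted measurement
    S = H @ P @ H.T + R                  # innovation covariance
    return multivariate_normal.pdf(np.ravel(z), mean=np.ravel(z_pred), cov=S)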
Example no. 6
    def get_current_rssi(self):
        if len(self.rssihistory) == 0:
            return -256, -256

        self.x, self.P = kf.predict(x=self.x, P=self.P, u=0, Q=2)  # a large process noise Q makes the filter trust the prediction less and follow new readings more quickly
        self.x, self.P = kf.update(x=self.x, P=self.P, z=self.rssihistory[-1], R=1)  # measurement variance could be up to 1 per RSSI read

        estimated_rssi = self.x

        #print self.timehistory[-1], estimated_rssi, self.rssihistory[-1]
        return estimated_rssi, self.rssihistory[-1]
Example no. 7
def test_procedure_form():

    dt = 1.
    std_z = 10.1

    x = np.array([[0.], [0.]])
    F = np.array([[1., dt], [0., 1.]])
    H = np.array([[1., 0.]])
    P = np.eye(2)
    R = np.eye(1)*std_z**2
    Q = Q_discrete_white_noise(2, dt, 5.1)

    kf = KalmanFilter(2, 1)
    kf.x = x.copy()
    kf.F = F.copy()
    kf.H = H.copy()
    kf.P = P.copy()
    kf.R = R.copy()
    kf.Q = Q.copy()


    measurements = []
    results = []

    xest = []
    ks = []
    pos = 0.
    for t in range(2000):
        z = pos + random.randn() * std_z
        pos += 100

        # perform kalman filtering
        x, P = predict(x, P, F, Q)
        kf.predict()
        assert norm(x - kf.x) < 1.e-12
        x, P, _, _, _, _ = update(x, P, z, R, H, True)
        kf.update(z)
        assert norm(x - kf.x) < 1.e-12

        # save data
        xest.append(x.copy())
        measurements.append(z)

    xest = np.asarray(xest)
    measurements = np.asarray(measurements)
    # plot data
    if DO_PLOT:
        plt.plot(xest[:, 0])
        plt.plot(xest[:, 1])
        plt.plot(measurements)
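The six-value unpacking of the procedural `update` above comes from its `return_all=True` flag; a self-contained sketch of that call with matrices of the same shape:

import numpy as np
from filterpy.kalman import predict, update

F = np.array([[1., 1.], [0., 1.]])
H = np.array([[1., 0.]])
x, P = np.array([[0.], [0.]]), np.eye(2)
Q, R = np.eye(2) * 0.01, np.eye(1) * 10.1**2

x, P = predict(x, P, F, Q)
# with return_all=True, update also returns the residual y, the Kalman gain K,
# the innovation covariance S and the log-likelihood of the measurement
x, P, y, K, S, log_likelihood = update(x, P, 1.23, R, H, True)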
Example no. 8
    def update(self, pos, vel):
        """Update object position and filter
        Inputs:
            pos -- position of object (cartesian)
            vel -- velocity of object (cartesian)
#           hist_score -- certainty score based on object history (used as scale factor for measurement covariance) (range 1 - 1.05)
        """
        measurement = np.append(pos, vel).astype(np.float32)

        self.state, self.covar = kalman.update(x=self.state,
                                               P=self.covar,
                                               z=measurement,
                                               R=self.measurement_covar,
                                               H=self.measurement_trans)
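`measurement_trans` (H) and `measurement_covar` (R) are defined elsewhere in this class; one plausible layout, assuming the state stacks position, velocity and acceleration while the measurement is the `[pos, vel]` vector built above (the 2-D layout and the noise values are assumptions):

import numpy as np

# assumed state: [px, py, vx, vy, ax, ay]; measurement: [px, py, vx, vy]
measurement_trans = np.hstack([np.eye(4), np.zeros((4, 2))]).astype(np.float32)   # H
measurement_covar = np.diag([1., 1., 4., 4.]).astype(np.float32)                  # R (illustrative)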
Example no. 10
def kalman_filter(data, sensor_var, process_var):
    x0 = (data[0], 1)                              # initial state as (mean, variance)
    velocity = 0
    dt = 1.
    process_model = (velocity * dt, process_var)   # (movement, movement variance)

    x = x0

    xs, predictions = [], []
    for z in data:
        prior = predict(x, process_model)
        #print(prior)
        likelihood = (z, sensor_var)
        x = update(prior, likelihood)

        # save results
        predictions.append(prior[0])
        xs.append(x[0])
    return predictions, xs
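Here `predict` and `update` are the scalar `(mean, variance)` Gaussian form rather than the matrix versions used elsewhere on this page. A minimal sketch of such helpers, assuming that tuple convention:

def predict(state, process_model):
    """Scalar predict: add the process-model Gaussian to the current estimate."""
    mean, var = state
    dmean, dvar = process_model
    return (mean + dmean, var + dvar)


def update(prior, likelihood):
    """Scalar update: multiply the two Gaussians (precision-weighted average)."""
    prior_mean, prior_var = prior
    z_mean, z_var = likelihood
    mean = (prior_var * z_mean + z_var * prior_mean) / (prior_var + z_var)
    var = (prior_var * z_var) / (prior_var + z_var)
    return (mean, var)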
Example no. 11
def proc_form():
    """ This is for me to run against the class_form() function to see which,
    if either, runs faster. They within a few ms of each other on my machine
    with Python 3.5.1"""

    dt = 1.
    std_z = 10.1

    x = np.array([[0.], [0.]])
    F = np.array([[1., dt], [0., 1.]])
    H = np.array([[1., 0.]])
    P = np.eye(2)
    R = np.eye(1)*std_z**2
    Q = Q_discrete_white_noise(2, dt, 5.1)

    pos = 0.
    for t in range(2000):
        z = pos + random.randn() * std_z
        pos += 100

        # perform kalman filtering
        x, P = predict(x, P, F, Q)
        x, P, _ = update(z, R, x, P, H)
Example no. 13
    def kalman_filter(z):

        global x
        global process_model

        # -------- PREDICT --------- #
        # X is the state of the system
        # P is the variance of the system
        # u is the movement of the system due to the process
        # Q is the noise of the process
        x, P = kf.predict(x=x, P=process_model)
        # the sensor reports z with measurement variance sensor_var
        # probability of the measurement given the current state
        #likelihood = gaussian(z, mean(z), sensor_var)

        # -------- UPDATE --------- #
        # X is the state of the system
        # P is the variance of the system
        # z is the measurement
        # R is the measurement variance
        x, P = kf.update(x=x, P=P, z=z, R=sensor_var)
        # print( "x: ",'%.3f' % x, "var: " '%.3f' % P, "z: ", '%.3f' % z)
        #print(x)
        return x
Example no. 14
V = np.zeros(shape=(25, 2))
Z = np.zeros(shape=(25, 2))
Q = np.array([[0.0001, 0.00002], [0.00002, 0.0001]])
R = np.array([[0.01, 0.005], [0.005, 0.02]])
x = x2 = np.array([[0., 0.]]).T
H = B = P = np.eye(2)


# P = 0.0001 * P  # use this when estimating case 2

with open('data.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=' ')
    for i, row in enumerate(reader):
        a = row[0].split(',')
        V[i] = [float(a[0]), float(a[1])]
        Z[i] = [float(a[2]), float(a[3])]

result = []
for i in range(25):
    u = np.array([[V[i][0]], [V[i][1]]])
    z = np.array([[Z[i][0]], [Z[i][1]]])
    x, P = predict(x, P, F=1, Q=Q, u=u, B=B)
    x, P = update(x, P, z, R, H)
    result.append((x[:2]).tolist())

plt.plot(Z.T[0], Z.T[1], 'ro')
x1, x2 = zip(*result)
plt.plot(x1, x2, 'b-')
# plt.plot(x21, x22, '-')

plt.show()
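With a control input, the prediction step used above is x ← Fx + Bu and P ← FPFᵀ + Q. A one-step sketch with the same shapes as this example (the control vector value is illustrative):

import numpy as np
from filterpy.kalman import predict

x = np.array([[0.], [0.]])
P = np.eye(2)
B = np.eye(2)
Q = np.array([[0.0001, 0.00002], [0.00002, 0.0001]])
u = np.array([[0.1], [0.2]])             # control input (e.g. known velocity)

x_prior, P_prior = predict(x, P, F=1, Q=Q, u=u, B=B)

# equivalent by hand: F is the scalar 1 here, so the prior is x + B @ u and P + Q
assert np.allclose(x_prior, x + B @ u)
assert np.allclose(P_prior, P + Q)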
Example no. 15
        r_noise=r_noise,
        R=R,
    )

    s = Saver(kf)

    # ------ RUN FILTER ------
    if observe_every > 1:
        data.loc[data.index % observe_every != 0, "q_obs"] = np.nan

    # Iterate over the Kalman Filter
    for time_ix, z in enumerate(np.vstack([data["q_obs"], data["r_obs"]]).T):
        kf.predict()
        # only make update steps every n timesteps
        if time_ix % observe_every == 0:
            kf.update(z)
        s.save()

    s.to_array()

    # update data with the POSTERIOR estimates
    # discharge = measurement operator @ state (H @ x)
    data["q_filtered"] = (s.H @ s.x)[:, 0]
    data["q_variance"] = (s.H @ s.P)[:, 0, 0]

    return kf, s, data
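`(s.H @ s.x)` works because `filterpy.common.Saver` records the filter's attributes on every `save()` and `to_array()` stacks them into time-indexed arrays; a minimal sketch of that pattern (shapes shown for the default column-vector state):

import numpy as np
from filterpy.kalman import KalmanFilter
from filterpy.common import Saver

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.F = np.array([[1., 1.], [0., 1.]])
kf.H = np.array([[1., 0.]])

s = Saver(kf)
for z in [1.0, 2.1, 2.9]:
    kf.predict()
    kf.update(z)
    s.save()                 # snapshot x, P, H, ... at this timestep

s.to_array()
print(s.x.shape)             # (3, 2, 1): one saved state per timestep
print(s.P.shape)             # (3, 2, 2)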

Example no. 16
                         [x[j] - x[j-1]], 
                         [y[j] - y[j-1]]])

    # If near constant acceleration model, measure position, velocity and acceleration.
    elif args.model == 'nca':
        meas = np.array([[x[j]], 
                         [y[j]], 
                         [x[j]-x[j-1]], 
                         [y[j]-y[j-1]], 
                         [(x[j] - x[j-1]) - (x[j-1] - x[j-2])], 
                         [(y[j] - y[j-1]) - (y[j-1] - y[j-2])]])
    
    # If doing sanity check, use library. Else use provided kalman_step function.
    if args.sanity_check:
        state, covariance = predict(state, covariance, A, Q_i)
        state, covariance = update(state, covariance, meas, R_i, C)
    else:
        state, covariance, _ , _ = kalman_step(A, C, Q_i, R_i, meas, state, covariance)

    
    # Get position (velocity, acceleration) from deduced state.
    sx[j] = state[0]
    sy[j] = state[1]
    if args.model in {'ncv', 'nca'}:
        sx_d[j] = state[2]
        sy_d[j] = state[3]
        if args.model == 'nca':
            sx_dd[j] = state[4]
            sy_dd[j] = state[5]
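For reference, one common textbook parameterization of the near-constant-velocity (NCV) matrices used in such a sanity check with `predict`/`update`; this is a generic form, not necessarily the `A`, `C`, `Q_i`, `R_i` defined earlier in this script:

import numpy as np

dt, q, r = 1.0, 0.1, 1.0

# state [x, y, vx, vy]; near-constant-velocity transition
A = np.array([[1., 0., dt, 0.],
              [0., 1., 0., dt],
              [0., 0., 1., 0.],
              [0., 0., 0., 1.]])

# measure position and (differenced) velocity directly, as in the snippet above
C = np.eye(4)

Q_i = q * np.eye(4)          # simplified process noise (illustrative)
R_i = r * np.eye(4)          # measurement noise (illustrative)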

Example no. 17
def run_1D_filter(data: pd.DataFrame,
                  R: float,
                  Q: float,
                  P0: float,
                  S0: float = 5.74) -> pd.DataFrame:
    """Run a simple 1D Kalman Filter on noisy simulated data.

    Initialize:
        We have an initial estimate of the state variance (`P0`).
        We use the first 'measurement' as our initial estimate (`X[0] = z[0]`)
    Predict:
        We require an initial input storage value - `S0`.
        We use the ABC model as our process model to produce a prediction of X (discharge).
        We assume the error/variance of this process to be `Q` (process error).
    Update:
        We use noisy observations (simulated from the ABCModel) as measurements `z`.
        We assume the noise on the simulated data to be `R` (measurement error).
        Our filtered state (X) combines the prediction and the measurement, weighted by the Kalman gain.

    Note:
        - The ABC model has an internal state parameter (S[t]) that we are currently not using.
        - The ABC model produces a direct estimate of X, rather than the change in X. We
         calculate the change in x (`dx`) as `dx = Qsim - X`, i.e. the current estimate
         minus the previous (filtered) estimate.
        - The improved state (X = discharge) estimated by the filter is not fed back into the
         ABC model.

    We use the `filterpy.kalman` implementation of the Kalman Filter.

    Args:
        data (pd.DataFrame): driving data, including precipitation (for driving the ABCmodel) &
            the noisy Qsim values. Must have columns: ['precip', 'qsim', 'qsim_noise', 'ssim']
        R (float): The measurement noise (because we have simulated data this is known)
        Q (float): The process noise (variance / uncertainty on the ABC Model estimate)
        P0 (float): The initial estimate for the state noise (variance on the prior / state uncertainty)
        S0 (float, optional): Initial storage value for the ABC model. Defaults to 5.74.

    Returns:
        pd.DataFrame: one row per timestep with columns ['measured', 'predicted',
            'filtered', 'unobserved', 'kalman_gain', 'log_likelihood', 'residual'].
    """
    assert all(np.isin(["precip", "qsim", "qsim_noise", "ssim"], data.columns))

    measured_values = []
    predicted_values = []
    filtered_values = []

    kalman_gains = []
    log_likelihoods = []
    residuals = []

    # initialize step
    P = P0
    X = data["qsim_noise"][0]
    storage = S0

    # Kalman Filtered Qsim
    for ix, precip in enumerate(data["precip"]):
        # predict step
        Qsim, storage = abcmodel(storage, precip)  # process model (simulate Q)
        dx = Qsim - X  #  convert into a change (dx)
        X, P = kf.predict(x=X, P=P, u=dx, Q=Q)
        predicted_values.append(X)

        # update step
        z = data["qsim_noise"][ix]  # measurement
        X, P, y, K, S, log_likelihood = kf.update(x=X,
                                                  P=P,
                                                  z=z,
                                                  R=R,
                                                  return_all=True)

        filtered_values.append(X)
        measured_values.append(z)

        kalman_gains.append(float(K))
        log_likelihoods.append(float(log_likelihood))
        residuals.append(float(y))

    out = pd.DataFrame({
        "measured": measured_values,
        "predicted": predicted_values,
        "filtered": filtered_values,
        "unobserved": data["qsim"],
        "kalman_gain": kalman_gains,
        "log_likelihood": log_likelihoods,
        "residual": residuals,
    })

    return out
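A hypothetical call, assuming a `data` frame with the four required columns; the `R`, `Q` and `P0` values are placeholders:

# hypothetical usage of run_1D_filter (parameter values are placeholders)
out = run_1D_filter(data, R=0.5, Q=0.1, P0=1.0, S0=5.74)

# compare noisy measurements, the raw model prediction and the filtered estimate
ax = out[["measured", "predicted", "filtered", "unobserved"]].plot(figsize=(10, 4))
ax.set_ylabel("discharge")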