def test_functions():
    x, P = predict(x=10., P=3., u=1., Q=2.**2)
    x, P = update(x=x, P=P, z=12., R=3.5**2)

    x, P = predict(x=np.array([10.]), P=np.array([[3.]]), Q=2.**2)
    x, P = update(x=x, P=P, z=12., H=np.array([[1.]]), R=np.array([[3.5**2]]))

    x = np.array([1., 0])
    P = np.diag([1., 1])
    Q = np.diag([0., 0])
    H = np.array([[1., 0]])

    x, P = predict(x=x, P=P, Q=Q)
    assert x.shape == (2,)
    assert P.shape == (2, 2)

    x, P = update(x, P, z=[1], R=np.array([[1.]]), H=H)
    assert x[0] == 1 and x[1] == 0

    # test velocity predictions
    x, P = predict(x=x, P=P, Q=Q)
    assert x[0] == 1 and x[1] == 0

    x[1] = 1.
    F = np.array([[1., 1], [0, 1]])
    x, P = predict(x=x, F=F, P=P, Q=Q)
    assert x[0] == 2 and x[1] == 1

    x, P = predict(x=x, F=F, P=P, Q=Q)
    assert x[0] == 3 and x[1] == 1
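These tests exercise the procedural API from filterpy.kalman. For reference, a minimal sketch of the calling convention they assume; with return_all=True, update also returns the residual, gain, system uncertainty, and log-likelihood, which matches the six-value unpack in test_procedure_form below:

# minimal usage sketch, assuming filterpy.kalman's procedural predict/update
import numpy as np
from filterpy.kalman import predict, update

x = np.array([0., 0.])
P = np.eye(2)
F = np.array([[1., 1.], [0., 1.]])
H = np.array([[1., 0.]])

x, P = predict(x, P, F, Q=0.)                       # prior: x, P projected through F (+ Q)
x, P, y, K, S, log_likelihood = update(
    x, P, z=1., R=np.array([[1.]]), H=H, return_all=True)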
def test_procedure_form():
    dt = 1.
    std_z = 10.1

    x = np.array([[0.], [0.]])
    F = np.array([[1., dt], [0., 1.]])
    H = np.array([[1., 0.]])
    P = np.eye(2)
    R = np.eye(1) * std_z**2
    Q = Q_discrete_white_noise(2, dt, 5.1)

    kf = KalmanFilter(2, 1)
    kf.x = x.copy()
    kf.F = F.copy()
    kf.H = H.copy()
    kf.P = P.copy()
    kf.R = R.copy()
    kf.Q = Q.copy()

    measurements = []
    results = []
    xest = []
    ks = []
    pos = 0.
    for t in range(2000):
        z = pos + random.randn() * std_z
        pos += 100

        # perform kalman filtering
        x, P = predict(x, P, F, Q)
        kf.predict()
        assert norm(x - kf.x) < 1.e-12

        x, P, _, _, _, _ = update(x, P, z, R, H, True)
        kf.update(z)
        assert norm(x - kf.x) < 1.e-12

        # save data
        xest.append(x.copy())
        measurements.append(z)

    xest = np.asarray(xest)
    measurements = np.asarray(measurements)

    # plot data
    if DO_PLOT:
        plt.plot(xest[:, 0])
        plt.plot(xest[:, 1])
        plt.plot(measurements)
def kalman_filter(data, sensor_var, process_var):
    x0 = (data[0], 1)
    velocity = 0
    dt = 1.
    process_model = (velocity * dt, process_var)

    x = x0
    xs, predictions = [], []
    for z in data:
        prior = predict(x, process_model)
        #print(prior)
        likelihood = (z, sensor_var)
        x = update(prior, likelihood)

        # save results
        predictions.append(prior[0])
        xs.append(x[0])

    return predictions, xs
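This variant works on one-dimensional gaussians represented as (mean, variance) tuples rather than the filterpy array API. A minimal sketch of the predict/update helpers it assumes, matching the tuple calling convention above:

# minimal sketch of the 1-D helpers this snippet assumes,
# using plain (mean, variance) tuples
def predict(state, process_model):
    mean, var = state
    dmean, dvar = process_model
    return (mean + dmean, var + dvar)             # shift by the process, add its noise

def update(prior, likelihood):
    mean, var = prior
    z, z_var = likelihood
    K = var / (var + z_var)                       # Kalman gain for the scalar case
    return (mean + K * (z - mean), (1 - K) * var)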
def proc_form():
    """This is for me to run against the class_form() function to see which,
    if either, runs faster. They are within a few ms of each other on my
    machine with Python 3.5.1."""

    dt = 1.
    std_z = 10.1

    x = np.array([[0.], [0.]])
    F = np.array([[1., dt], [0., 1.]])
    H = np.array([[1., 0.]])
    P = np.eye(2)
    R = np.eye(1) * std_z**2
    Q = Q_discrete_white_noise(2, dt, 5.1)

    pos = 0.
    for t in range(2000):
        z = pos + random.randn() * std_z
        pos += 100

        # perform kalman filtering
        x, P = predict(x, P, F, Q)
        x, P = update(x, P, z, R, H)
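class_form() itself is not included in this section; a sketch of what it presumably looks like, given the identical class-based setup in test_procedure_form above (a reconstruction, not the original):

# hypothetical counterpart to proc_form(), assuming filterpy's KalmanFilter class;
# class_form() is not shown in this section, so this is a sketch, not the original
def class_form():
    dt = 1.
    std_z = 10.1

    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.x = np.array([[0.], [0.]])
    kf.F = np.array([[1., dt], [0., 1.]])
    kf.H = np.array([[1., 0.]])
    kf.P = np.eye(2)
    kf.R = np.eye(1) * std_z**2
    kf.Q = Q_discrete_white_noise(2, dt, 5.1)

    pos = 0.
    for t in range(2000):
        z = pos + random.randn() * std_z
        pos += 100
        kf.predict()
        kf.update(z)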
def example():
    dt = 0.1
    x = np.array([10.0, 4.5])
    P = np.diag([500, 49])
    F = np.array([[1, dt], [0, 1]])
    # Q is the process noise

    fig = plt.figure()
    for _ in range(40):
        # uncomment fig.clear() if the history should be deleted each frame
        #fig.clear()
        plot_covariance_ellipse(x, P, edgecolor='r')
        x, P = predict(x=x, P=P, F=F, Q=0)
        print('x =', x)
        plot_covariance_ellipse(x, P, edgecolor='k', ls='dashed')
        fig.canvas.draw()
        plt.show(block=False)
        time.sleep(0.1)
    plt.show()
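The example passes Q=0, so all growth in P comes from projecting through F. If process noise is wanted, a discrete white-noise model is the usual choice; a sketch using the same Q_discrete_white_noise helper that appears elsewhere in this section (var=0.1 is an illustrative value, not from the original):

# optional: a nonzero process noise for the example above;
# var=0.1 is illustrative only
from filterpy.common import Q_discrete_white_noise

Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.1)
x, P = predict(x=x, P=P, F=F, Q=Q)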
def kalman_filter(z):
    global x
    global process_model

    # -------- PREDICT ---------
    # x is the state of the system
    # P is the variance of the system
    # u is the movement of the system due to the process
    # Q is the noise of the process
    x, P = kf.predict(x=x, P=process_model)

    # sensor says z, with a variance of sensor_var
    # probability of the measurement given the current state
    #likelihood = gaussian(z, mean(z), sensor_var)

    # -------- UPDATE ---------
    # x is the state of the system
    # P is the variance of the system
    # z is the measurement
    # R is the measurement variance
    x, P = kf.update(x=x, P=P, z=z, R=sensor_var)

    #print("x: ", '%.3f' % x, "var: ", '%.3f' % P, "z: ", '%.3f' % z)
    #print(x)
    return x
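This function leans on module-level state, and `kf` appears to be the filterpy.kalman module used procedurally (its predict/update return tuples, which instance methods do not). A sketch of the setup it assumes; the names mirror the globals it reads, and the values are illustrative only:

# hypothetical setup for kalman_filter(z) above; values are illustrative
import filterpy.kalman as kf

x = 0.                  # initial state estimate
process_model = 1.0     # variance carried into the predict step
sensor_var = 2.0        # measurement variance (R)

estimate = kalman_filter(z=1.2)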
    s_variance=s_variance,
    r_variance=r_variance,
    s_noise=s_noise,
    r_noise=r_noise,
    R=R,
)
s = Saver(kf)

# ------ RUN FILTER ------
# only observe every n values
if observe_every > 1:
    data.loc[data.index % observe_every != 0, "q_obs"] = np.nan

# Iterate over the Kalman Filter
for time_ix, z in enumerate(np.vstack([data["q_obs"], data["r_obs"]]).T):
    kf.predict()

    # only make update steps every n timesteps
    if time_ix % observe_every == 0:
        kf.update(z)
    s.save()

s.to_array()

# data["q_true_original"] = data["q_true"]

# update data with POSTERIOR estimates
# Calculate the DISCHARGE (measurement operator * \bar{x})
data["q_filtered"] = (s.H @ s.x)[:, 0]
data["q_variance"] = (s.H @ s.P)[:, 0, 0]
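For context: filterpy.common.Saver snapshots the filter's attributes each time save() is called, and to_array() stacks those snapshots into arrays (s.x, s.P, s.H, ...) for bulk post-processing, which is what the last two lines above rely on. A minimal sketch, where `kf` is a configured KalmanFilter and `zs` an illustrative iterable of measurements:

# minimal Saver sketch; `kf` and `zs` are assumptions for illustration
from filterpy.common import Saver

s = Saver(kf)
for z in zs:
    kf.predict()
    kf.update(z)
    s.save()           # snapshot kf.x, kf.P, kf.H, ... for this step
s.to_array()           # stack the saved lists into numpy arrays
posterior_means = s.x  # leading axis is the timestep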
def predict_measurement(x, P, z, R, H):
    # NOTE: filterpy's procedural predict() takes (x, P, F, Q), so H and R
    # are passed here as the state transition and process noise
    x_pred, P_pred = predict(x, P, H, R)
    return multivariate_normal(x_pred, P_pred).pdf(z)
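If the intent is the likelihood of z under the predicted measurement distribution, the standard construction projects the prior through H and adds R. A sketch under that reading, assuming filterpy's predict and scipy's multivariate_normal, with F and Q as the actual motion model:

# sketch of the measurement likelihood under the standard KF equations:
# z ~ N(H x_pred, H P_pred H^T + R)
from scipy.stats import multivariate_normal
from filterpy.kalman import predict

def measurement_likelihood(x, P, z, R, H, F, Q):
    x_pred, P_pred = predict(x, P, F, Q)
    S = H @ P_pred @ H.T + R          # innovation covariance
    return multivariate_normal(H @ x_pred, S).pdf(z)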
                         [y[j]],
                         [x[j] - x[j-1]],
                         [y[j] - y[j-1]]])
    # If near constant acceleration model, measure position, velocity and acceleration.
    elif args.model == 'nca':
        meas = np.array([[x[j]],
                         [y[j]],
                         [x[j] - x[j-1]],
                         [y[j] - y[j-1]],
                         [(x[j] - x[j-1]) - (x[j-1] - x[j-2])],
                         [(y[j] - y[j-1]) - (y[j-1] - y[j-2])]])

    # If doing sanity check, use library. Else use provided kalman_step function.
    if args.sanity_check:
        state, covariance = predict(state, covariance, A, Q_i)
        state, covariance = update(state, covariance, meas, R_i, C)
    else:
        state, covariance, _, _ = kalman_step(A, C, Q_i, R_i, meas,
                                              state, covariance)

    # Get position (velocity, acceleration) from deduced state.
    sx[j] = state[0]
    sy[j] = state[1]
    if args.model in {'ncv', 'nca'}:
        sx_d[j] = state[2]
        sy_d[j] = state[3]
    if args.model == 'nca':
        sx_dd[j] = state[4]
        sy_dd[j] = state[5]
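The fragment assumes A (transition), C (observation), Q_i and R_i are built elsewhere in the original script. For the near-constant-velocity ('ncv') case with state [x, y, x', y'] and the four-component measurement used above, a typical construction looks like the following; dt and the noise scales are illustrative placeholders:

# illustrative NCV matrices for the fragment above; dt, q, r are placeholders
import numpy as np

dt, q, r = 1.0, 0.01, 0.1
A = np.array([[1, 0, dt, 0],   # x <- x + dt*x'
              [0, 1, 0, dt],   # y <- y + dt*y'
              [0, 0, 1,  0],
              [0, 0, 0,  1]])
C = np.eye(4)                  # 'ncv' measures position and velocity, per the fragment
Q_i = q * np.eye(4)
R_i = r * np.eye(4)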
a1 = 0.5
a2 = 0.1

# z = np.random.normal(loc=5, scale=3)    # x
# y1 = np.random.normal(loc=10, scale=2)  # x'
# y2 = np.random.normal(loc=5, scale=1)   # x''

x = np.array([5., 10, 5])
P = np.diag([3, 2, 1])
F = np.array([[0, 1, 0],
              [0, a1, a2],
              [0, 1, 0]])
x, P = predict(x=x, P=P, F=F, Q=0)

from filterpy.common import Q_discrete_white_noise
Q = Q_discrete_white_noise(dim=3, dt=1., var=2.35)  # 3 x 3

H = np.array([[1.0, 0, 0]])
R = np.diag([1.])  # 1x1: H maps the 3-state to a single measurement

from filterpy.kalman import update
z = 1.
# x, P = update(x, P, z, R, H)

from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise

def pos_vel_filter(x, P, R, Q=0., dt=1.0):
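    # The body was cut off in this snippet; what follows is the conventional
    # constant-velocity construction this signature usually carries
    # (a sketch, not recovered from the original source).
    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.x = np.array([x[0], x[1]])   # position and velocity
    kf.F = np.array([[1., dt],
                     [0., 1.]])     # state transition matrix
    kf.H = np.array([[1., 0.]])     # measurement function: observe position
    kf.R *= R                       # measurement uncertainty
    if np.isscalar(P):
        kf.P *= P                   # covariance matrix
    else:
        kf.P[:] = P
    if np.isscalar(Q):
        kf.Q = Q_discrete_white_noise(dim=2, dt=dt, var=Q)
    else:
        kf.Q[:] = Q
    return kf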
def run_1D_filter(data: pd.DataFrame, R: float, Q: float, P0: float,
                  S0: float = 5.74) -> pd.DataFrame:
    """Run a simple 1D Kalman Filter on noisy simulated data.

    Initialize:
        We have an initial estimate of the state variance (`P0`). We use the
        first 'measurement' as our initial estimate (`X[0] = z[0]`).

    Predict:
        We require an initial input storage value - `S0`. We use the ABC model
        as our process model to produce a prediction of X (discharge). We
        assume the error/variance of this process to be `Q` (process error).

    Update:
        We use noisy observations (simulated from the ABCModel) as measurements
        `z`. We assume the noise on the simulated data to be `R` (measurement
        error). Our filtered state (X) combines the prediction and the
        measurement, weighted by their respective uncertainties.

    Note:
        - The ABC model has an internal state parameter (S[t]) that we are
          currently not using.
        - The ABC model produces a direct estimate of X, rather than the change
          in X. We calculate the change in x (`dx`) as `dx = Qsim - X`, i.e.
          the current estimate minus the previous (filtered) estimate.
        - The improved state (X = discharge) estimated by the filter is not
          fed back into the ABC model.

    We use the `filterpy.kalman` implementation of the Kalman Filter.

    Args:
        data (pd.DataFrame): driving data, including precipitation (for driving
            the ABCmodel) & the noisy Qsim values.
            Must have columns: ['precip', 'qsim', 'qsim_noise', 'ssim']
        R (float): The measurement noise (because we have simulated data this
            is known)
        Q (float): The process noise (variance / uncertainty on the ABC Model
            estimate)
        P0 (float): The initial estimate for the state noise (variance on the
            prior / state uncertainty)
        S0 (float, optional): Initial storage for the ABC model. Defaults
            to 5.74.

    Returns:
        pd.DataFrame: the measured, predicted and filtered discharge series,
            plus the Kalman gain, log-likelihood and residual at each timestep.
    """
    assert all(np.isin(["precip", "qsim", "qsim_noise", "ssim"], data.columns))

    measured_values = []
    predicted_values = []
    filtered_values = []
    kalman_gains = []
    log_likelihoods = []
    residuals = []

    # initialize step
    P = P0
    X = data["qsim_noise"][0]
    storage = S0

    # Kalman Filtered Qsim
    for ix, precip in enumerate(data["precip"]):
        # predict step
        Qsim, storage = abcmodel(storage, precip)  # process model (simulate Q)
        dx = Qsim - X                              # convert into a change (dx)
        X, P = kf.predict(x=X, P=P, u=dx, Q=Q)
        predicted_values.append(X)

        # update step
        z = data["qsim_noise"][ix]  # measurement
        X, P, y, K, S, log_likelihood = kf.update(x=X, P=P, z=z, R=R,
                                                  return_all=True)

        filtered_values.append(X)
        measured_values.append(z)
        kalman_gains.append(float(K))
        log_likelihoods.append(float(log_likelihood))
        residuals.append(float(y))

    out = pd.DataFrame({
        "measured": measured_values,
        "predicted": predicted_values,
        "filtered": filtered_values,
        "unobserved": data["qsim"],
        "kalman_gain": kalman_gains,
        "log_likelihood": log_likelihoods,
        "residual": residuals,
    })

    return out
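A sketch of how this might be called, assuming `data` is a DataFrame carrying the four columns the assert checks; the R/Q/P0 values are illustrative only:

# hypothetical usage of run_1D_filter; noise values are illustrative
out = run_1D_filter(data, R=1.0, Q=0.1, P0=10.0)
out[["measured", "predicted", "filtered"]].plot()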
V = np.zeros(shape=(25, 2))
Z = np.zeros(shape=(25, 2))
Q = np.array([[0.0001, 0.00002], [0.00002, 0.0001]])
R = np.array([[0.01, 0.005], [0.005, 0.02]])
x = x2 = np.array([[0., 0.]]).T
H = B = P = np.eye(2)
# P = 0.0001*P  # use this when estimating case 2

with open('data.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=' ')
    for i, row in enumerate(reader):
        a = row[0].split(',')
        V[i] = [float(a[0]), float(a[1])]
        Z[i] = [float(a[2]), float(a[3])]

result = []
for i in range(25):
    u = np.array([[V[i][0]], [V[i][1]]])
    z = np.array([[Z[i][0]], [Z[i][1]]])
    x, P = predict(x, P, F=1, Q=Q, u=u, B=B)
    x, P = update(x, P, z, R, H)
    result.append((x[:2]).tolist())

plt.plot(Z.T[0], Z.T[1], 'ro')
x1, x2 = zip(*result)
plt.plot(x1, x2, 'b-')
# plt.plot(x21, x22, '-')
plt.show()
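This snippet exercises the control-input form of the prediction, x_prior = F x + B u: with F=1 and B=I, the state prediction reduces to adding the control to the state while Q still inflates P. A minimal check of that equivalence, mirroring the call pattern above:

# minimal check of the control-input prediction used above
import numpy as np
from filterpy.kalman import predict

x = np.array([[0.], [0.]])
P = np.eye(2)
u = np.array([[1.], [2.]])
x_prior, P_prior = predict(x, P, F=1, Q=0., u=u, B=np.eye(2))
assert np.allclose(x_prior, x + u)  # with F=1, B=I: x_prior = x + u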