def predict(
    self,
    ekfstate: GaussParams,
    # The sampling time in units specified by dynamic_model
    Ts: float,
) -> GaussParams:
    """Predict the EKF state Ts time units ahead."""
    mean, cov = ekfstate

    # Jacobian of the dynamics and the process noise at the current mean.
    F = self.dynamic_model.F(mean, Ts)
    Q = self.dynamic_model.Q(mean, Ts)

    # Propagate the mean through the (possibly nonlinear) dynamics and the
    # covariance through the linearization plus additive process noise.
    predicted_mean = self.dynamic_model.f(mean, Ts)
    predicted_cov = F @ cov @ F.T + Q

    return GaussParams(predicted_mean, predicted_cov)
def _(self, init: dict) -> GaussParams:
    """Initialize a GaussParams from a dict with mean/cov under common aliases.

    Accepted keys: "mean"/"x"/"m" for the mean, "cov"/"P" for the covariance;
    the first matching key wins.

    Raises AssertionError if either the mean or the covariance key is missing.
    """
    got_mean = False
    got_cov = False
    for key in init:
        if not got_mean and key in ["mean", "x", "m"]:
            mean = init[key]
            got_mean = True
        if not got_cov and key in ["cov", "P"]:
            cov = init[key]
            got_cov = True
    # FIX: corrected the grammar of the failure message ("do" -> "does").
    assert (
        got_mean and got_cov
    ), f"EKF does not recognize mean and cov keys in the dict {init}."
    return GaussParams(mean, cov)
def update(
    self, z: np.ndarray, ekfstate: GaussParams, sensor_state: Dict[str, Any] = None
) -> GaussParams:
    """Update ekfstate with z in sensor_state"""
    x, P = ekfstate

    # Innovation and its covariance, plus the measurement Jacobian.
    innov, innov_cov = self.innovation(z, ekfstate, sensor_state=sensor_state)
    H = self.sensor_model.H(x, sensor_state=sensor_state)

    # Kalman gain via a linear solve rather than an explicit inverse.
    gain = P @ la.solve(innov_cov, H).T

    updated_mean = x + gain @ innov
    updated_cov = P - gain @ H @ P

    return GaussParams(updated_mean, updated_cov)
def estimate(self, immstate: MixtureParameters[MT]) -> GaussParams:
    """Calculate a moment-matched single-Gaussian estimate from immstate."""
    # ! You can assume all the modes have the same reduce and estimate function
    # ! and use eg. self.filters[0] functionality
    means = []
    covs = []
    for mean, cov in immstate.components:
        # FIX: was `means.append(means)` -- appended the list to itself,
        # so the stacked means were garbage.
        means.append(mean)
        covs.append(cov)

    means = np.array(means)
    # FIX: was `np.array(cov)` -- stacked only the *last* component's
    # covariance instead of the collected list.
    covs = np.array(covs)

    mean_reduced, cov_reduced = mixturereduction.GaussianMixtureMoments(
        immstate.weights, means, covs
    )
    return GaussParams(mean_reduced, cov_reduced)
def predict(
    self,
    ekfstate: GaussParams,
    # The sampling time in units specified by dynamic_model
    Ts: float,
) -> GaussParams:
    """Predict the EKF state Ts seconds ahead."""
    mean, cov = ekfstate

    jac = self.dynamic_model.F(mean, Ts)
    proc_noise = self.dynamic_model.Q(mean, Ts)

    mean_pred = self.dynamic_model.f(mean, Ts)
    cov_pred = jac @ cov @ jac.T + proc_noise

    # Guard against NaN/inf creeping in from the model evaluation.
    assert np.all(np.isfinite(cov_pred)) and np.all(
        np.isfinite(mean_pred)), "Non-finite EKF prediction."

    return GaussParams(mean_pred, cov_pred)
def update(self,
           z: np.ndarray,
           ekfstate: GaussParams,
           sensor_state: Dict[str, Any] = None
           ) -> GaussParams:
    """Update ekfstate with z in sensor_state"""
    x, P = ekfstate

    v, S = self.innovation(z, ekfstate, sensor_state=sensor_state)
    H = self.sensor_model.H(x, sensor_state=sensor_state)

    # Kalman gain: solve S^T W^T = H P instead of forming an inverse.
    W = np.linalg.solve(S.T, H @ P).T

    # Standard mean and covariance measurement updates.
    x_upd = x + W @ v
    P_upd = (np.eye(*P.shape) - W @ H) @ P

    return GaussParams(x_upd, P_upd)
def update(self, z: np.ndarray, ekfstate: GaussParams,
           sensor_state: Dict[str, Any] = None) -> GaussParams:
    """Update ekfstate with z in sensor_state"""
    x, P = ekfstate

    v, S = self.innovation(z, ekfstate, sensor_state=sensor_state)
    H = self.sensor_model.H(x, sensor_state=sensor_state)

    # Kalman gain. FIX: use a linear solve instead of explicitly inverting S
    # (better conditioned); equal to P @ H.T @ la.inv(S) for symmetric S.
    W = P @ la.solve(S, H).T

    x_upd = x + W @ v

    # FIX: identity was hard-coded as np.eye(4); derive its size from P so
    # the update works for any state dimension.
    P_upd = (np.eye(*P.shape) - W @ H) @ P

    return GaussParams(x_upd, P_upd)
def update(self, z: np.ndarray, ekfstate: GaussParams,
           sensor_state: Dict[str, Any] = None) -> GaussParams:
    """Update ekfstate with z in sensor_state"""
    x, P = ekfstate

    v, S = self.innovation(z, ekfstate, sensor_state=sensor_state)
    H = self.sensor_model.H(x, sensor_state=sensor_state)

    # Kalman gain. FIX: replaced the explicit inverse la.inv(S) with a linear
    # solve, which is numerically better conditioned (equal for symmetric S).
    # A leftover Norwegian TODO note was removed here.
    W = P @ la.solve(S, H).T

    x_upd = x + W @ v
    P_upd = (np.eye(self.dynamic_model.n) - W @ H) @ P

    return GaussParams(x_upd, P_upd)
def predict(
    self,
    ekfstate: GaussParams,
    Ts: float,
) -> GaussParams:
    """Predict the EKF state Ts seconds ahead."""
    mean, cov = ekfstate
    assert isPSD(cov), "P input to EKF.predict not PSD"

    # Linearized dynamics and process noise at the current mean.
    F = self.dynamic_model.F(mean, Ts)
    Q = self.dynamic_model.Q(mean, Ts)

    mean_pred = self.dynamic_model.f(mean, Ts)
    cov_pred = F @ cov @ F.T + Q

    # Sanity checks: finite and positive semi-definite prediction.
    assert np.all(np.isfinite(cov_pred)) and np.all(
        np.isfinite(mean_pred)), "Non-finite EKF prediction."
    assert isPSD(cov_pred), "P_pred calculated by EKF.predict not PSD"

    return GaussParams(mean_pred, cov_pred)
def update(
    self,
    z: np.ndarray,
    ekfstate: GaussParams,
    *,
    sensor_state: Optional[Dict[str, Any]] = None,
) -> GaussParams:
    """Update ekfstate with z in sensor_state"""
    x, P = ekfstate
    assert isPSD(P), "P input to EKF.update not PSD"

    v, S = self.innovation(z, ekfstate, sensor_state=sensor_state)
    H = self.sensor_model.H(x, sensor_state=sensor_state)

    # Kalman gain
    W = P @ la.solve(S, H).T  # alternative: P @ H.T @ la.inv(S)

    # mean update
    x_upd = x + W @ v

    I = np.eye(*P.shape)
    # covariance update
    ## standard form seem to give numerical instability causing non-PSD matrices
    ## for certain setups, or that some other calculate increases it in IMM etc.
    ## P_upd = P - W @ H @ P  # simple standard form
    ## Better to use the more numerically stable Joseph form
    P_upd = (I - W @ H) @ P @ (I - W @ H).T + W @ self.sensor_model.R(x) @ W.T

    ekfstate_upd = GaussParams(x_upd, P_upd)

    # FIX: this assert previously re-checked the *input* covariance P instead
    # of the freshly computed P_upd, so a non-PSD update slipped through.
    assert isPSD(P_upd), "P_upd calculated by EKF.update not PSD"
    return ekfstate_upd
def update(
    self, z: np.ndarray, ekfstate: GaussParams, sensor_state: Dict[str, Any] = None
) -> GaussParams:
    """Update ekfstate with z in sensor_state"""
    mean, cov = ekfstate

    innov, innov_cov = self.innovation(z, ekfstate, sensor_state=sensor_state)
    H = self.sensor_model.H(mean, sensor_state=sensor_state)

    # Gain from a linear solve; then the mean measurement update.
    gain = cov @ la.solve(innov_cov, H).T
    mean_upd = mean + gain @ innov

    # Joseph-form covariance update: algebraically equal to P - W @ H @ P but
    # numerically more stable (stays symmetric PSD under round-off).
    identity = np.eye(*cov.shape)
    meas_noise = self.sensor_model.R(mean, sensor_state=sensor_state, z=z)
    cov_upd = ((identity - gain @ H) @ cov @ (identity - gain @ H).T
               + gain @ meas_noise @ gain.T)

    return GaussParams(mean_upd, cov_upd)
def predict(
    self,
    ekfstate: GaussParams,
    # The sampling time in units specified by dynamic_model
    Ts: float,
) -> GaussParams:
    """Predict the EKF state Ts seconds ahead."""
    mean, cov = ekfstate
    assert isPSD(cov), "P input to EKF.predict not PSD"

    # Linearize the dynamics at the current mean.
    F = self.dynamic_model.F(mean, Ts)
    Q = self.dynamic_model.Q(mean, Ts)

    mean_pred = self.dynamic_model.f(mean, Ts)
    cov_pred = F @ cov @ F.T + Q

    # Prediction must stay finite and positive semi-definite.
    assert np.all(np.isfinite(cov_pred)) and np.all(
        np.isfinite(mean_pred)), "Non-finite EKF prediction."
    assert isPSD(cov_pred), "P_pred calculated by EKF.predict not PSD"

    return GaussParams(mean_pred, cov_pred)
# Build an EKF-based PDA tracker from the models defined earlier in the file.
ekf_filter = ekf.EKF(dynamic_model, measurement_model)
tracker = pda.PDA(ekf_filter, clutter_intensity, PD, gate_size)

# allocate
NEES = np.zeros(K)
NEESpos = np.zeros(K)
NEESvel = np.zeros(K)

# initialize
x_bar_init = np.array([7100, 3620, 0, 0, 0])
P_bar_init = np.diag([40, 40, 10, 10, 0.1])**2
#init_state = tracker.init_filter_state({"mean": x_bar_init, "cov": P_bar_init})
init_state = GaussParams(x_bar_init, P_bar_init)

tracker_update = init_state
tracker_update_list = []
tracker_predict_list = []
tracker_estimate_list = []

# estimate
# NOTE(review): the bookkeeping lists above are not appended to within this
# chunk -- the loop body presumably continues past this view; confirm.
for k, (Zk, x_true_k, Tsk) in enumerate(zip(Z, Xgt, Ts)):
    tracker_predict = tracker.predict(tracker_update, Tsk)
    tracker_update = tracker.update(Zk, tracker_predict)
    tracker_estimate = tracker.estimate(tracker_update)
    # NEES over the full 4D kinematic state, position only, velocity only.
    NEES[k] = estats.NEES(*tracker_estimate, x_true_k, idxs=np.arange(4))
    NEESpos[k] = estats.NEES(*tracker_estimate, x_true_k, idxs=np.arange(2))
    NEESvel[k] = estats.NEES(*tracker_estimate, x_true_k, idxs=np.arange(2, 4))
# Initial covariance: measurement-derived position variance, loose velocity prior.
P_bar_init[[0, 1], [0, 1]] = 2 * sigma_z**2
P_bar_init[[2, 3], [2, 3]] = 10**2
init_state = tracker.init_filter_state({"mean": x_bar_init, "cov": P_bar_init})
tracker_update = init_state
tracker_update_list = []
tracker_predict_list = []

# estimate
for k, (Zk, x_true_k) in enumerate(zip(Z, Xgt)):
    tracker_predict = tracker.predict(tracker_update, Ts)
    tracker_update = tracker.update(Zk, tracker_predict)
    NEES[k] = ekf_filter.NEES(tracker_update, x_true_k[0:4])
    x, P = tracker_update
    # Marginal NEES for the position and velocity sub-states.
    NEESpos[k] = ekf_filter.NEES(GaussParams(x[0:2], P[0:2, 0:2]), \
        x_true_k[0:2])
    NEESvel[k] = ekf_filter.NEES(GaussParams(x[2:4], P[2:4, 2:4]), \
        x_true_k[2:4])
    tracker_predict_list.append(tracker_predict)
    tracker_update_list.append(tracker_update)

x_hat = np.array([upd.mean for upd in tracker_update_list])

# calculate a performance metric
posRMSE = np.sqrt(np.mean((x_hat[:, 0:2] - Xgt[:, 0:2])**2))
velRMSE = np.sqrt(np.mean((x_hat[:, 2:4] - Xgt[:, 2:4])**2))

# %% plots
fig3, ax3 = plt.subplots(num=3, clear=True)
if run_three_models: p10 = 0.3 # initvalue for mode probabilities p20 = 0.35 p30 = 0.35 # PI = np.array([[PI11, (1-PI11)/4, 3*(1-PI11)/4], [(1-PI22)/4, PI22, 3*(1-PI22)/4], [(1-PI33)/2, (1-PI33)/2, PI33]]) #alternate transition matrix, more likely to jump into high CV if changing modes PI = np.array([[PI11, (1 - PI11) / 2, (1 - PI11) / 2], [(1 - PI22) / 2, PI22, (1 - PI22) / 2], [(1 - PI33) / 2, (1 - PI33) / 2, PI33]]) assert np.allclose(np.sum(PI, axis=1), 1), "rows of PI must sum to 1" mean_init = np.array([7100, 3620, 0, 0, 0]) cov_init = np.diag([ 40, 40, 10, 10, 0.1 ])**2 # np.diag([1000, 1000, 30, 30, 0.1]) ** 2 # THIS WILL NOT BE GOOD mode_probabilities_init = np.array([p10, p20]) mode_states_init = GaussParams(mean_init, cov_init) init_imm_state = MixtureParameters(mode_probabilities_init, [mode_states_init] * 2) if run_three_models: mode_probabilities_init = np.array([p10, p20, p30]) init_imm_state = MixtureParameters(mode_probabilities_init, [mode_states_init] * 3) assert np.allclose(np.sum(mode_probabilities_init), 1), "initial mode probabilities must sum to 1" # make model measurement_model = measurementmodels.CartesianPosition(sigma_z, state_dim=5) dynamic_models: List[dynamicmodels.DynamicModel] = [] dynamic_models.append(dynamicmodels.WhitenoiseAccelleration(sigma_a_CV, n=5)) dynamic_models.append(dynamicmodels.ConstantTurnrate(sigma_a_CT, sigma_omega))
def _(self, init: Union[Tuple, List]) -> GaussParams:
    """Initialize a GaussParams by positional unpacking of a sequence."""
    params = GaussParams(*init)
    return params
ax1.scatter(*Z.T[:2]) # %% tune single filters # parameters sigma_z = 2.25 sigma_a_CV = 0.22 sigma_a_CT = 0.07 sigma_omega = 0.0016 * np.pi # initial values init_mean = np.array([0, 0, 2, 0, 0]) init_cov = np.diag([25, 25, 3, 3, 0.0005]) ** 2 init_state_CV = GaussParams(init_mean[:4], init_cov[:4, :4]) # get rid of turn rate init_state_CT = GaussParams(init_mean, init_cov) # same init otherwise init_states = [init_state_CV, init_state_CT] # create models measurement_model_CV = measurementmodels.CartesianPosition(sigma_z) measurement_model_CT = measurementmodels.CartesianPosition(sigma_z, state_dim=5) CV = dynamicmodels.WhitenoiseAccelleration(sigma_a_CV) CT = dynamicmodels.ConstantTurnrate(sigma_a_CT, sigma_omega) # create filters filters = [] filters.append(ekf.EKF(CV, measurement_model_CV)) filters.append(ekf.EKF(CT, measurement_model_CT)) # allocate
# process / measurement noise standard deviations
sigma_a = 2.6
sigma_z = 3.1

# create the model and estimator object
dynmod = dynamicmodels.WhitenoiseAccelleration(sigma_a)
measmod = measurementmodels.CartesianPosition(sigma_z)
ekf_filter = ekf.EKF(dynmod, measmod)
print(ekf_filter)

# Optimal init for model: two-point differencing of the first two measurements
# for the velocity, with the matching analytic covariance blocks.
mean = np.array([*Z[1], *(Z[1] - Z[0]) / Ts])
cov11 = sigma_z**2 * np.eye(2)
cov12 = sigma_z**2 * np.eye(2) / Ts
cov22 = (2 * sigma_z**2 / Ts**2 + sigma_a**2 * Ts / 3) * np.eye(2)
cov = np.block([[cov11, cov12], [cov12.T, cov22]])
init_ekfstate = GaussParams(mean, cov)

ekfpred_list = []
ekfupd_list = []
ekfupd = init_ekfstate
NIS = np.empty(K)
NEES_pred = np.empty(K)
NEES_upd = np.empty(K)
dists_pred = np.empty((K, 2))
dists_upd = np.empty((K, 2))

# estimate
# Skips the first two measurements, which were consumed by the init above.
for k, (zk, x_true_k) in enumerate(zip(Z[2:], Xgt[2:])):
    ekfpred = ekf_filter.predict(ekfupd, Ts)
    ekfupd = ekf_filter.update(zk, ekfpred)
    NIS[k] = ekf_filter.NIS(zk, ekfpred)
# allocate NEES = np.zeros(K) NEESpos = np.zeros(K) NEESvel = np.zeros(K) # initialize x_bar_init = np.array([*Z[0][true_association[0] - 1], 0, 0]) x_bar_init = np.array([30, -70, 0, 0]) P_bar_init = np.zeros((4, 4)) P_bar_init[[0, 1], [0, 1]] = 2 * sigma_z**2 P_bar_init[[2, 3], [2, 3]] = 20**2 #init_state = tracker.init_filter_state({"mean": x_bar_init, "cov": P_bar_init}) init_state = GaussParams(x_bar_init, P_bar_init) tracker_update = init_state tracker_update_list = [] tracker_predict_list = [] # estimate for k, (Zk, x_true_k) in enumerate(zip(Z, Xgt)): tracker_predict = tracker.predict(tracker_update, Ts) tracker_update = tracker.update(Zk, tracker_predict) NEES[k] = ekf_filter.NEES(tracker_update, x_true_k[0:4]) x, P = tracker_update NEESpos[k] = ekf_filter.NEES(GaussParams(x[0:2], P[0:2, 0:2]), x_true_k[0:2]) NEESvel[k] = ekf_filter.NEES(GaussParams(x[2:4], P[2:4, 2:4]), x_true_k[2:4])
# NOTE(review): this `elif` closes an `if model == ...` chain whose start is
# outside this view; indentation of its body is reconstructed -- confirm.
elif model == "CT":
    # sensor
    sigma_z = 9
    clutter_intensity = 1e-5
    PD = 0.8
    gate_size = 2

    # dynamic model
    sigma_a_CT = 3.5
    sigma_omega = 0.05
    # Does using only a single filter with the CT model lead to poor filter
    # consistency?  (translated from the original Norwegian comment)
    mean_init = np.array([7116, 3617, 0, 0, 0])
    cov_init = np.diag([14, 14, 2, 2, 0.01])**2
    ekf_init = GaussParams(mean_init, cov_init)

# make model
measurement_model = measurementmodels.CartesianPosition(sigma_z, state_dim=5)
if model == "CV":
    ekf_filter = ekf.EKF(
        dynamicmodels.WhitenoiseAccelleration(sigma_a_CV, n=5),
        measurement_model)
elif model == "CT":
    ekf_filter = ekf.EKF(
        dynamicmodels.ConstantTurnrate(sigma_a_CT, sigma_omega),
        measurement_model)

tracker = pda.PDA(ekf_filter, clutter_intensity, PD, gate_size)
# make model measurement_model = measurementmodels.CartesianPosition(sigma_z, state_dim=5) dynamic_models: List[dynamicmodels.DynamicModel] = [] dynamic_models.append(dynamicmodels.WhitenoiseAccelleration(sigma_a_CV, n=5)) dynamic_models.append(dynamicmodels.ConstantTurnrate(sigma_a_CT, sigma_omega)) ekf_filters = [] ekf_filters.append(ekf.EKF(dynamic_models[0], measurement_model)) ekf_filters.append(ekf.EKF(dynamic_models[1], measurement_model)) trackers = [] trackers.append(pda.PDA(ekf_filters[0], clutter_intensity, PD, gate_size)) # EKF CV trackers.append(pda.PDA(ekf_filters[1], clutter_intensity, PD, gate_size)) # EKF CT names = ["CV_EKF", "CT_EKF"] init_ekf_state = GaussParams(mean_init, cov_init) # init_imm_pda_state = tracker.init_filter_state(init_ekf_state) NEES = np.zeros(K) NEESpos = np.zeros(K) NEESvel = np.zeros(K) tracker_update_init = [init_ekf_state, init_ekf_state] tracker_update_list = np.empty((len(trackers), len(Xgt)), dtype=MixtureParameters) tracker_predict_list = np.empty((len(trackers), len(Xgt)), dtype=MixtureParameters) tracker_estimate_list = np.empty((len(trackers), len(Xgt)), dtype=MixtureParameters) # estimate Ts = np.insert(Ts,0, 0., axis=0) x_hat = np.empty((len(trackers), len(Xgt), 5)) prob_hat = np.empty((len(trackers), len(Xgt), 2))
tracker = pda.PDA(ekf_filter, clutter_intensity, PD, gate_size) # allocate NEES = np.zeros(K) NEESpos = np.zeros(K) NEESvel = np.zeros(K) # initialize x_bar_init = np.array([*Z[0][true_association[0] - 1], 0, 0]) P_bar_init = np.zeros((4, 4)) P_bar_init[[0, 1], [0, 1]] = 2 * sigma_z**2 P_bar_init[[2, 3], [2, 3]] = 10**2 init_state = GaussParams( x_bar_init, P_bar_init ) # tracker.init_filter_state({"mean": x_bar_init, "cov": P_bar_init}) tracker_update = init_state tracker_update_list = [] tracker_predict_list = [] # estimate print("Start estimating") for k, (Zk, x_true_k) in enumerate(zip(Z, Xgt)): tracker_predict = tracker.predict(tracker_update, Ts) # TODO tracker_update = tracker.update(Zk, tracker_predict) # TODO NEES[k] = (tracker_update.mean - x_true_k[0:4]) @ la.solve( tracker_update.cov, tracker_update.mean - x_true_k[0:4] ) # tracker.state_filter.NEESes(tracker_update, x_true_k) TODO NEESpos[k] = (tracker_update.mean[0:2] - x_true_k[0:2]) @ la.solve( tracker_update.cov[0:2, 0:2], tracker_update.mean[0:2] - x_true_k[0:2]
def init_filter_state(self, init_state: "ET_like"):
    """Build a GaussParams from a mapping with 'mean' and 'cov' entries."""
    mean = init_state['mean']
    cov = init_state['cov']
    return GaussParams(mean, cov)
    # NOTE(review): presumably the fall-through branch of a dataset/model
    # selection if/else starting in the previous chunk -- confirm indentation.
    raise NotImplementedError

fig1, ax1 = plt.subplots(num=1, clear=True)
ax1.plot(*Xgt.T[:2])
ax1.scatter(*Z.T[:2])

# %% tune single filters
sigma_z = 8.7
sigma_a_CT = 0.2
sigma_a_CV = 5.5
sigma_omega = 5e-6 * np.pi

init_state = {
    "mean": np.array(Xgt[0, :]),
    "cov": np.diag([1, 1, 1, 1, 0.005])**2
}
init_state = GaussParams(init_state["mean"], init_state["cov"])

measurement_model = measurementmodels.CartesianPosition(
    sigma_z, state_dim=5)  # note the 5
CV = dynamicmodels.WhitenoiseAccelleration(sigma_a_CV, n=5)  # note the 5
CT = dynamicmodels.ConstantTurnrate(sigma_a_CT, sigma_omega)

# make models
filters = []
filters.append(ekf.EKF(CV, measurement_model))
filters.append(ekf.EKF(
    CT, measurement_model))  #, so you understand what is going on here

pred = []
upd = []
stats = []