def setUpClass(cls):
    cls.ssm = {}

    # setup UNGM with Student RVs
    x0 = StudentRV(1)
    q = StudentRV(1, scale=np.array([[10.0]]))
    r = StudentRV(1)
    dyn = UNGMTransition(x0, q)
    obs = UNGMMeasurement(r, dyn.dim_state)
    x = dyn.simulate_discrete(100)
    y = obs.simulate_measurements(x)
    cls.ssm.update({'ungm': {'dyn': dyn, 'obs': obs, 'x': x, 'y': y}})

    # setup CV with Student RVs
    m_0 = np.array([10175, 295, 980, -35]).astype(float)
    P_0 = np.diag([10000, 100, 10000, 100]).astype(float)
    nu_0 = 1000.0
    x0 = StudentRV(4, m_0, P_0, nu_0)
    Q = np.diag([50, 5]).astype(float)
    nu_q = 1000.0
    q = StudentRV(2, scale=Q, dof=nu_q)
    R = np.diag([50, 0.4e-6]).astype(float)
    nu_r = 4.0
    r = StudentRV(2, scale=R, dof=nu_r)
    dyn = ConstantVelocity(x0, q, dt=0.5)
    obs = Radar2DMeasurement(r, 4)
    x = dyn.simulate_discrete(100)
    y = obs.simulate_measurements(x)
    cls.ssm.update({'cv': {'dyn': dyn, 'obs': obs, 'x': x, 'y': y}})
def hypers_demo(lscale=None):
    print(f"Seed = {np.random.get_state()[1][0]}")

    # set default lengthscales if unspecified
    if lscale is None:
        lscale = [1e-3, 3e-3, 1e-2, 3e-2, 1e-1, 3e-1, 1, 3, 1e1, 3e1, 1e2]
    steps, mc = 500, 100

    # setup univariate non-stationary growth model
    x0 = GaussRV(1, cov=np.atleast_2d(5.0))
    q = GaussRV(1, cov=np.atleast_2d(10.0))
    dyn = UNGMTransition(x0, q)  # dynamics
    r = GaussRV(1)
    obs = UNGMMeasurement(r, 1)  # observation model
    x = dyn.simulate_discrete(steps, mc_sims=mc)  # generate some data
    z = obs.simulate_measurements(x)

    num_el = len(lscale)
    mean_f = np.zeros((dyn.dim_in, steps, mc, num_el))
    cov_f = np.zeros((dyn.dim_in, dyn.dim_in, steps, mc, num_el))
    for iel, el in enumerate(lscale):

        # kernel parameters
        ker_par = np.array([[1.0, el * dyn.dim_in]])

        # initialize BHKF with current lengthscale
        f = GaussianProcessKalman(dyn, obs, ker_par, ker_par, points='ut', point_hyp={'kappa': 0.0})

        # filtering
        for s in range(mc):
            mean_f[..., s, iel], cov_f[..., s, iel] = f.forward_pass(z[..., s])

    # evaluate RMSE, NCI and NLL
    rmseVsEl = squared_error(x[..., na], mean_f)
    nciVsEl = rmseVsEl.copy()
    nllVsEl = rmseVsEl.copy()
    for k in range(steps):
        for iel in range(num_el):
            mse_mat = mse_matrix(x[:, k, :], mean_f[:, k, :, iel])
            for s in range(mc):
                nciVsEl[:, k, s, iel] = log_cred_ratio(x[:, k, s], mean_f[:, k, s, iel],
                                                       cov_f[:, :, k, s, iel], mse_mat)
                nllVsEl[:, k, s, iel] = neg_log_likelihood(x[:, k, s], mean_f[:, k, s, iel],
                                                           cov_f[:, :, k, s, iel])

    # average out time and MC simulations
    rmseVsEl = np.sqrt(np.mean(rmseVsEl, axis=1)).mean(axis=1)
    nciVsEl = nciVsEl.mean(axis=(1, 2))
    nllVsEl = nllVsEl.mean(axis=(1, 2))

    # plot influence of changing lengthscale on the RMSE, NCI and NLL filter performance
    plt.figure()
    plt.semilogx(lscale, rmseVsEl.squeeze(), color='k', ls='-', lw=2, marker='o', label='RMSE')
    plt.semilogx(lscale, nciVsEl.squeeze(), color='k', ls='--', lw=2, marker='o', label='NCI')
    plt.semilogx(lscale, nllVsEl.squeeze(), color='k', ls='-.', lw=2, marker='o', label='NLL')
    plt.grid(True)
    plt.legend()
    plt.show()

    return {'el': lscale, 'rmse': rmseVsEl, 'nci': nciVsEl, 'neg_log_likelihood': nllVsEl}
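# Minimal usage sketch (not part of the original script): run the demo on a coarse lengthscale
# grid and report the lengthscale with the lowest averaged RMSE. The grid values are illustrative;
# the returned 'rmse' array has shape (dim, num_el), so argmin over the flattened array suffices
# for the univariate UNGM.
if __name__ == '__main__':
    results = hypers_demo(lscale=[1e-2, 1e-1, 1, 1e1])
    best = np.argmin(results['rmse'])
    print('Lengthscale with lowest RMSE: {}'.format(results['el'][best]))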
def test_simulate(self):
    time_steps = 50

    # UNGM additive noise
    dim = 1
    init_dist = GaussRV(dim)
    noise_dist = GaussRV(dim, cov=np.atleast_2d(10.0))
    ungm_dyn = UNGMTransition(init_dist, noise_dist)
    ungm_meas = UNGMMeasurement(GaussRV(dim), ungm_dyn.dim_state)
    x = ungm_dyn.simulate_discrete(time_steps, mc_sims=20)
    y = ungm_meas.simulate_measurements(x)

    # UNGM non-additive noise
    ungmna_dyn = UNGMNATransition(init_dist, noise_dist)
    ungmna_meas = UNGMNAMeasurement(GaussRV(dim), ungm_dyn.dim_state)
    x = ungmna_dyn.simulate_discrete(time_steps, mc_sims=20)
    y = ungmna_meas.simulate_measurements(x)
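# Hedged sketch (not in the original tests): a standalone check of the data layout the demos
# below rely on. It assumes GaussRV, UNGMTransition, UNGMMeasurement and numpy (np) are imported
# as elsewhere in these scripts, and that simulate_discrete/simulate_measurements return arrays
# shaped (dim, steps, mc_sims), consistent with the z[..., s] and x[:, k, s] indexing used here.
def check_simulation_shapes(dim=1, steps=50, sims=20):
    dyn = UNGMTransition(GaussRV(dim), GaussRV(dim, cov=np.atleast_2d(10.0)))
    obs = UNGMMeasurement(GaussRV(dim), dyn.dim_state)
    x = dyn.simulate_discrete(steps, mc_sims=sims)
    y = obs.simulate_measurements(x)
    assert x.shape == (dim, steps, sims)
    assert y.shape[1:] == (steps, sims)
    return x, y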
def setUpClass(cls):
    # setup UNGM
    x0 = GaussRV(1, cov=np.atleast_2d(1.0))
    q = GaussRV(1, cov=np.atleast_2d(10.0))
    cls.dyn_ungm = UNGMTransition(x0, q)
    r = GaussRV(1, cov=np.atleast_2d(1.0))
    cls.obs_ungm = UNGMMeasurement(r, 1)

    # setup pendulum
    dt = 0.01
    x0 = GaussRV(2, np.array([1.5, 0]), 0.01 * np.eye(2))
    q = GaussRV(2, cov=np.array([[dt**3 / 3, dt**2 / 2], [dt**2 / 2, dt]]))
    cls.dyn_pend = Pendulum2DTransition(x0, q, dt)
    r = GaussRV(1, cov=np.atleast_2d(0.1))
    cls.obs_pend = Pendulum2DMeasurement(r, cls.dyn_pend.dim_state)
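# Hedged sketch of a test method that could live in the same TestCase as the fixture above.
# UnscentedKalman(dyn, obs) and forward_pass(z[..., s]) follow the constructor/call pattern used
# in the benchmark scripts below (with UnscentedKalman assumed importable from ssmtoybox.ssinf);
# the test name and the (dim, steps) shape of the returned mean are assumptions, not confirmed here.
def test_ungm_filtering_runs(self):
    x = self.dyn_ungm.simulate_discrete(100, mc_sims=1)
    z = self.obs_ungm.simulate_measurements(x)
    kf = UnscentedKalman(self.dyn_ungm, self.obs_ungm)
    mean, cov = kf.forward_pass(z[..., 0])
    self.assertEqual(mean.shape, (1, 100))  # assumed (dim_state, steps) layout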
def setUpClass(cls):
    cls.ssm = {}

    # setup UNGM
    x0 = GaussRV(1)
    q = GaussRV(1, cov=np.array([[10.0]]))
    r = GaussRV(1)
    dyn = UNGMTransition(x0, q)
    obs = UNGMMeasurement(r, 1)
    x = dyn.simulate_discrete(100)
    y = obs.simulate_measurements(x)
    cls.ssm.update({'ungm': {'dyn': dyn, 'obs': obs, 'x': x, 'y': y}})

    # setup UNGM with non-additive noise
    x0 = GaussRV(1)
    q = GaussRV(1, cov=np.array([[10.0]]))
    r = GaussRV(1)
    dyn = UNGMNATransition(x0, q)
    obs = UNGMNAMeasurement(r, 1)
    x = dyn.simulate_discrete(100)
    y = obs.simulate_measurements(x)
    cls.ssm.update({'ungmna': {'dyn': dyn, 'obs': obs, 'x': x, 'y': y}})

    # setup 2D pendulum
    x0 = GaussRV(2, mean=np.array([1.5, 0]), cov=0.01 * np.eye(2))
    dt = 0.01
    q = GaussRV(2, cov=0.01 * np.array([[(dt**3) / 3, (dt**2) / 2], [(dt**2) / 2, dt]]))
    r = GaussRV(1, cov=np.array([[0.1]]))
    dyn = Pendulum2DTransition(x0, q, dt=dt)
    obs = Pendulum2DMeasurement(r, dyn.dim_state)
    x = dyn.simulate_discrete(100)
    y = obs.simulate_measurements(x)
    cls.ssm.update({'pend': {'dyn': dyn, 'obs': obs, 'x': x, 'y': y}})

    # setup reentry vehicle radar tracking
    m0 = np.array([6500.4, 349.14, -1.8093, -6.7967, 0.6932])
    P0 = np.diag([1e-6, 1e-6, 1e-6, 1e-6, 1])
    x0 = GaussRV(5, m0, P0)
    q = GaussRV(3, cov=np.diag([2.4064e-5, 2.4064e-5, 1e-6]))
    r = GaussRV(2, cov=np.diag([1e-6, 0.17e-6]))
    dyn = ReentryVehicle2DTransition(x0, q)
    obs = Radar2DMeasurement(r, 5)
    x = dyn.simulate_discrete(100)
    y = obs.simulate_measurements(x)
    cls.ssm.update({'rer': {'dyn': dyn, 'obs': obs, 'x': x, 'y': y}})

    # setup coordinated turn bearings-only tracking
    m0 = np.array([1000, 300, 1000, 0, np.deg2rad(-3.0)])
    P0 = np.diag([100, 10, 100, 10, 0.1])
    x0 = GaussRV(5, m0, P0)
    dt = 0.1
    rho_1, rho_2 = 0.1, 1.75e-4
    A = np.array([[dt**3 / 3, dt**2 / 2], [dt**2 / 2, dt]])
    Q = np.zeros((5, 5))
    Q[:2, :2], Q[2:4, 2:4], Q[4, 4] = rho_1 * A, rho_1 * A, rho_2 * dt
    q = GaussRV(5, cov=Q)
    r = GaussRV(4, cov=10e-3 * np.eye(4))
    sen = np.vstack((1000 * np.eye(2), -1000 * np.eye(2))).astype(float)
    dyn = CoordinatedTurnTransition(x0, q)
    obs = BearingMeasurement(r, 5, state_index=[0, 2], sensor_pos=sen)
    x = dyn.simulate_discrete(100)
    y = obs.simulate_measurements(x)
    cls.ssm.update({'ctb': {'dyn': dyn, 'obs': obs, 'x': x, 'y': y}})

    # setup CTRS with radar measurements
    x0 = GaussRV(5, cov=0.1 * np.eye(5))
    q = GaussRV(2, cov=np.diag([0.1, 0.1 * np.pi]))
    r = GaussRV(2, cov=np.diag([0.3, 0.03]))
    dyn = ConstantTurnRateSpeed(x0, q)
    obs = Radar2DMeasurement(r, 5)
    x = dyn.simulate_discrete(100)
    y = obs.simulate_measurements(x)
    cls.ssm.update({'ctrs': {'dyn': dyn, 'obs': obs, 'x': x, 'y': y}})
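# Hedged sketch of a test method that sweeps the fixtures above. UnscentedKalman(dyn, obs) and
# forward_pass(y[..., 0]) follow the usage elsewhere in these scripts, and the (dim, steps, mc_sims)
# data layout is assumed; the test name and the use of subTest are illustrative only.
def test_forward_pass_all_models(self):
    for name, ssm in self.ssm.items():
        with self.subTest(model=name):
            kf = UnscentedKalman(ssm['dyn'], ssm['obs'])
            mean, cov = kf.forward_pass(ssm['y'][..., 0])
            self.assertTrue(np.all(np.isfinite(mean)))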
def lengthscale_filter_demo(lscale):
    steps, mc = 500, 20

    # initialize UNGM model
    dyn = UNGMTransition(GaussRV(1, cov=5.0), GaussRV(1, cov=10.0))
    obs = UNGMMeasurement(GaussRV(1, cov=1.0), 1)

    # generate some data
    x = dyn.simulate_discrete(steps, mc)
    z = obs.simulate_measurements(x)

    dim = dyn.dim_state
    num_el = len(lscale)
    # lscale = [1e-3, 3e-3, 1e-2, 3e-2, 1e-1, 3e-1, 1, 3, 1e1, 3e1]  # , 1e2, 3e2]
    mean_f = np.zeros((dim, steps, mc, num_el))
    cov_f = np.zeros((dim, dim, steps, mc, num_el))
    for iel, el in enumerate(lscale):

        # kernel parameters
        ker_par = np.array([[1.0, el * dim]])

        # initialize BHKF with current lengthscale
        f = GaussianProcessKalman(dyn, obs, ker_par, ker_par, kernel='rbf', points='ut')

        # filtering
        for s in range(mc):
            mean_f[..., s, iel], cov_f[..., s, iel] = f.forward_pass(z[..., s])

    # evaluate RMSE, NCI and NLL
    rmseVsEl = squared_error(x[..., na], mean_f)
    nciVsEl = rmseVsEl.copy()
    nllVsEl = rmseVsEl.copy()
    for k in range(steps):
        for iel in range(num_el):
            mse_mat = mse_matrix(x[:, k, :], mean_f[:, k, :, iel])
            for s in range(mc):
                nciVsEl[:, k, s, iel] = log_cred_ratio(x[:, k, s], mean_f[:, k, s, iel],
                                                       cov_f[:, :, k, s, iel], mse_mat)
                nllVsEl[:, k, s, iel] = neg_log_likelihood(x[:, k, s], mean_f[:, k, s, iel],
                                                           cov_f[:, :, k, s, iel])

    # average out time and MC simulations
    rmseVsEl = np.sqrt(np.mean(rmseVsEl, axis=1)).mean(axis=1)
    nciVsEl = nciVsEl.mean(axis=(1, 2))
    nllVsEl = nllVsEl.mean(axis=(1, 2))

    # plot influence of changing lengthscale on the RMSE, NCI and NLL filter performance
    plt.figure()
    plt.semilogx(lscale, rmseVsEl.squeeze(), color='k', ls='-', lw=2, marker='o', label='RMSE')
    plt.semilogx(lscale, nciVsEl.squeeze(), color='k', ls='--', lw=2, marker='o', label='NCI')
    plt.semilogx(lscale, nllVsEl.squeeze(), color='k', ls='-.', lw=2, marker='o', label='NLL')
    plt.grid(True)
    plt.legend()
    plt.show()

    plot_data = {'el': lscale, 'rmse': rmseVsEl, 'nci': nciVsEl, 'neg_log_likelihood': nllVsEl}
    return plot_data
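# Minimal usage sketch (not from the original script): run the demo on an illustrative lengthscale
# grid and print the metrics as a small table. The pandas import and grid values are assumptions;
# the returned arrays have shape (dim, num_el), so squeeze() works for the univariate UNGM.
if __name__ == '__main__':
    import pandas as pd
    lengthscales = [1e-2, 3e-2, 1e-1, 3e-1, 1, 3]
    data = lengthscale_filter_demo(lengthscales)
    summary = pd.DataFrame({'RMSE': data['rmse'].squeeze(),
                            'NCI': data['nci'].squeeze(),
                            'NLL': data['neg_log_likelihood'].squeeze()},
                           index=lengthscales)
    print(summary)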
def tables():
    steps, mc = 500, 100

    # initialize UNGM model
    dyn = UNGMTransition(GaussRV(1, cov=5.0), GaussRV(1, cov=10.0))
    obs = UNGMMeasurement(GaussRV(1, cov=1.0), 1)

    # generate some data
    np.random.seed(0)
    x = dyn.simulate_discrete(steps, mc)
    z = obs.simulate_measurements(x)

    par_ut = np.array([[3.0, 0.3]])
    par_gh5 = np.array([[5.0, 0.6]])
    par_gh7 = np.array([[3.0, 0.4]])
    mulind_ut = np.array([[0, 1, 2]])
    mulind_gh = lambda degree: np.atleast_2d(np.arange(degree))

    # initialize filters/smoothers
    algorithms = (
        # Classical filters
        UnscentedKalman(dyn, obs, alpha=1.0, beta=0.0),
        GaussHermiteKalman(dyn, obs, deg=5),
        GaussHermiteKalman(dyn, obs, deg=7),
        # GPQ filters
        GaussianProcessKalman(dyn, obs, par_ut, par_ut, kernel='rbf', points='ut', point_hyp={'alpha': 1.0}),
        GaussianProcessKalman(dyn, obs, par_gh5, par_gh5, kernel='rbf', points='gh', point_hyp={'degree': 5}),
        GaussianProcessKalman(dyn, obs, par_gh7, par_gh7, kernel='rbf', points='gh', point_hyp={'degree': 7}),
        # BSQ filters
        BayesSardKalman(dyn, obs, par_ut, par_ut, mulind_ut, mulind_ut, points='ut', point_hyp={'alpha': 1.0}),
        BayesSardKalman(dyn, obs, par_gh5, par_gh5, mulind_gh(5), mulind_gh(5), points='gh', point_hyp={'degree': 5}),
        BayesSardKalman(dyn, obs, par_gh7, par_gh7, mulind_gh(7), mulind_gh(7), points='gh', point_hyp={'degree': 7}),
    )
    num_algs = len(algorithms)

    # space for estimates
    dim = dyn.dim_state
    mean_f, cov_f = np.zeros((dim, steps, mc, num_algs)), np.zeros((dim, dim, steps, mc, num_algs))
    mean_s, cov_s = np.zeros((dim, steps, mc, num_algs)), np.zeros((dim, dim, steps, mc, num_algs))

    # do filtering/smoothing
    t0 = time.time()  # measure execution time
    print('Running filters/smoothers ...')
    for a, alg in enumerate(algorithms):
        print('{}'.format(alg.__class__.__name__))  # print filter/smoother name
        for sim in range(mc):
            mean_f[..., sim, a], cov_f[..., sim, a] = alg.forward_pass(z[..., sim])
            mean_s[..., sim, a], cov_s[..., sim, a] = alg.backward_pass()
            alg.reset()
    print('Done in {0:.4f} [sec]'.format(time.time() - t0))

    # evaluate performance
    scores = evaluate_performance(x, mean_f, cov_f, mean_s, cov_s)
    rmseMean_f, nciMean_f, nllMean_f, rmseMean_s, nciMean_s, nllMean_s = scores[:6]
    rmseStd_f, nciStd_f, nllStd_f, rmseStd_s, nciStd_s, nllStd_s = scores[6:]

    # put data into Pandas DataFrame for fancy printing and latex export
    # row_labels = ['SR', 'UT', 'GH-5', 'GH-7', 'GH-10', 'GH-15', 'GH-20']
    row_labels = ['UT', 'GH-5', 'GH-7']
    num_labels = len(row_labels)
    col_labels = ['Classical', 'GPQ', 'BSQ', 'Classical (2std)', 'GPQ (2std)', 'BSQ (2std)']
    rmse_table_f = pd.DataFrame(np.hstack((rmseMean_f.reshape(3, num_labels).T, rmseStd_f.reshape(3, num_labels).T)),
                                index=row_labels, columns=col_labels)
    nci_table_f = pd.DataFrame(np.hstack((nciMean_f.reshape(3, num_labels).T, nciStd_f.reshape(3, num_labels).T)),
                               index=row_labels, columns=col_labels)
    nll_table_f = pd.DataFrame(np.hstack((nllMean_f.reshape(3, num_labels).T, nllStd_f.reshape(3, num_labels).T)),
                               index=row_labels, columns=col_labels)
    rmse_table_s = pd.DataFrame(np.hstack((rmseMean_s.reshape(3, num_labels).T, rmseStd_s.reshape(3, num_labels).T)),
                                index=row_labels, columns=col_labels)
    nci_table_s = pd.DataFrame(np.hstack((nciMean_s.reshape(3, num_labels).T, nciStd_s.reshape(3, num_labels).T)),
                               index=row_labels, columns=col_labels)
    nll_table_s = pd.DataFrame(np.hstack((nllMean_s.reshape(3, num_labels).T, nllStd_s.reshape(3, num_labels).T)),
                               index=row_labels, columns=col_labels)

    # print kernel parameters
    print('Kernel parameters')
    print('{:5}: {}'.format('UT', par_ut))
    print('{:5}: {}'.format('GH-5', par_gh5))
    print('{:5}: {}'.format('GH-7', par_gh7))
    print()

    # print tables
    pd.set_option('display.precision', 2, 'display.max_columns', 6)
    print('Filter RMSE')
    print(rmse_table_f)
    print('Filter NCI')
    print(nci_table_f)
    print('Filter NLL')
    print(nll_table_f)
    print('Smoother RMSE')
    print(rmse_table_s)
    print('Smoother NCI')
    print(nci_table_s)
    print('Smoother NLL')
    print(nll_table_s)

    # return computed metrics for filters and smoothers
    return {'filter_RMSE': rmse_table_f, 'filter_NCI': nci_table_f, 'filter_NLL': nll_table_f,
            'smoother_RMSE': rmse_table_s, 'smoother_NCI': nci_table_s, 'smoother_NLL': nll_table_s}
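# Usage sketch: run the UT/GH-5/GH-7 comparison and export one of the tables to LaTeX, as the
# "latex export" comment above suggests; the output filename is illustrative only, while
# DataFrame.to_latex is standard pandas.
if __name__ == '__main__':
    results = tables()
    with open('ungm_filter_rmse.tex', 'w') as f:
        f.write(results['filter_RMSE'].to_latex())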
import numpy as np
import pandas as pd

from research.gpq.icinco_demo import evaluate_performance
from ssmtoybox.mtran import UnscentedTransform
from ssmtoybox.ssinf import ExtendedKalman, ExtendedKalmanGPQD
from ssmtoybox.ssmod import UNGMTransition, UNGMMeasurement
from ssmtoybox.utils import GaussRV

steps, mc = 50, 10  # time steps, MC simulations

# setup univariate non-stationary growth model
x0 = GaussRV(1, cov=np.atleast_2d(5.0))
q = GaussRV(1, cov=np.atleast_2d(10.0))
dyn = UNGMTransition(x0, q)  # dynamics
r = GaussRV(1)
obs = UNGMMeasurement(r, 1)  # observation model

x = dyn.simulate_discrete(steps, mc)
z = obs.simulate_measurements(x)

# use only the central sigma-point
usp_0 = np.zeros((dyn.dim_in, 1))
usp_ut = UnscentedTransform.unit_sigma_points(dyn.dim_in)

# set the RBF kernel hyperparameters
hyp_rbf = np.array([[1.0] + dyn.dim_in * [3.0]])
hyp_rbf_ut = np.array([[8.0] + dyn.dim_in * [0.5]])

# derivative observations only at the central point
der_mask = np.array([0])
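# Hedged usage sketch: run the plain EKF on one simulated measurement sequence. It assumes
# ExtendedKalman follows the same (dyn, obs) constructor and forward_pass(z[..., s]) interface as
# the other filters in these scripts; that signature is not confirmed here. The GPQ+D variant
# (ExtendedKalmanGPQD) would additionally use the sigma-points (usp_*), RBF hyperparameters
# (hyp_rbf, hyp_rbf_ut) and der_mask defined above.
ekf = ExtendedKalman(dyn, obs)
mean_ekf, cov_ekf = ekf.forward_pass(z[..., 0])
print('EKF mean estimate shape:', mean_ekf.shape)  # expected (dim_state, steps)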
def tables(steps=500, sims=100):
    # setup univariate non-stationary growth model
    x0 = GaussRV(1, cov=np.atleast_2d(5.0))
    q = GaussRV(1, cov=np.atleast_2d(10.0))
    dyn = UNGMTransition(x0, q)  # dynamics
    r = GaussRV(1)
    obs = UNGMMeasurement(r, 1)  # observation model

    x = dyn.simulate_discrete(steps, mc_sims=sims)  # generate some data
    z = obs.simulate_measurements(x)

    kern_par_sr = np.array([[1.0, 0.3 * dyn.dim_in]])
    kern_par_ut = np.array([[1.0, 3.0 * dyn.dim_in]])
    kern_par_gh = np.array([[1.0, 0.1 * dyn.dim_in]])

    # initialize filters/smoothers
    algorithms = (
        # classical filters of increasing quadrature degree
        CubatureKalman(dyn, obs),
        UnscentedKalman(dyn, obs),
        GaussHermiteKalman(dyn, obs, deg=5),
        GaussHermiteKalman(dyn, obs, deg=7),
        GaussHermiteKalman(dyn, obs, deg=10),
        GaussHermiteKalman(dyn, obs, deg=15),
        GaussHermiteKalman(dyn, obs, deg=20),
        # Bayesian (GPQ) counterparts
        GaussianProcessKalman(dyn, obs, kern_par_sr, kern_par_sr, points='sr'),
        GaussianProcessKalman(dyn, obs, kern_par_ut, kern_par_ut, points='ut'),
        GaussianProcessKalman(dyn, obs, kern_par_sr, kern_par_sr, points='gh', point_hyp={'degree': 5}),
        GaussianProcessKalman(dyn, obs, kern_par_gh, kern_par_gh, points='gh', point_hyp={'degree': 7}),
        GaussianProcessKalman(dyn, obs, kern_par_gh, kern_par_gh, points='gh', point_hyp={'degree': 10}),
        GaussianProcessKalman(dyn, obs, kern_par_gh, kern_par_gh, points='gh', point_hyp={'degree': 15}),
        GaussianProcessKalman(dyn, obs, kern_par_gh, kern_par_gh, points='gh', point_hyp={'degree': 20}),
    )
    num_algs = len(algorithms)

    # space for estimates
    mean_f, cov_f = np.zeros((dyn.dim_in, steps, sims, num_algs)), np.zeros((dyn.dim_in, dyn.dim_in, steps, sims, num_algs))
    mean_s, cov_s = np.zeros((dyn.dim_in, steps, sims, num_algs)), np.zeros((dyn.dim_in, dyn.dim_in, steps, sims, num_algs))

    # do filtering/smoothing
    t0 = time.time()  # measure execution time
    print('Running filters/smoothers ...', flush=True)
    for a, alg in enumerate(algorithms):
        for sim in trange(sims, desc='{:25}'.format(alg.__class__.__name__), file=sys.stdout):
            mean_f[..., sim, a], cov_f[..., sim, a] = alg.forward_pass(z[..., sim])
            mean_s[..., sim, a], cov_s[..., sim, a] = alg.backward_pass()
            alg.reset()
    print('Done in {0:.4f} [sec]'.format(time.time() - t0))

    # evaluate performance
    scores = evaluate_performance(x, mean_f, cov_f, mean_s, cov_s)
    rmseMean_f, nciMean_f, nllMean_f, rmseMean_s, nciMean_s, nllMean_s = scores[:6]
    rmseStd_f, nciStd_f, nllStd_f, rmseStd_s, nciStd_s, nllStd_s = scores[6:]

    # put data into Pandas DataFrame for fancy printing and latex export
    row_labels = ['SR', 'UT', 'GH-5', 'GH-7', 'GH-10', 'GH-15', 'GH-20']  # [alg.__class__.__name__ for alg in algorithms]
    col_labels = ['Classical', 'Bayesian', 'Classical (2std)', 'Bayesian (2std)']
    rmse_table_f = pd.DataFrame(np.hstack((rmseMean_f.reshape(2, 7).T, rmseStd_f.reshape(2, 7).T)),
                                index=row_labels, columns=col_labels)
    nci_table_f = pd.DataFrame(np.hstack((nciMean_f.reshape(2, 7).T, nciStd_f.reshape(2, 7).T)),
                               index=row_labels, columns=col_labels)
    nll_table_f = pd.DataFrame(np.hstack((nllMean_f.reshape(2, 7).T, nllStd_f.reshape(2, 7).T)),
                               index=row_labels, columns=col_labels)
    rmse_table_s = pd.DataFrame(np.hstack((rmseMean_s.reshape(2, 7).T, rmseStd_s.reshape(2, 7).T)),
                                index=row_labels, columns=col_labels)
    nci_table_s = pd.DataFrame(np.hstack((nciMean_s.reshape(2, 7).T, nciStd_s.reshape(2, 7).T)),
                               index=row_labels, columns=col_labels)
    nll_table_s = pd.DataFrame(np.hstack((nllMean_s.reshape(2, 7).T, nllStd_s.reshape(2, 7).T)),
                               index=row_labels, columns=col_labels)

    # print tables
    print('Filter RMSE')
    print(rmse_table_f)
    print('Filter NCI')
    print(nci_table_f)
    print('Filter NLL')
    print(nll_table_f)
    print('Smoother RMSE')
    print(rmse_table_s)
    print('Smoother NCI')
    print(nci_table_s)
    print('Smoother NLL')
    print(nll_table_s)

    # return computed metrics for filters and smoothers
    return {'filter_RMSE': rmse_table_f, 'filter_NCI': nci_table_f, 'filter_NLL': nll_table_f,
            'smoother_RMSE': rmse_table_s, 'smoother_NCI': nci_table_s, 'smoother_NLL': nll_table_s}
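# Usage sketch: a shorter run for a quick sanity check; the steps/sims values below are
# illustrative, while the full experiment uses the defaults (steps=500, sims=100).
if __name__ == '__main__':
    out = tables(steps=100, sims=10)
    print(out['filter_RMSE'].round(2))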