def pdf(self, loc, time, state, **kwargs):
    """Evaluates "future" pdf p(x_t | x_s) at loc."""
    dynavl = self.dynamics(time, state, **kwargs)
    diffvl = self.diffusionmatrix(time, **kwargs)
    normaldist = Normal(dynavl, diffvl)
    return normaldist.pdf(loc)

def sample(self, time, state, **kwargs):
    """
    Samples x_t ~ p(x_t | x_s) as a function of t and x_s
    (plus additional parameters).

    In a discrete system, t = s + 1 with s \\in \\mathbb{N}.
    In an ODE solver setting, one of the additional parameters
    would be the step size.
    """
    dynavl = self.dynamics(time, state, **kwargs)
    diffvl = self.diffusionmatrix(time, **kwargs)
    rv = Normal(dynavl, diffvl)
    return rv.sample()

def test_chapmankolmogorov(self):
    mean, cov = np.ones(self.prior.ndim), np.eye(self.prior.ndim)
    initrv = Normal(mean, cov)
    cke, __ = self.prior.chapmankolmogorov(0.0, STEP, STEP, initrv)
    self.assertAllClose(AH_22_IBM @ initrv.mean, cke.mean, 1e-14)
    self.assertAllClose(
        AH_22_IBM @ initrv.cov @ AH_22_IBM.T + QH_22_IBM, cke.cov, 1e-14
    )

def smooth_step(self, unsmoothed_rv, pred_rv, smoothed_rv, crosscov):
    """
    A single smoother step.

    Consists of predicting from the filtering distribution at time t
    to time t+1 and then updating based on the discrepancy to the
    smoothing solution at time t+1.

    Parameters
    ----------
    unsmoothed_rv : RandomVariable
        Filtering distribution at time t.
    pred_rv : RandomVariable
        Prediction at time t+1 of the filtering distribution at time t.
    smoothed_rv : RandomVariable
        Smoothing distribution at time t+1.
    crosscov : array_like
        Cross-covariance between unsmoothed_rv and pred_rv as returned
        by predict().
    """
    crosscov = np.asarray(crosscov)
    initmean, initcov = unsmoothed_rv.mean, unsmoothed_rv.cov
    predmean, predcov = pred_rv.mean, pred_rv.cov
    currmean, currcov = smoothed_rv.mean, smoothed_rv.cov
    if np.isscalar(predmean) and np.isscalar(predcov):
        predmean = predmean * np.ones(1)
        predcov = predcov * np.eye(1)
    newmean = initmean + crosscov @ np.linalg.solve(predcov, currmean - predmean)
    firstsolve = crosscov @ np.linalg.solve(predcov, currcov - predcov)
    secondsolve = crosscov @ np.linalg.solve(predcov, firstsolve.T)
    newcov = initcov + secondsolve.T
    return Normal(newmean, newcov)

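# The step above is the Rauch-Tung-Striebel smoother recursion. Below is a
# minimal sketch of how it would drive a full backward pass; the containers
# filtered_rvs, predicted_rvs, and crosscovs are hypothetical names for the
# forward-pass outputs, not identifiers from this codebase.
def smooth_backward_pass_sketch(self, filtered_rvs, predicted_rvs, crosscovs):
    # The smoothing distribution at the final time point equals the
    # filtering distribution there, so the recursion starts from it.
    curr_smoothed = filtered_rvs[-1]
    out = [curr_smoothed]
    for idx in reversed(range(len(filtered_rvs) - 1)):
        # predicted_rvs[idx] and crosscovs[idx] belong to the transition
        # from time index idx to idx + 1.
        curr_smoothed = self.smooth_step(
            filtered_rvs[idx], predicted_rvs[idx], curr_smoothed, crosscovs[idx]
        )
        out.append(curr_smoothed)
    return list(reversed(out))
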
def logistic_ode():
    # Below is for consistency with pytest & unittest.
    # Without a seed, unittest passes but pytest fails.
    # I tried multiple seeds; they all work equally well.
    np.random.seed(12345)
    delta_t = 0.2
    tmax = 2
    logistic = pnd.logistic(
        (0, tmax), initrv=Constant(np.array([0.1])), params=(6, 1)
    )
    dynamod = pnfs.statespace.IBM(ordint=3, spatialdim=1)
    measmod = pnfs.DiscreteEKFComponent.from_ode(
        logistic, dynamod, np.zeros((1, 1)), ek0_or_ek1=1
    )
    initmean = np.array([0.1, 0.0, 0.0, 0.0])
    initcov = np.diag([0.0, 1.0, 1.0, 1.0])
    initrv = Normal(initmean, initcov)
    return dynamod, measmod, initrv, {"dt": delta_t, "tmax": tmax, "ode": logistic}

def car_tracking():
    # Below is for consistency with pytest & unittest.
    # Without a seed, unittest passes but pytest fails.
    # I tried multiple seeds; they all work equally well.
    np.random.seed(12345)
    delta_t = 0.2
    var = 0.5
    dynamat = np.eye(4) + delta_t * np.diag(np.ones(2), 2)
    dynadiff = (
        np.diag(np.array([delta_t**3 / 3, delta_t**3 / 3, delta_t, delta_t]))
        + np.diag(np.array([delta_t**2 / 2, delta_t**2 / 2]), 2)
        + np.diag(np.array([delta_t**2 / 2, delta_t**2 / 2]), -2)
    )
    measmat = np.eye(2, 4)
    measdiff = var * np.eye(2)
    mean = np.zeros(4)
    cov = 0.5 * var * np.eye(4)
    dynmod = pnfs.statespace.DiscreteLTIGaussian(
        state_trans_mat=dynamat,
        shift_vec=np.zeros(4),
        proc_noise_cov_mat=dynadiff,
    )
    measmod = pnfs.statespace.DiscreteLTIGaussian(
        state_trans_mat=measmat,
        shift_vec=np.zeros(2),
        proc_noise_cov_mat=measdiff,
    )
    initrv = Normal(mean, cov)
    return dynmod, measmod, initrv, {"dt": delta_t, "tmax": 20}

def test_transition_rv(self):
    mean, cov = np.ones(self.prior.dimension), np.eye(self.prior.dimension)
    initrv = Normal(mean, cov)
    cke, __ = self.prior.transition_rv(initrv, 0.0, STEP, step=STEP)
    self.assertAllClose(AH_22_IBM @ initrv.mean, cke.mean, 1e-14)
    self.assertAllClose(
        AH_22_IBM @ initrv.cov @ AH_22_IBM.T + QH_22_IBM, cke.cov, 1e-14
    )

def linear_discrete_update(meanest, cpred, data, meascov, measmat, mpred):
    """Kalman update, potentially after linearization."""
    covest = measmat @ cpred @ measmat.T + meascov
    ccest = cpred @ measmat.T
    mean = mpred + ccest @ np.linalg.solve(covest, data - meanest)
    cov = cpred - ccest @ np.linalg.solve(covest.T, ccest.T)
    return Normal(mean, cov), covest, ccest, meanest

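# A minimal 1-D sanity check of the update above, assuming that
# linear_discrete_update and Normal are importable as defined here. With
# H = [[1.0]], the result reduces to the textbook scalar Kalman update.
import numpy as np

mpred = np.array([0.0])      # predicted mean
cpred = np.array([[2.0]])    # predicted covariance
measmat = np.array([[1.0]])  # measurement matrix H
meascov = np.array([[1.0]])  # measurement noise R
data = np.array([1.5])       # observation
meanest = measmat @ mpred    # predicted measurement mean

updated, covest, ccest, _ = linear_discrete_update(
    meanest, cpred, data, meascov, measmat, mpred
)
# Kalman gain K = P H^T (H P H^T + R)^{-1} = 2/3, so the posterior mean is
# 0 + K * (1.5 - 0) = 1.0 and the posterior variance is (1 - K) * 2 = 2/3.
assert np.allclose(updated.mean, np.array([1.0]))
assert np.allclose(updated.cov, np.array([[2.0 / 3.0]]))
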
def chapmankolmogorov(self, start, stop, step, randvar, *args, **kwargs):
    """
    Closed-form solution to the Chapman-Kolmogorov equations for the
    integrated Brownian motion. It is given by

    .. math:: X_{t+h} \\, | \\, X_t \\sim \\mathcal{N}(A(h)X_t, Q(h))

    with matrices :math:`A(h)` and :math:`Q(h)` defined by

    .. math:: [A(h)]_{ij} = \\mathbb{I}_{i \\leq j} \\frac{h^{j-i}}{(j-i)!}

    .. math:: [Q(h)]_{ij} = \\sigma^2 \\frac{h^{2q+1-i-j}}{(2q+1-i-j)(q-j)!(q-i)!}

    The implementation used here is more stable than the
    matrix-exponential implementation in :meth:`super().chapmankolmogorov`,
    which is relevant for combinations of large order :math:`q` and small
    steps :math:`h`. In these cases, even the preconditioning is subject
    to numerical instability if the transition matrices :math:`A(h)` and
    :math:`Q(h)` are computed with matrix exponentials.

    The "step" argument is obsolete here and is ignored.
    """
    mean, covar = randvar.mean, randvar.cov
    ah = self._trans_ibm(start, stop)
    qh = self._transdiff_ibm(start, stop)
    mpred = ah @ mean
    crosscov = covar @ ah.T
    cpred = ah @ crosscov + qh
    return Normal(mpred, cpred), crosscov

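# A minimal sketch of the closed-form matrices from the docstring above, with
# 0-based indices i, j = 0, ..., q and diffusion intensity sigma^2. The helper
# names ibm_ah/ibm_qh are hypothetical and not part of this codebase.
import numpy as np
from math import factorial

def ibm_ah(q, h):
    # [A(h)]_{ij} = 1_{i <= j} * h^{j-i} / (j-i)!
    return np.array([
        [h ** (j - i) / factorial(j - i) if i <= j else 0.0 for j in range(q + 1)]
        for i in range(q + 1)
    ])

def ibm_qh(q, h, sigmasq=1.0):
    # [Q(h)]_{ij} = sigma^2 * h^{2q+1-i-j} / ((2q+1-i-j) * (q-i)! * (q-j)!)
    return np.array([
        [
            sigmasq * h ** (2 * q + 1 - i - j)
            / ((2 * q + 1 - i - j) * factorial(q - i) * factorial(q - j))
            for j in range(q + 1)
        ]
        for i in range(q + 1)
    ])
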
def _estimate_local_error(
    self,
    pred_rv,
    t_new,
    calibrated_proc_noise_cov,
    calibrated_proc_noise_cov_cholesky,
    **kwargs,
):
    """Estimate the local errors.

    This corresponds to the approach in [1], implemented such that it is
    compatible with the EKF1 and UKF.

    References
    ----------
    .. [1] Schober, M., Särkkä, S. and Hennig, P.
        A probabilistic model for the numerical solution of initial
        value problems.
        Statistics and Computing, 2019.
    """
    local_pred_rv = Normal(
        pred_rv.mean,
        calibrated_proc_noise_cov,
        cov_cholesky=calibrated_proc_noise_cov_cholesky,
    )
    local_meas_rv, _ = self.gfilt.measure(local_pred_rv, t_new)
    error = local_meas_rv.cov.diagonal()
    return np.sqrt(np.abs(error))

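# A hedged sketch of how a local error estimate like the one above typically
# feeds a proportional step-size controller (cf. Schober et al., 2019). The
# function name, the safety factor, and the use of the method order q are
# illustrative assumptions, not part of this codebase.
import numpy as np

def propose_new_stepsize(dt, error, tol, q, safety=0.9):
    # Error above tolerance shrinks the step; error below tolerance grows it.
    ratio = tol / np.maximum(np.linalg.norm(error), 1e-15)
    return safety * dt * ratio ** (1.0 / (q + 1))
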
def setUp(self):
    initrv = Normal(
        20 * np.ones(2), 0.1 * np.eye(2), cov_cholesky=np.sqrt(0.1) * np.eye(2)
    )
    self.ivp = lotkavolterra([0.0, 0.5], initrv)
    step = 0.1
    self.solution = probsolve_ivp(self.ivp, step=step)

def transition_realization(self, real, start, stop, **kwargs):
    if "euler_step" not in kwargs:
        raise TypeError("LinearSDE.transition_* requires an 'euler_step' argument.")
    euler_step = kwargs["euler_step"]
    rv = Normal(real, 0 * np.eye(len(real)))
    return self._solve_chapmankolmogorov_equations(
        start=start, stop=stop, euler_step=euler_step, randvar=rv
    )

def test_transition_rv(self):
    mean, cov = np.ones(TEST_NDIM), np.eye(TEST_NDIM)
    rvar = Normal(mean, cov)
    cke, _ = self.lti.transition_rv(rv=rvar, start=0.0, stop=1.0)
    ah, xih, qh = ibm_a(1.0), ibm_xi(1.0), ibm_q(1.0)
    diff_mean = np.linalg.norm(ah @ rvar.mean + xih - cke.mean)
    diff_cov = np.linalg.norm(ah @ rvar.cov @ ah.T + qh - cke.cov)
    self.assertLess(diff_mean, 1e-14)
    self.assertLess(diff_cov, 1e-14)

def predict(self, start, stop, randvar, **kwargs):
    mean, covar = randvar.mean, randvar.cov
    if np.isscalar(mean) and np.isscalar(covar):
        mean, covar = mean * np.ones(1), covar * np.eye(1)
    diffmat = self.dynamod.diffusionmatrix(start, **kwargs)
    jacob = self.dynamod.jacobian(start, mean, **kwargs)
    mpred = self.dynamod.dynamics(start, mean, **kwargs)
    crosscov = covar @ jacob.T
    cpred = jacob @ crosscov + diffmat
    return Normal(mpred, cpred), {"crosscov": crosscov}

def test_sample(self):
    mean, cov = np.zeros(TEST_NDIM), np.eye(TEST_NDIM)
    randvar = Normal(mean, cov)
    samp = self.mcm.sample(0.0, 1.0, 0.01, randvar.mean)
    self.assertEqual(samp.ndim, 1)
    self.assertEqual(samp.shape[0], TEST_NDIM)
    if VISUALISE:
        plt.title("100 Samples of a Mock Object")
        plt.plot(samp)
        plt.show()

def setup_cartracking(self):
    self.dynmod = DiscreteGaussianLTIModel(
        dynamat=self.dynamat, forcevec=np.zeros(4), diffmat=self.dynadiff
    )
    self.measmod = DiscreteGaussianLTIModel(
        dynamat=self.measmat, forcevec=np.zeros(2), diffmat=self.measdiff
    )
    self.initrv = Normal(self.mean, self.cov)
    self.tms = np.arange(0, 20, self.delta_t)
    self.states, self.obs = generate_dd(
        self.dynmod, self.measmod, self.initrv, self.tms
    )

def transition_rv(self, rv, start, stop=None, **kwargs):
    if not isinstance(rv, Normal):
        raise TypeError(f"Normal RV expected, but {type(rv)} received.")
    dynamat = self.dynamicsmatrix(time=start)
    diffmat = self.diffusionmatrix(time=start)
    force = self.forcevector(time=start)
    new_mean = dynamat @ rv.mean + force
    new_crosscov = rv.cov @ dynamat.T
    new_cov = dynamat @ new_crosscov + diffmat
    return Normal(mean=new_mean, cov=new_cov), {"crosscov": new_crosscov}

def predict(self, start, stop, randvar, **kwargs): """Prediction step for discrete-discrete Kalman filtering.""" mean, covar = randvar.mean, randvar.cov if np.isscalar(mean) and np.isscalar(covar): mean, covar = mean * np.ones(1), covar * np.eye(1) dynamat = self.dynamicmodel.dynamicsmatrix(start, **kwargs) forcevec = self.dynamicmodel.forcevector(start, **kwargs) diffmat = self.dynamicmodel.diffusionmatrix(start, **kwargs) mpred = dynamat @ mean + forcevec ccpred = covar @ dynamat.T cpred = dynamat @ ccpred + diffmat return Normal(mpred, cpred), {"crosscov": ccpred}
def _update_discrete_nonlinear(time, randvar, data, measmod, ut, **kwargs):
    mpred, cpred = randvar.mean, randvar.cov
    if np.isscalar(mpred) and np.isscalar(cpred):
        mpred, cpred = mpred * np.ones(1), cpred * np.eye(1)
    sigmapts = ut.sigma_points(mpred, cpred)
    proppts = ut.propagate(time, sigmapts, measmod.dynamics)
    meascov = measmod.diffusionmatrix(time, **kwargs)
    meanest, covest, ccest = ut.estimate_statistics(proppts, sigmapts, meascov, mpred)
    mean = mpred + ccest @ np.linalg.solve(covest, data - meanest)
    cov = cpred - ccest @ np.linalg.solve(covest.T, ccest.T)
    return Normal(mean, cov), covest, ccest, meanest

def test_transition_rv(self):
    mean, cov = np.ones(TEST_NDIM), np.eye(TEST_NDIM)
    rvar = Normal(mean, cov)
    cke, _ = self.lm.transition_rv(rv=rvar, start=0.0, stop=1.0, euler_step=1.0)
    diff_mean = self.driftmat @ rvar.mean + self.force - cke.mean + rvar.mean
    diff_cov = (
        self.driftmat @ rvar.cov
        + rvar.cov @ self.driftmat.T
        + self.dispmat @ self.diffmat @ self.dispmat.T
        + rvar.cov
        - cke.cov
    )
    self.assertLess(np.linalg.norm(diff_mean), 1e-14)
    self.assertLess(np.linalg.norm(diff_cov), 1e-14)

def _predict_nonlinear(self, start, randvar, **kwargs):
    """Prediction step via the unscented transform."""
    mean, covar = randvar.mean, randvar.cov
    if np.isscalar(mean) and np.isscalar(covar):
        mean, covar = mean * np.ones(1), covar * np.eye(1)
    sigmapts = self.ut.sigma_points(mean, covar)
    proppts = self.ut.propagate(start, sigmapts, self.dynamod.dynamics)
    diffmat = self.dynamod.diffusionmatrix(start, **kwargs)
    mpred, cpred, crosscov = self.ut.estimate_statistics(
        proppts, sigmapts, diffmat, mean
    )
    return Normal(mpred, cpred), {"crosscov": crosscov}

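# For reference, a minimal sketch of how sigma points like those used above
# are typically generated (scaled unscented transform in the style of Julier
# and Uhlmann). The function name and parameter defaults are illustrative
# assumptions, not the actual signature of self.ut.sigma_points here.
import numpy as np

def sigma_points_sketch(mean, cov, alpha=1e-3, kappa=0.0):
    n = len(mean)
    lam = alpha**2 * (n + kappa) - n
    # Matrix square root of (n + lambda) * cov via Cholesky factorization.
    sqrtcov = np.linalg.cholesky((n + lam) * cov)
    pts = np.zeros((2 * n + 1, n))
    pts[0] = mean
    for i in range(n):
        pts[1 + i] = mean + sqrtcov[:, i]
        pts[1 + n + i] = mean - sqrtcov[:, i]
    return pts
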
def transition_rv(self, rv, start, stop, **kwargs):
    if not isinstance(rv, Normal):
        errormsg = (
            "Closed form solution for Chapman-Kolmogorov "
            "equations in LTI SDE models is only "
            "available for Gaussian initial conditions."
        )
        raise TypeError(errormsg)
    disc_dynamics, disc_force, disc_diffusion = self._discretise(step=(stop - start))
    old_mean, old_cov = rv.mean, rv.cov
    new_mean = disc_dynamics @ old_mean + disc_force
    new_crosscov = old_cov @ disc_dynamics.T
    new_cov = disc_dynamics @ new_crosscov + disc_diffusion
    return Normal(mean=new_mean, cov=new_cov), {"crosscov": new_crosscov}

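# A hedged sketch of one standard way to compute the discretised matrices
# consumed above: matrix fraction decomposition for the process noise, as
# referenced elsewhere in this codebase (see e.g. Särkkä & Solin, "Applied
# Stochastic Differential Equations"). The argument names driftmat/forcevec/
# dispmat/diffmat for the model dx = (F x + u) dt + L dW are assumptions,
# not the actual signature of self._discretise.
import numpy as np
from scipy.linalg import expm

def discretise_lti_sketch(driftmat, forcevec, dispmat, diffmat, step):
    n = driftmat.shape[0]
    # Transition matrix A(h) = expm(F h).
    ah = expm(driftmat * step)
    # Matrix fraction decomposition for the process noise covariance Q(h).
    blocked = np.block([
        [driftmat, dispmat @ diffmat @ dispmat.T],
        [np.zeros((n, n)), -driftmat.T],
    ])
    expblocked = expm(blocked * step)
    qh = expblocked[:n, n:] @ ah.T
    # Discretised force via an augmented exponential, which avoids assuming
    # that F is invertible.
    augmented = np.zeros((n + 1, n + 1))
    augmented[:n, :n] = driftmat
    augmented[:n, n] = forcevec
    xih = expm(augmented * step)[:n, n]
    return ah, xih, qh
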
def test_chapmankolmogorov(self): """ Test if CK-solution for a single step is according to iteration. """ mean, cov = np.ones(TEST_NDIM), np.eye(TEST_NDIM) rvar = Normal(mean, cov) cke, __ = self.lm.chapmankolmogorov(0.0, 1.0, 1.0, rvar) diff_mean = self.driftmat @ rvar.mean + self.force - cke.mean + rvar.mean diff_cov = (self.driftmat @ rvar.cov + rvar.cov @ self.driftmat.T + self.dispmat @ self.diffmat @ self.dispmat.T + rvar.cov - cke.cov) self.assertLess(np.linalg.norm(diff_mean), 1e-14) self.assertLess(np.linalg.norm(diff_cov), 1e-14)
def test_chapmankolmogorov(self): """ Test if CK-solution for a single step is according to closed form of IBM kernels.. """ mean, cov = np.ones(TEST_NDIM), np.eye(TEST_NDIM) rvar = Normal(mean, cov) delta = 0.1 cke, __ = self.lti.chapmankolmogorov(0.0, 1.0, delta, rvar) ah, xih, qh = ibm_a(1.0), ibm_xi(1.0), ibm_q(1.0) diff_mean = np.linalg.norm(ah @ rvar.mean + xih - cke.mean) diff_cov = np.linalg.norm(ah @ rvar.cov @ ah.T + qh - cke.cov) self.assertLess(diff_mean, 1e-14) self.assertLess(diff_cov, 1e-14)
def test_chapmankolmogorov_super_comparison(self):
    """
    The result of chapmankolmogorov() should be identical to the matrix
    fraction decomposition technique implemented in LinearSDE, just faster.
    """
    # pylint: disable=bad-super-call
    mean, cov = np.ones(self.prior.ndim), np.eye(self.prior.ndim)
    initrv = Normal(mean, cov)
    cke_super, __ = super(type(self.prior), self.prior).chapmankolmogorov(
        0.0, STEP, STEP, initrv
    )
    cke, __ = self.prior.chapmankolmogorov(0.0, STEP, STEP, initrv)
    self.assertAllClose(cke_super.mean, cke.mean, 1e-14)
    self.assertAllClose(cke_super.cov, cke.cov, 1e-14)

def _predict_linear(self, start, randvar, **kwargs):
    """Basic Kalman prediction, since the model is linear."""
    mean, covar = randvar.mean, randvar.cov
    if np.isscalar(mean) and np.isscalar(covar):
        mean, covar = mean * np.ones(1), covar * np.eye(1)
    dynamat = self.dynamod.dynamicsmatrix(start, **kwargs)
    forcevec = self.dynamod.forcevector(start, **kwargs)
    diffmat = self.dynamod.diffusionmatrix(start, **kwargs)
    mpred = dynamat @ mean + forcevec
    crosscov = covar @ dynamat.T
    cpred = dynamat @ crosscov + diffmat
    return Normal(mpred, cpred), {"crosscov": crosscov}

def _solve_chapmankolmogorov_equations(self, start, stop, euler_step, randvar, **kwargs):
    """
    Solve the differential equations for the mean and covariance of the
    SDE solution (Eq. 5.50 and 5.51, or Eq. 10.73, in Applied SDEs)
    with the Euler method.

    By default, we assume that ``randvar`` is Gaussian.
    """
    mean, covar = randvar.mean, randvar.cov
    time = start
    while time < stop:
        meanincr, covarincr = self._increment(time, mean, covar, **kwargs)
        mean, covar = mean + euler_step * meanincr, covar + euler_step * covarincr
        time = time + euler_step
    return Normal(mean, covar), {}

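# For a linear SDE dx = (F(t) x + u(t)) dt + L(t) dW with diffusion Q(t), the
# moment ODEs integrated above are dm/dt = F m + u and
# dP/dt = F P + P F^T + L Q L^T (Applied SDEs, Eq. 5.50/5.51). A minimal
# sketch of what self._increment could look like under that assumption; the
# attribute names driftmatfun/forcefun/dispmatfun/diffmat are illustrative,
# not necessarily those of this class.
def _increment_sketch(self, time, mean, covar, **kwargs):
    driftmat = self.driftmatfun(time)
    force = self.forcefun(time)
    dispmat = self.dispmatfun(time)
    meanincr = driftmat @ mean + force
    covarincr = (
        driftmat @ covar + covar @ driftmat.T + dispmat @ self.diffmat @ dispmat.T
    )
    return meanincr, covarincr
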
def setUp(self):
    initrv = Normal(
        20 * np.ones(2), 0.1 * np.eye(2), cov_cholesky=np.sqrt(0.1) * np.eye(2)
    )
    self.ivp = lotkavolterra([0.0, 0.5], initrv)
    step = 0.1
    f = self.ivp.rhs
    t0, tmax = self.ivp.timespan
    y0 = self.ivp.initrv.mean
    self.solution = probsolve_ivp(f, t0, tmax, y0, step=step, adaptive=False)

def setup_ornsteinuhlenbeck(self):
    self.dynmod = LTISDEModel(
        driftmatrix=self.drift,
        force=self.force,
        dispmatrix=self.disp,
        diffmatrix=self.diff,
    )
    self.measmod = DiscreteGaussianLTIModel(
        dynamat=np.eye(1), forcevec=np.zeros(1), diffmat=self.r * np.eye(1)
    )
    self.initrv = Normal(10 * np.ones(1), np.eye(1))
    self.tms = np.arange(0, 20, self.delta_t)
    self.states, self.obs = generate_cd(
        dynmod=self.dynmod, measmod=self.measmod, initrv=self.initrv, times=self.tms
    )

def benes_daum():
    """Benes-Daum test case, Example 10.17 in Applied SDEs."""

    def f(t, x):
        return np.tanh(x)

    def df(t, x):
        return 1.0 - np.tanh(x) ** 2

    def l(t):
        return np.ones(1)

    initmean = np.zeros(1)
    initcov = 3.0 * np.eye(1)
    initrv = Normal(initmean, initcov)
    dynamod = pnss.SDE(dimension=1, driftfun=f, dispmatfun=l, jacobfun=df)
    measmod = pnss.DiscreteLTIGaussian(np.eye(1), np.zeros(1), np.eye(1))
    return dynamod, measmod, initrv, {}