def _update_discrete_nonlinear(time, randvar, data, measmod, ut, **kwargs):
    """Measurement update through the unscented transform for a nonlinear model.

    Returns the corrected Gaussian plus the estimated measurement covariance,
    cross-covariance and predicted measurement mean.
    """
    pred_mean, pred_cov = randvar.mean, randvar.cov
    # Promote scalars to 1-d arrays so the linear algebra below is uniform.
    if np.isscalar(pred_mean) and np.isscalar(pred_cov):
        pred_mean, pred_cov = pred_mean * np.ones(1), pred_cov * np.eye(1)
    sigmapts = ut.sigma_points(pred_mean, pred_cov)
    propagated = ut.propagate(time, sigmapts, measmod.dynamics)
    meascov = measmod.diffusionmatrix(time, **kwargs)
    meanest, covest, ccest = ut.estimate_statistics(
        propagated, sigmapts, meascov, pred_mean)
    # Kalman-type correction, written with solves instead of explicit inverses.
    corrected_mean = pred_mean + ccest @ np.linalg.solve(covest, data - meanest)
    corrected_cov = pred_cov - ccest @ np.linalg.solve(covest.T, ccest.T)
    return Normal(corrected_mean, corrected_cov), covest, ccest, meanest
def test_transition_rv(self):
    """Transition of a Gaussian matches the explicit Euler increment."""
    rvar = Normal(np.ones(TEST_NDIM), np.eye(TEST_NDIM))
    cke, _ = self.lm.transition_rv(rv=rvar, start=0.0, stop=1.0, euler_step=1.0)
    expected_mean_incr = self.driftmat @ rvar.mean + self.force
    expected_cov_incr = (self.driftmat @ rvar.cov
                         + rvar.cov @ self.driftmat.T
                         + self.dispmat @ self.diffmat @ self.dispmat.T)
    diff_mean = expected_mean_incr - cke.mean + rvar.mean
    diff_cov = expected_cov_incr + rvar.cov - cke.cov
    self.assertLess(np.linalg.norm(diff_mean), 1e-14)
    self.assertLess(np.linalg.norm(diff_cov), 1e-14)
def transition_rv(self, rv, start, stop, **kwargs):
    """Closed-form transition of a Gaussian through the discretised LTI SDE.

    Raises
    ------
    TypeError
        If ``rv`` is not Gaussian; the closed form requires it.
    """
    if not isinstance(rv, Normal):
        errormsg = ("Closed form solution for Chapman-Kolmogorov "
                    "equations in LTI SDE models is only "
                    "available for Gaussian initial conditions.")
        raise TypeError(errormsg)
    dynamicsmat, forcevec, diffusionmat = self._discretise(step=(stop - start))
    # Cross-covariance is reused both in the new covariance and as output.
    new_crosscov = rv.cov @ dynamicsmat.T
    new_mean = dynamicsmat @ rv.mean + forcevec
    new_cov = dynamicsmat @ new_crosscov + diffusionmat
    return Normal(mean=new_mean, cov=new_cov), {"crosscov": new_crosscov}
def _predict_nonlinear(self, start, randvar, **kwargs):
    """
    Executes unscented transform!
    """
    m, c = randvar.mean, randvar.cov
    # Lift scalars to 1-d arrays before forming sigma points.
    if np.isscalar(m) and np.isscalar(c):
        m, c = m * np.ones(1), c * np.eye(1)
    pts = self.ut.sigma_points(m, c)
    propagated = self.ut.propagate(start, pts, self.dynamod.dynamics)
    procnoise = self.dynamod.diffusionmatrix(start, **kwargs)
    mpred, cpred, crosscov = self.ut.estimate_statistics(
        propagated, pts, procnoise, m)
    return Normal(mpred, cpred), {"crosscov": crosscov}
def sample(self, time, state, **kwargs):
    """
    Samples x_{t} ~ p(x_{t} | x_{s}) as a function of t and x_s
    (plus additional parameters).

    In a discrete system, i.e. t = s + 1, s \\in \\mathbb{N}

    In an ODE solver setting, one of the additional parameters
    would be the step size.
    """
    transition_mean = self.dynamics(time, state, **kwargs)
    transition_cov = self.diffusionmatrix(time, **kwargs)
    return Normal(transition_mean, transition_cov).sample()
def test_chapmankolmogorov_super_comparison(self):
    """
    The result of chapmankolmogorov() should be identical to the matrix
    fraction decomposition technique implemented in LinearSDE, just faster.
    """
    # pylint: disable=bad-super-call
    initrv = Normal(np.ones(self.prior.ndim), np.eye(self.prior.ndim))
    cke_super, __ = super(type(self.prior), self.prior).chapmankolmogorov(
        0.0, STEP, STEP, initrv)
    cke, __ = self.prior.chapmankolmogorov(0.0, STEP, STEP, initrv)
    self.assertAllClose(cke_super.mean, cke.mean, 1e-14)
    self.assertAllClose(cke_super.cov, cke.cov, 1e-14)
def test_chapmankolmogorov(self):
    """
    Test if CK-solution for a single step is according to closed form of
    IBM kernels..
    """
    rvar = Normal(np.ones(TEST_NDIM), np.eye(TEST_NDIM))
    cke, __ = self.lti.chapmankolmogorov(0.0, 1.0, 0.1, rvar)
    ah, xih, qh = ibm_a(1.0), ibm_xi(1.0), ibm_q(1.0)
    self.assertLess(np.linalg.norm(ah @ rvar.mean + xih - cke.mean), 1e-14)
    self.assertLess(np.linalg.norm(ah @ rvar.cov @ ah.T + qh - cke.cov), 1e-14)
def test_chapmankolmogorov(self):
    """
    Test if CK-solution for a single step is according to iteration.
    """
    rvar = Normal(np.ones(TEST_NDIM), np.eye(TEST_NDIM))
    cke, __ = self.lm.chapmankolmogorov(0.0, 1.0, 1.0, rvar)
    mean_incr = self.driftmat @ rvar.mean + self.force
    cov_incr = (self.driftmat @ rvar.cov
                + rvar.cov @ self.driftmat.T
                + self.dispmat @ self.diffmat @ self.dispmat.T)
    self.assertLess(np.linalg.norm(mean_incr - cke.mean + rvar.mean), 1e-14)
    self.assertLess(np.linalg.norm(cov_incr + rvar.cov - cke.cov), 1e-14)
def _predict_linear(self, start, randvar, **kwargs):
    """
    Basic Kalman update because model is linear.
    """
    m, c = randvar.mean, randvar.cov
    # Lift scalars to 1-d arrays so matrix products work uniformly.
    if np.isscalar(m) and np.isscalar(c):
        m, c = m * np.ones(1), c * np.eye(1)
    transmat = self.dynamod.dynamicsmatrix(start, **kwargs)
    shift = self.dynamod.forcevector(start, **kwargs)
    procnoise = self.dynamod.diffusionmatrix(start, **kwargs)
    crosscov = c @ transmat.T
    predicted_mean = transmat @ m + shift
    predicted_cov = transmat @ crosscov + procnoise
    return Normal(predicted_mean, predicted_cov), {"crosscov": crosscov}
def setUp(self):
    """Probabilistic Lotka-Volterra solution with fixed, non-adaptive steps."""
    initrv = Normal(20 * np.ones(2), 0.1 * np.eye(2),
                    cov_cholesky=np.sqrt(0.1) * np.eye(2))
    self.ivp = lotkavolterra([0.0, 0.5], initrv)
    t0, tmax = self.ivp.timespan
    self.solution = probsolve_ivp(self.ivp.rhs, t0, tmax,
                                  self.ivp.initrv.mean,
                                  step=0.1, adaptive=False)
def setup_ornsteinuhlenbeck(self):
    """Build the continuous-discrete Ornstein-Uhlenbeck test problem and data."""
    self.dynmod = LTISDEModel(
        driftmatrix=self.drift,
        force=self.force,
        dispmatrix=self.disp,
        diffmatrix=self.diff,
    )
    self.measmod = DiscreteGaussianLTIModel(
        dynamat=np.eye(1),
        forcevec=np.zeros(1),
        diffmat=self.r * np.eye(1),
    )
    self.initrv = Normal(10 * np.ones(1), np.eye(1))
    self.tms = np.arange(0, 20, self.delta_t)
    self.states, self.obs = generate_cd(
        dynmod=self.dynmod,
        measmod=self.measmod,
        initrv=self.initrv,
        times=self.tms,
    )
def _solve_chapmankolmogorov_equations(self, start, stop, euler_step, randvar, **kwargs):
    """
    Solves differential equations for mean and kernels of the SDE solution
    (Eq. 5.50 and 5.51 or Eq. 10.73 in Applied SDEs).

    By default, we assume that ``randvar`` is Gaussian.
    """
    mean, cov = randvar.mean, randvar.cov
    current_time = start
    # Forward Euler on the coupled mean/covariance ODEs.
    while current_time < stop:
        dmean, dcov = self._increment(current_time, mean, cov, **kwargs)
        mean = mean + euler_step * dmean
        cov = cov + euler_step * dcov
        current_time = current_time + euler_step
    return Normal(mean, cov), {}
def benes_daum():
    """Benes-Daum testcase, example 10.17 in Applied SDEs."""

    def f(t, x):
        # Drift of the Benes SDE.
        return np.tanh(x)

    def df(t, x):
        # Jacobian of the drift.
        return 1.0 - np.tanh(x) ** 2

    def l(t):
        # Constant unit dispersion.
        return np.ones(1)

    initrv = Normal(np.zeros(1), 3.0 * np.eye(1))
    dynamod = pnss.SDE(dimension=1, driftfun=f, dispmatfun=l, jacobfun=df)
    measmod = pnss.DiscreteLTIGaussian(np.eye(1), np.zeros(1), np.eye(1))
    return dynamod, measmod, initrv, {}
def chapmankolmogorov(self, start, stop, step, randvar, **kwargs):
    """
    Solves differential equations for mean and kernels of the SDE solution
    (Eq. 5.50 and 5.51 or Eq. 10.73 in Applied SDEs).

    By default, we assume that ``randvar`` is Gaussian.

    Raises
    ------
    ValueError
        If ``randvar`` is not Gaussian; the closed form requires it.
    """
    # isinstance() is the idiomatic, subclass-aware replacement for
    # issubclass(type(randvar), Normal).
    if not isinstance(randvar, Normal):
        errormsg = ("Closed form solution for Chapman-Kolmogorov "
                    "equations in linear SDE models is only "
                    "available for Gaussian initial conditions.")
        raise ValueError(errormsg)
    mean, covar = randvar.mean, randvar.cov
    time = start
    # Forward Euler on the coupled mean/covariance ODEs from start to stop.
    while time < stop:
        meanincr, covarincr = self._increment(time, mean, covar, **kwargs)
        mean, covar = mean + step * meanincr, covar + step * covarincr
        time = time + step
    return Normal(mean, covar), None
def pendulum():
    # Below is for consistency with pytest & unittest.
    # Without a seed, unittest passes but pytest fails.
    # I tried multiple seeds, they all work equally well.
    np.random.seed(12345)
    delta_t = 0.0075
    var = 0.32 ** 2
    g = 9.81

    def f(t, x):
        # Discrete-time pendulum dynamics.
        x1, x2 = x
        return np.array([x1 + x2 * delta_t, x2 - g * np.sin(x1) * delta_t])

    def df(t, x):
        # Jacobian of the dynamics.
        x1, _ = x
        return np.array([[1, delta_t], [-g * np.cos(x1) * delta_t, 1]])

    def h(t, x):
        # Observe the sine of the angle.
        x1, _ = x
        return np.array([np.sin(x1)])

    def dh(t, x):
        # Jacobian of the observation map.
        x1, _ = x
        return np.array([[np.cos(x1), 0.0]])

    # Tridiagonal process-noise covariance.
    q = 1.0 * (np.diag(np.array([delta_t ** 3 / 3, delta_t]))
               + np.diag(np.array([delta_t ** 2 / 2]), 1)
               + np.diag(np.array([delta_t ** 2 / 2]), -1))
    r = var * np.eye(1)
    dynamod = pnfs.statespace.DiscreteGaussian(2, 2, f, lambda t: q, df)
    measmod = pnfs.statespace.DiscreteGaussian(2, 1, h, lambda t: r, dh)
    initrv = Normal(np.ones(2), var * np.eye(2))
    return dynamod, measmod, initrv, {"dt": delta_t, "tmax": 4}
def chapmankolmogorov(self, start, stop, step, randvar, **kwargs):
    """
    Solves Chapman-Kolmogorov equation from start to stop via step.

    For LTISDEs, there is a closed form solutions to the ODE for
    mean and kernels (see super().chapmankolmogorov(...)). We
    exploit this for [(stop - start)/step] steps.

    References
    ----------
    Eq. (8) in
    http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.390.380&rep=rep1&type=pdf
    and Eq. 6.41 and Eq. 6.42
    in Applied SDEs.
    """
    mean, cov = randvar.mean, randvar.cov
    # Lift scalars to 1-d arrays before prediction.
    if np.isscalar(mean) and np.isscalar(cov):
        mean, cov = mean * np.ones(1), cov * np.eye(1)
    step_length = stop - start
    propagated_mean = self._predict_mean(step_length, mean, **kwargs)
    propagated_cov, crosscov = self._predict_covar(step_length, cov, **kwargs)
    return Normal(propagated_mean, propagated_cov), crosscov
def __init__(
    self,
    ivp: IVP,
    prior: pnss.Integrator,
    measurement_model: pnss.DiscreteGaussian,
    with_smoothing: bool,
    init_implementation: typing.Callable[
        [
            typing.Callable,
            np.ndarray,
            float,
            pnss.Integrator,
            Normal,
            typing.Optional[typing.Callable],
        ],
        Normal,
    ],
    initrv: typing.Optional[Normal] = None,
):
    """Gaussian-filtering ODE solver.

    Validates ``prior`` BEFORE any state is constructed, so an invalid
    prior cannot leave the object half-initialised (the original checked
    only after ``self.gfilt`` had already been built from it).
    """
    if not isinstance(prior, pnss.Integrator):
        raise ValueError(
            "Please initialise a Gaussian filter with an Integrator (see `probnum.statespace`)"
        )
    if initrv is None:
        # Default: standard normal in the prior's state dimension.
        initrv = Normal(
            np.zeros(prior.dimension),
            np.eye(prior.dimension),
            cov_cholesky=np.eye(prior.dimension),
        )
    self.gfilt = pnfs.Kalman(
        dynamics_model=prior, measurement_model=measurement_model, initrv=initrv
    )
    self.sigma_squared_mle = 1.0
    self.with_smoothing = with_smoothing
    self.init_implementation = init_implementation
    super().__init__(ivp=ivp, order=prior.ordint)
def setup_pendulum(self):
    """Build the discrete-discrete pendulum test problem and generate data."""
    delta_t = 0.0075
    var = 0.32 ** 2
    g = 9.81

    def f(t, x):
        # Discrete-time pendulum dynamics.
        x1, x2 = x
        return np.array([x1 + x2 * delta_t, x2 - g * np.sin(x1) * delta_t])

    def df(t, x):
        # Jacobian of the dynamics.
        x1, _ = x
        return np.array([[1, delta_t], [-g * np.cos(x1) * delta_t, 1]])

    def h(t, x):
        # Observe the sine of the angle.
        x1, _ = x
        return np.array([np.sin(x1)])

    def dh(t, x):
        # Jacobian of the observation map.
        x1, _ = x
        return np.array([[np.cos(x1), 0.0]])

    # Tridiagonal process-noise covariance.
    q = 1.0 * (np.diag(np.array([delta_t ** 3 / 3, delta_t]))
               + np.diag(np.array([delta_t ** 2 / 2]), 1)
               + np.diag(np.array([delta_t ** 2 / 2]), -1))
    self.r = var * np.eye(1)
    self.dynamod = DiscreteGaussianModel(f, lambda t: q, df)
    self.measmod = DiscreteGaussianModel(h, lambda t: self.r, dh)
    self.initrv = Normal(np.ones(2), var * np.eye(2))
    self.tms = np.arange(0, 4, delta_t)
    self.q = q
    self.states, self.obs = generate_dd(self.dynamod, self.measmod,
                                        self.initrv, self.tms)
def ornstein_uhlenbeck():
    # Below is for consistency with pytest & unittest.
    # Without a seed, unittest passes but pytest fails.
    # I tried multiple seeds, they all work equally well.
    np.random.seed(12345)
    delta_t = 0.2
    lam, q, r = 0.21, 0.5, 0.1
    dynmod = pnfs.statespace.LTISDE(
        driftmat=-lam * np.eye(1),
        forcevec=np.zeros(1),
        dispmat=np.sqrt(q) * np.eye(1),
    )
    measmod = pnfs.statespace.DiscreteLTIGaussian(
        state_trans_mat=np.eye(1),
        shift_vec=np.zeros(1),
        proc_noise_cov_mat=r * np.eye(1),
    )
    initrv = Normal(10 * np.ones(1), np.eye(1))
    return dynmod, measmod, initrv, {"dt": delta_t, "tmax": 20}
def transition_realization(self, real, start, stop, **kwargs):
    """Transition a concrete state through the discretised LTI SDE.

    Raises
    ------
    TypeError
        If ``real`` is not a numpy array.
    """
    if not isinstance(real, np.ndarray):
        raise TypeError(f"Numpy array expected, {type(real)} received.")
    dynamicsmat, forcevec, diffusionmat = self._discretise(step=(stop - start))
    propagated = dynamicsmat @ real + forcevec
    return Normal(propagated, diffusionmat), {}
def _rescale(self, rvs):
    """Rescales covariances according to estimate sigma squared value."""
    scale = self.sigma_squared_mle
    return [Normal(rv.mean, scale * rv.cov) for rv in rvs]
def test_transition_rv(self):
    """Smoke test: transition_rv runs on a standard-normal input."""
    dim = self.mat1.dimension
    initrv = Normal(np.ones(dim), np.eye(dim))
    self.mat1.transition_rv(initrv, start=0.0, stop=STEP, step=STEP)
def transition_realization(self, real, start, stop=None):
    """Gaussian transition density evaluated at a realisation."""
    out_mean = self._dynafct(start, real)
    out_cov = self._diffmatfct(start)
    return Normal(out_mean, out_cov), {}
def _proj_normal_rv(self, rv, coord):
    """Projection of a normal RV, e.g. to map 'states' to 'function values'."""
    stride = self._solver.prior.ordint + 1
    # Pick out every stride-th entry, starting at `coord`.
    return Normal(rv.mean[coord::stride], rv.cov[coord::stride, coord::stride])
def setUp(self):
    """Probabilistic Lotka-Volterra solution with a fixed step size."""
    init = Normal(20 * np.ones(2), 0.1 * np.eye(2))
    self.ivp = lotkavolterra([0.0, 0.5], init)
    self.solution = probsolve_ivp(self.ivp, step=0.1)
def test_chapmankolmogorov(self):
    """Smoke test: chapmankolmogorov runs on a standard-normal input."""
    dim = self.mat1.ndim
    initrv = Normal(np.ones(dim), np.eye(dim))
    self.mat1.chapmankolmogorov(0.0, STEP, STEP, initrv)
def undo_preconditioning(self, rv):
    """Map a random variable back through the inverse preconditioner."""
    invprecond = self.gfilt.dynamicmodel.invprecond
    return Normal(invprecond @ rv.mean,
                  invprecond @ rv.cov @ invprecond.T)