def bq_state_does_not_stop(input_dim) -> Tuple[BQState, BQIterInfo]:
    """BQ state that does not trigger any of the stopping criteria."""
    integral_mean = 1.0
    integral_mean_previous = 2 * integral_mean * (1 - _rel_tol)
    nevals = _nevals - 2
    bq_state = BQState(
        measure=LebesgueMeasure(input_dim=input_dim, domain=(0, 1)),
        kernel=ExpQuad(input_shape=(input_dim,)),
        integral_belief=Normal(integral_mean, 10 * _var_tol),
        previous_integral_beliefs=(Normal(integral_mean_previous, _var_tol),),
        nodes=np.ones((nevals, input_dim)),
        fun_evals=np.ones(nevals),
    )
    info = BQIterInfo.from_bq_state(bq_state)
    return bq_state, info
def car_tracking():
    # Below is for consistency with pytest & unittest.
    # Without a seed, unittest passes but pytest fails.
    # I tried multiple seeds; they all work equally well.
    np.random.seed(12345)
    delta_t = 0.2
    var = 0.5
    dynamat = np.eye(4) + delta_t * np.diag(np.ones(2), 2)
    dynadiff = (
        np.diag(np.array([delta_t**3 / 3, delta_t**3 / 3, delta_t, delta_t]))
        + np.diag(np.array([delta_t**2 / 2, delta_t**2 / 2]), 2)
        + np.diag(np.array([delta_t**2 / 2, delta_t**2 / 2]), -2)
    )
    measmat = np.eye(2, 4)
    measdiff = var * np.eye(2)
    mean = np.zeros(4)
    cov = 0.5 * var * np.eye(4)
    dynmod = pnss.DiscreteLTIGaussian(
        state_trans_mat=dynamat,
        shift_vec=np.zeros(4),
        proc_noise_cov_mat=dynadiff,
    )
    measmod = pnss.DiscreteLTIGaussian(
        state_trans_mat=measmat,
        shift_vec=np.zeros(2),
        proc_noise_cov_mat=measdiff,
    )
    initrv = Normal(mean, cov)
    return dynmod, measmod, initrv, {"dt": delta_t, "tmax": 20}
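# A quick standalone sanity check (illustrative helper, not part of the fixture
# above): with dynamat = I + dt * offset-diagonal, the state
# [pos_x, pos_y, vel_x, vel_y] advances its positions by dt times the
# velocities, i.e. a discrete constant-velocity model.
def _check_constant_velocity_transition():
    import numpy as np  # local import keeps the sketch self-contained

    dt = 0.2
    transition = np.eye(4) + dt * np.diag(np.ones(2), 2)
    state = np.array([1.0, 2.0, 3.0, 4.0])  # [pos_x, pos_y, vel_x, vel_y]
    expected = np.array([1.0 + dt * 3.0, 2.0 + dt * 4.0, 3.0, 4.0])
    assert np.allclose(transition @ state, expected)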
def logistic_ode():
    # Below is for consistency with pytest & unittest.
    # Without a seed, unittest passes but pytest fails.
    # I tried multiple seeds; they all work equally well.
    np.random.seed(12345)
    delta_t = 0.2
    tmax = 2
    logistic = pnd.logistic(
        (0, tmax), initrv=Constant(np.array([0.1])), params=(6, 1)
    )
    dynamod = pnss.IBM(ordint=3, spatialdim=1)
    measmod = pnfs.DiscreteEKFComponent.from_ode(
        logistic, dynamod, np.zeros((1, 1)), ek0_or_ek1=1
    )
    initmean = np.array([0.1, 0, 0.0, 0.0])
    initcov = np.diag([0.0, 1.0, 1.0, 1.0])
    initrv = Normal(initmean, initcov)
    return dynamod, measmod, initrv, {"dt": delta_t, "tmax": tmax, "ode": logistic}
def __init__(
    self,
    ivp: IVP,
    prior: pnss.Integrator,
    measurement_model: pnss.DiscreteGaussian,
    with_smoothing: bool,
    init_implementation: typing.Callable[
        [
            typing.Callable,
            np.ndarray,
            float,
            pnss.Integrator,
            Normal,
            typing.Optional[typing.Callable],
        ],
        Normal,
    ],
    initrv: typing.Optional[Normal] = None,
):
    # Validate the prior before it is used to construct anything.
    if not isinstance(prior, pnss.Integrator):
        raise ValueError(
            "Please initialise a Gaussian filter with an Integrator "
            "(see `probnum.statespace`)"
        )

    if initrv is None:
        initrv = Normal(
            np.zeros(prior.dimension),
            np.eye(prior.dimension),
            cov_cholesky=np.eye(prior.dimension),
        )

    self.gfilt = pnfs.Kalman(
        dynamics_model=prior, measurement_model=measurement_model, initrv=initrv
    )
    self.sigma_squared_mle = 1.0
    self.with_smoothing = with_smoothing
    self.init_implementation = init_implementation
    super().__init__(ivp=ivp, order=prior.ordint)
def setUp(self):
    initrv = Normal(
        20 * np.ones(2), 0.1 * np.eye(2), cov_cholesky=np.sqrt(0.1) * np.eye(2)
    )
    self.ivp = lotkavolterra([0.0, 0.5], initrv)
    step = 0.1
    f = self.ivp.rhs
    t0, tmax = self.ivp.timespan
    y0 = self.ivp.initrv.mean
    self.solution = probsolve_ivp(f, t0, tmax, y0, step=step, adaptive=False)
def __init__(
    self,
    mean: Union[float, np.floating, np.ndarray],
    cov: Union[float, np.floating, np.ndarray],
    dim: Optional[IntArgType] = None,
) -> None:
    # Extend scalar mean and covariance to higher dimensions if dim has been
    # supplied by the user
    # pylint: disable=fixme
    # TODO: This needs to be modified to account for cases where only either the
    # mean or covariance is given in scalar form
    if (
        (np.isscalar(mean) or mean.size == 1)
        and (np.isscalar(cov) or cov.size == 1)
        and dim is not None
    ):
        mean = np.full((dim,), mean)
        cov = cov * np.eye(dim)

    # Set dimension based on the mean vector
    if np.isscalar(mean):
        dim = 1
    else:
        dim = mean.size

    # If cov has been given as a vector of variances, transform to diagonal matrix
    if isinstance(cov, np.ndarray) and np.squeeze(cov).ndim == 1 and dim > 1:
        cov = np.diag(np.squeeze(cov))

    # Exploit random variables to carry out mean and covariance checks
    self.random_variable = Normal(mean=np.squeeze(mean), cov=np.squeeze(cov))
    self.mean = self.random_variable.mean
    self.cov = self.random_variable.cov

    # Set diagonal_covariance flag
    if dim == 1:
        self.diagonal_covariance = True
    else:
        self.diagonal_covariance = (
            np.count_nonzero(self.cov - np.diag(np.diagonal(self.cov))) == 0
        )

    super().__init__(
        dim=dim,
        domain=(np.full((dim,), -np.Inf), np.full((dim,), np.Inf)),
    )
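# The variance-vector branch above, in isolation: a 1-D array of variances is
# promoted to a diagonal covariance matrix. A minimal standalone NumPy sketch
# of that logic (illustrative helper, not part of the class):
def _variance_vector_to_diagonal_cov():
    import numpy as np  # local import keeps the sketch self-contained

    cov = np.array([0.5, 1.0, 2.0])  # interpreted as per-dimension variances
    if isinstance(cov, np.ndarray) and np.squeeze(cov).ndim == 1 and cov.size > 1:
        cov = np.diag(np.squeeze(cov))
    assert cov.shape == (3, 3) and np.allclose(np.diag(cov), [0.5, 1.0, 2.0])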
def benes_daum():
    """Benes-Daum test case, Example 10.17 in Applied SDEs."""

    def f(t, x):
        return np.tanh(x)

    def df(t, x):
        return 1.0 - np.tanh(x)**2

    def l(t):
        return np.ones(1)

    initmean = np.zeros(1)
    initcov = 3.0 * np.eye(1)
    initrv = Normal(initmean, initcov)
    dynamod = pnss.SDE(dimension=1, driftfun=f, dispmatfun=l, jacobfun=df)
    measmod = pnss.DiscreteLTIGaussian(np.eye(1), np.zeros(1), np.eye(1))
    return dynamod, measmod, initrv, {}
def integrate(
    self, fun: Callable, measure: IntegrationMeasure, nevals: int
) -> Tuple[Normal, Dict]:
    r"""Integrate the function ``fun``.

    Parameters
    ----------
    fun :
        The integrand function :math:`f`.
    measure :
        An integration measure :math:`\mu`.
    nevals :
        Number of function evaluations.

    Returns
    -------
    F :
        The integral of ``fun`` against ``measure``.
    info :
        Information on the performance of the method.
    """
    # Acquisition policy
    nodes = self.policy(nevals, measure)
    fun_evals = fun(nodes)

    # Compute integral mean and variance
    # Define kernel embedding
    kernel_embedding = KernelEmbedding(self.kernel, measure)
    gram = self.kernel(nodes, nodes)
    kernel_mean = kernel_embedding.kernel_mean(nodes)
    initial_error = kernel_embedding.kernel_variance()

    weights = self._solve_gram(gram, kernel_mean)

    integral_mean = np.squeeze(weights.T @ fun_evals)
    integral_variance = initial_error - weights.T @ kernel_mean
    integral = Normal(integral_mean, integral_variance)

    # Information on result
    info = {"model_fit_diagnostic": None}

    return integral, info
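# A self-contained sketch of the vanilla BQ computation carried out above,
# using plain NumPy/SciPy: a Gaussian kernel on [0, 1] against the Lebesgue
# measure, for which the kernel mean and initial error have closed forms.
# The helper name and the jitter term are illustrative choices, not the
# library's ``_solve_gram`` implementation.
def _vanilla_bq_sketch():
    import numpy as np
    from scipy.special import erf

    ell = 1.0  # kernel lengthscale
    nodes = np.linspace(0.0, 1.0, 10)
    fun_evals = np.sin(nodes)  # integrand f(x) = sin(x); true integral 1 - cos(1)

    # Gram matrix k(x_i, x_j) = exp(-(x_i - x_j)^2 / (2 ell^2))
    gram = np.exp(-((nodes[:, None] - nodes[None, :]) ** 2) / (2 * ell**2))

    # Kernel mean: int_0^1 k(x, y) dy, closed form via the error function
    kernel_mean = (
        ell
        * np.sqrt(np.pi / 2)
        * (erf((1 - nodes) / (np.sqrt(2) * ell)) + erf(nodes / (np.sqrt(2) * ell)))
    )

    # Initial error: int_0^1 int_0^1 k(x, y) dx dy
    initial_error = (
        np.sqrt(2 * np.pi) * ell * erf(1 / (np.sqrt(2) * ell))
        + 2 * ell**2 * (np.exp(-1 / (2 * ell**2)) - 1)
    )

    # BQ weights, mean, and variance (jitter added for numerical stability)
    weights = np.linalg.solve(gram + 1e-10 * np.eye(nodes.size), kernel_mean)
    integral_mean = weights @ fun_evals
    integral_variance = initial_error - weights @ kernel_mean
    return integral_mean, integral_variance  # mean should be close to 1 - cos(1)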
def __init__(
    self,
    mean: Union[float, np.floating, np.ndarray],
    cov: Union[float, np.floating, np.ndarray],
    input_dim: Optional[IntArgType] = None,
) -> None:
    # Extend scalar mean and covariance to higher dimensions if input_dim has
    # been supplied by the user
    if (
        (np.isscalar(mean) or mean.size == 1)
        and (np.isscalar(cov) or cov.size == 1)
        and input_dim is not None
    ):
        mean = np.full((input_dim,), mean)
        cov = cov * np.eye(input_dim)

    # Set dimension based on the mean vector
    if np.isscalar(mean):
        input_dim = 1
    else:
        input_dim = mean.size

    super().__init__(
        input_dim=input_dim,
        domain=(np.full((input_dim,), -np.Inf), np.full((input_dim,), np.Inf)),
    )

    # Exploit random variables to carry out mean and covariance checks
    # squeezes are needed due to the way random variables are currently implemented
    # pylint: disable=no-member
    self.random_variable = Normal(mean=np.squeeze(mean), cov=np.squeeze(cov))
    self.mean = np.reshape(self.random_variable.mean, (self.input_dim,))
    self.cov = np.reshape(
        self.random_variable.cov, (self.input_dim, self.input_dim)
    )

    # Set diagonal_covariance flag
    if input_dim == 1:
        self.diagonal_covariance = True
    else:
        self.diagonal_covariance = (
            np.count_nonzero(self.cov - np.diag(np.diagonal(self.cov))) == 0
        )
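# The scalar-broadcasting branch above, in isolation: a scalar mean and
# covariance together with ``input_dim`` expand to an isotropic Gaussian.
# Standalone NumPy sketch of that logic (illustrative helper only):
def _scalar_broadcasting_sketch():
    import numpy as np  # local import keeps the sketch self-contained

    mean, cov, input_dim = 0.0, 1.5, 3
    if np.isscalar(mean) and np.isscalar(cov) and input_dim is not None:
        mean = np.full((input_dim,), mean)  # -> array([0., 0., 0.])
        cov = cov * np.eye(input_dim)       # -> 1.5 * identity(3)
    assert mean.shape == (3,) and np.allclose(cov, 1.5 * np.eye(3))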
def test_state_from_new_data(state, request):
    old_state = request.getfixturevalue(state)
    new_nevals = 5

    # some new data
    x = np.zeros([new_nevals, old_state.input_dim])
    y = np.ones(new_nevals)
    integral = Normal(0, 1)
    gram = np.eye(new_nevals)
    kernel_means = np.ones(new_nevals)

    # previously no data given
    s = BQState.from_new_data(
        nodes=x,
        fun_evals=y,
        integral_belief=integral,
        prev_state=old_state,
        gram=gram,
        kernel_means=kernel_means,
    )

    # types
    assert isinstance(s.kernel, Kernel)
    assert isinstance(s.measure, IntegrationMeasure)
    assert isinstance(s.kernel_embedding, KernelEmbedding)
    assert isinstance(s.nodes, np.ndarray)
    assert isinstance(s.fun_evals, np.ndarray)
    assert isinstance(s.gram, np.ndarray)
    assert isinstance(s.kernel_means, np.ndarray)
    assert isinstance(s.integral_belief, Normal)
    assert isinstance(s.previous_integral_beliefs, tuple)

    # shapes
    assert s.nodes.shape == (new_nevals, s.input_dim)
    assert s.fun_evals.shape == (new_nevals,)
    assert len(s.previous_integral_beliefs) == 1
    assert s.gram.shape == (new_nevals, new_nevals)
    assert s.kernel_means.shape == (new_nevals,)

    # values
    assert s.input_dim == s.measure.input_dim
def pendulum():
    # Below is for consistency with pytest & unittest.
    # Without a seed, unittest passes but pytest fails.
    # I tried multiple seeds; they all work equally well.
    np.random.seed(12345)
    delta_t = 0.0075
    var = 0.32**2
    g = 9.81

    def f(t, x):
        x1, x2 = x
        y1 = x1 + x2 * delta_t
        y2 = x2 - g * np.sin(x1) * delta_t
        return np.array([y1, y2])

    def df(t, x):
        x1, x2 = x
        y1 = [1, delta_t]
        y2 = [-g * np.cos(x1) * delta_t, 1]
        return np.array([y1, y2])

    def h(t, x):
        x1, x2 = x
        return np.array([np.sin(x1)])

    def dh(t, x):
        x1, x2 = x
        return np.array([[np.cos(x1), 0.0]])

    q = 1.0 * (
        np.diag(np.array([delta_t**3 / 3, delta_t]))
        + np.diag(np.array([delta_t**2 / 2]), 1)
        + np.diag(np.array([delta_t**2 / 2]), -1)
    )
    r = var * np.eye(1)
    initmean = np.ones(2)
    initcov = var * np.eye(2)
    dynamod = pnss.DiscreteGaussian(2, 2, f, lambda t: q, df)
    measmod = pnss.DiscreteGaussian(2, 1, h, lambda t: r, dh)
    initrv = Normal(initmean, initcov)
    return dynamod, measmod, initrv, {"dt": delta_t, "tmax": 4}
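# A standalone finite-difference check of the pendulum Jacobian ``df`` above
# (illustrative helper, not part of the fixture):
def _check_pendulum_jacobian():
    import numpy as np

    dt, g = 0.0075, 9.81
    f = lambda x: np.array([x[0] + x[1] * dt, x[1] - g * np.sin(x[0]) * dt])
    df = lambda x: np.array([[1.0, dt], [-g * np.cos(x[0]) * dt, 1.0]])

    x0, eps = np.array([0.3, -0.2]), 1e-6
    # Central differences column by column
    num_jac = np.column_stack(
        [(f(x0 + eps * e) - f(x0 - eps * e)) / (2 * eps) for e in np.eye(2)]
    )
    assert np.allclose(num_jac, df(x0), atol=1e-6)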
def _estimate_local_error(
    self,
    pred_rv,
    t_new,
    calibrated_proc_noise_cov,
    calibrated_proc_noise_cov_cholesky,
    **kwargs,
):
    """Estimate the local errors.

    This corresponds to the approach in [1], implemented such that it is
    compatible with the EKF1 and UKF.

    References
    ----------
    .. [1] Schober, M., Särkkä, S. and Hennig, P.
        A probabilistic model for the numerical solution of initial value
        problems.
        Statistics and Computing, 2019.
    """
    local_pred_rv = Normal(
        pred_rv.mean,
        calibrated_proc_noise_cov,
        cov_cholesky=calibrated_proc_noise_cov_cholesky,
    )
    local_meas_rv, _ = self.gfilt.measure(local_pred_rv, t_new)
    error = local_meas_rv.cov.diagonal()
    return np.sqrt(np.abs(error))
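# Standalone illustration of the estimate above: pushing the calibrated
# process-noise covariance through a linear measurement matrix and taking the
# square root of the covariance diagonal yields per-dimension local error
# estimates. Both matrices below are made up for illustration.
def _local_error_sketch():
    import numpy as np

    H = np.eye(1, 4)                 # hypothetical linear measurement model
    calibrated_q = 0.25 * np.eye(4)  # hypothetical calibrated process noise
    local_meas_cov = H @ calibrated_q @ H.T
    return np.sqrt(np.abs(np.diag(local_meas_cov)))  # -> array([0.5])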
def ornstein_uhlenbeck():
    # Below is for consistency with pytest & unittest.
    # Without a seed, unittest passes but pytest fails.
    # I tried multiple seeds; they all work equally well.
    np.random.seed(12345)
    delta_t = 0.2
    lam, q, r = 0.21, 0.5, 0.1
    drift = -lam * np.eye(1)
    force = np.zeros(1)
    disp = np.sqrt(q) * np.eye(1)
    dynmod = pnss.LTISDE(
        driftmat=drift,
        forcevec=force,
        dispmat=disp,
    )
    measmod = pnss.DiscreteLTIGaussian(
        state_trans_mat=np.eye(1),
        shift_vec=np.zeros(1),
        proc_noise_cov_mat=r * np.eye(1),
    )
    initrv = Normal(10 * np.ones(1), np.eye(1))
    return dynmod, measmod, initrv, {"dt": delta_t, "tmax": 20}
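# The scalar OU process above, dx = -lam * x dt + sqrt(q) dW, admits an exact
# discretization with transition factor exp(-lam * dt) and process-noise
# variance q / (2 * lam) * (1 - exp(-2 * lam * dt)). This is a standard result
# and a useful reference point for the LTISDE discretization; sketch:
def _ou_exact_discretization():
    import numpy as np

    lam, q, dt = 0.21, 0.5, 0.2
    transition = np.exp(-lam * dt)  # approx. 0.9589
    proc_noise_var = q / (2 * lam) * (1 - np.exp(-2 * lam * dt))
    return transition, proc_noise_var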
def __call__(
    self,
    bq_state: BQState,
    new_nodes: np.ndarray,
    new_fun_evals: np.ndarray,
    *args,
    **kwargs,
) -> Tuple[Normal, BQState]:
    """Update the integral belief and BQ state according to the new data given.

    Parameters
    ----------
    bq_state :
        Current state of the Bayesian quadrature loop.
    new_nodes :
        *shape=(n_eval_new, input_dim)* -- New nodes that have been added.
    new_fun_evals :
        *shape=(n_eval_new,)* -- Function evaluations at the given nodes.

    Returns
    -------
    updated_belief :
        Gaussian integral belief after conditioning on the new nodes and
        evaluations.
    updated_state :
        Updated version of ``bq_state`` that contains all updated quantities.
    """
    # Update nodes and function evaluations
    old_nodes = bq_state.nodes
    nodes = np.concatenate((bq_state.nodes, new_nodes), axis=0)
    fun_evals = np.append(bq_state.fun_evals, new_fun_evals)

    # Kernel quantities
    if old_nodes.size == 0:
        gram = bq_state.kernel.matrix(new_nodes)
        kernel_means = bq_state.kernel_embedding.kernel_mean(new_nodes)
    else:
        gram_new_new = bq_state.kernel.matrix(new_nodes)
        gram_old_new = bq_state.kernel.matrix(new_nodes, old_nodes)
        gram = np.hstack(
            (
                np.vstack((bq_state.gram, gram_old_new)),
                np.vstack((gram_old_new.T, gram_new_new)),
            )
        )
        kernel_means = np.concatenate(
            (
                bq_state.kernel_means,
                bq_state.kernel_embedding.kernel_mean(new_nodes),
            )
        )

    initial_integral_variance = bq_state.kernel_embedding.kernel_variance()
    weights = self._solve_gram(gram, kernel_means)

    # Integral mean and variance
    integral_mean = weights @ fun_evals
    integral_variance = initial_integral_variance - weights @ kernel_means

    updated_belief = Normal(integral_mean, integral_variance)
    updated_state = BQState.from_new_data(
        nodes=nodes,
        fun_evals=fun_evals,
        integral_belief=updated_belief,
        prev_state=bq_state,
        gram=gram,
        kernel_means=kernel_means,
    )

    return updated_belief, updated_state
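# A quick standalone check of the block-wise Gram extension used above:
# assembling [[K_old_old, K_old_new], [K_new_old, K_new_new]] from the cached
# old Gram matrix must agree with recomputing the kernel matrix on the
# concatenated nodes. The kernel here is a plain Gaussian stand-in, not the
# library's ``Kernel`` class.
def _check_block_gram_extension():
    import numpy as np

    def k(a, b):
        return np.exp(-((a[:, None] - b[None, :]) ** 2) / 2.0)

    old, new = np.array([0.1, 0.4]), np.array([0.7, 0.9])
    gram_old = k(old, old)
    gram_new_new = k(new, new)
    gram_old_new = k(new, old)  # shape (n_new, n_old), as in the code above
    gram = np.hstack(
        (
            np.vstack((gram_old, gram_old_new)),
            np.vstack((gram_old_new.T, gram_new_new)),
        )
    )
    both = np.concatenate((old, new))
    assert np.allclose(gram, k(both, both))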
def bq_iterator(
    self,
    fun: Optional[Callable] = None,
    nodes: Optional[np.ndarray] = None,
    fun_evals: Optional[np.ndarray] = None,
    integral_belief: Optional[Normal] = None,
    bq_state: Optional[BQState] = None,
    info: Optional[BQIterInfo] = None,
) -> Tuple[Normal, np.ndarray, np.ndarray, BQState, BQIterInfo]:
    """Generator that implements the iteration of the BQ method.

    This function exposes the state of the BQ method one step at a time
    while running the loop.

    Parameters
    ----------
    fun :
        Function to be integrated. It needs to accept a shape=(n_eval, input_dim)
        ``np.ndarray`` and return a shape=(n_eval,) ``np.ndarray``.
    nodes :
        *shape=(n_eval, input_dim)* -- Optional nodes at which function
        evaluations are available as ``fun_evals`` from the start.
    fun_evals :
        *shape=(n_eval,)* -- Optional function evaluations at ``nodes``
        available from the start.
    integral_belief :
        Current belief about the integral.
    bq_state :
        State of the Bayesian quadrature method. Contains all necessary
        information about the problem and the computation.
    info :
        The state of the iteration.

    Yields
    ------
    new_integral_belief :
        Updated belief about the integral.
    new_nodes :
        *shape=(n_new_eval, input_dim)* -- The new location(s), found during
        the iteration, at which ``new_fun_evals`` are available.
    new_fun_evals :
        *shape=(n_new_eval,)* -- The function evaluations at the new
        locations ``new_nodes``.
    new_bq_state :
        Updated state of the Bayesian quadrature method.
    new_info :
        Updated state of the iteration.
    """
    # Set up BQ state
    if bq_state is None:
        if integral_belief is None:
            # The following is valid only when the prior is zero-mean.
            integral_belief = Normal(
                0.0, KernelEmbedding(self.kernel, self.measure).kernel_variance()
            )
        bq_state = BQState(
            measure=self.measure,
            kernel=self.kernel,
            integral_belief=integral_belief,
        )

    integral_belief = bq_state.integral_belief

    # Set up iteration info
    if info is None:
        info = BQIterInfo.from_bq_state(bq_state)

    if nodes is not None:
        if fun_evals is None:
            fun_evals = fun(nodes)

        integral_belief, bq_state = self.belief_update(
            bq_state=bq_state,
            new_nodes=nodes,
            new_fun_evals=fun_evals,
        )

        # Make sure info gets the number of initial nodes
        info.nevals = fun_evals.size

    # Evaluate stopping criteria for the initial belief
    _has_converged = self.has_converged(bq_state=bq_state, info=info)

    yield integral_belief, None, None, bq_state, info

    while True:
        # Have we already converged?
        if _has_converged:
            break

        # Select new nodes via policy
        new_nodes = self.policy(bq_state=bq_state)

        # Evaluate the integrand at new nodes
        new_fun_evals = fun(new_nodes)

        integral_belief, bq_state = self.belief_update(
            bq_state=bq_state,
            new_nodes=new_nodes,
            new_fun_evals=new_fun_evals,
        )

        # Update the state of the iteration
        info = BQIterInfo.from_iteration(info=info, dnevals=self.policy.batch_size)

        # Evaluate stopping criteria
        _has_converged = self.has_converged(bq_state=bq_state, info=info)

        yield integral_belief, new_nodes, new_fun_evals, bq_state, info
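# A hedged usage sketch for the generator above: assuming ``bq_method`` is an
# already-configured instance of this class (kernel, measure, policy, and
# stopping criteria set up elsewhere), this drives the loop until a stopping
# criterion fires and inspects the belief after every batch of evaluations.
def _run_bq_loop(bq_method, fun):
    belief, state, info = None, None, None
    for belief, _, _, state, info in bq_method.bq_iterator(fun=fun):
        print(f"nevals={info.nevals}, integral mean={belief.mean}")
    return belief, state, info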
def _rescale(self, rvs):
    """Rescale covariances according to the estimated sigma-squared value."""
    rvs = [Normal(rv.mean, self.sigma_squared_mle * rv.cov) for rv in rvs]
    return rvs