Example no. 1
    def postprocess(self, odesol):
        """If specified (at initialisation), smooth the filter output."""
        locations = odesol.kalman_posterior.locations
        rv_list = odesol.kalman_posterior.states

        kalman_posterior = filtsmooth.gaussian.FilteringPosterior(
            transition=self.prior_process.transition,
            diffusion_model=self.diffusion_model,
        )

        # Post-hoc calibration of all states requires a constant diffusion model.
        s = (
            self.diffusion_model.diffusion
            if self._calibrate_all_states_post_hoc
            else 1.0
        )

        for idx, (t, rv) in enumerate(zip(locations, rv_list)):
            kalman_posterior.append(
                location=t,
                state=randvars.Normal(
                    mean=rv.mean,
                    cov=s * rv.cov,
                    cov_cholesky=np.sqrt(s) * rv.cov_cholesky,
                ),
            )

        if self.with_smoothing:

            squared_diffusion_list = self.diffusion_model(locations[1:])
            rv_list = self.prior_process.transition.smooth_list(
                rv_list, locations, _diffusion_list=squared_diffusion_list
            )
            kalman_posterior = filtsmooth.gaussian.SmoothingPosterior(
                filtering_posterior=kalman_posterior,
                transition=self.prior_process.transition,
                locations=locations,
                states=rv_list,
                diffusion_model=self.diffusion_model,
            )

        return _odefilter_solution.ODEFilterSolution(kalman_posterior)
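
The rescaling above relies on a standard identity: scaling a covariance by s corresponds to scaling its Cholesky factor by sqrt(s). A minimal numpy check of that step, independent of the class above:

import numpy as np

s = 2.5
cov = np.array([[2.0, 0.5], [0.5, 1.0]])
L = np.linalg.cholesky(cov)
# (sqrt(s) * L) @ (sqrt(s) * L).T == s * (L @ L.T) == s * cov
np.testing.assert_allclose((np.sqrt(s) * L) @ (np.sqrt(s) * L).T, s * cov)
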
Example no. 2
    def _solve_mde_forward_classic(self, rv, t, dt, _diffusion=1.0):
        """Solve forward moment differential equations (MDEs)."""
        dim = rv.mean.shape[0]
        mde, y0 = self._setup_vectorized_mde_forward_classic(
            rv,
            _diffusion=_diffusion,
        )

        sol, new_mean, new_cov = self._solve_mde_forward(mde, y0, t, dt, dim)

        # Useful for backward transitions, i.e., continuous-time smoothing.
        sol_mean = lambda t: sol.sol(t)[:dim]
        sol_cov = lambda t: sol.sol(t)[dim:].reshape((dim, dim))

        return randvars.Normal(mean=new_mean, cov=new_cov), {
            "sol": sol,
            "sol_mean": sol_mean,
            "sol_cov": sol_cov,
        }
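
The returned ``sol_mean``/``sol_cov`` lambdas slice into scipy's dense output. A minimal sketch of the ``dense_output`` mechanism they rely on, assuming only scipy:

import numpy as np
import scipy.integrate

# dense_output=True attaches a callable sol.sol(t) that interpolates the solution.
sol = scipy.integrate.solve_ivp(
    lambda t, y: -y, (0.0, 1.0), np.ones(3), dense_output=True
)
print(sol.sol(0.5))  # interpolated state at t = 0.5
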
Example no. 3
    def __call__(
        self,
        *,
        ivp: problems.InitialValueProblem,
        prior_process: randprocs.markov.MarkovProcess,
    ) -> randvars.RandomVariable:

        num_derivatives = prior_process.transition.num_derivatives

        f, y0 = self._make_autonomous(ivp=ivp)

        mean_matrix = self._compute_ode_derivatives(
            f=f, y0=y0, num_derivatives=num_derivatives)
        mean = mean_matrix.reshape((-1, ), order="F")
        zeros = jnp.zeros((mean.shape[0], mean.shape[0]))
        return randvars.Normal(
            mean=np.asarray(mean),
            cov=np.asarray(zeros),
            cov_cholesky=np.asarray(zeros),
        )
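
The column-major reshape above (``order="F"``) stacks the derivative columns coordinate-wise. A quick numpy illustration:

import numpy as np

m = np.arange(6).reshape(2, 3)  # [[0, 1, 2], [3, 4, 5]]
print(m.reshape((-1,), order="F"))  # column-major flattening: [0 3 1 4 2 5]
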
Example no. 4
def test_same_backward_outputs(both_transitions, diffusion, rng):
    trans1, trans2 = both_transitions
    # Draw the test realizations from the provided rng for reproducibility.
    real = 1 + 0.1 * rng.random(trans1.state_dimension)
    real2 = 1 + 0.1 * rng.random(trans1.state_dimension)
    cov = linalg_zoo.random_spd_matrix(rng, dim=trans1.state_dimension)
    rv = randvars.Normal(real2, cov)
    out_1, info1 = trans1.backward_realization(real,
                                               rv,
                                               t=0.0,
                                               dt=0.5,
                                               compute_gain=True,
                                               _diffusion=diffusion)
    out_2, info2 = trans2.backward_realization(real,
                                               rv,
                                               t=0.0,
                                               dt=0.5,
                                               compute_gain=True,
                                               _diffusion=diffusion)
    np.testing.assert_allclose(out_1.mean, out_2.mean)
    np.testing.assert_allclose(out_1.cov, out_2.cov)
Example no. 5
def explore_exploit_policy(
    fun: Callable[[FloatLike], FloatLike],
    fun_params0: randvars.RandomVariable,
    rng: np.random.Generator,
) -> float:
    """Policy exploring around the estimate of the minimum based on the certainty about
    the parameters.

    Parameters
    ----------
    fun :
        One-dimensional objective function.
    fun_params0 :
        Belief over the parameters of the quadratic objective.
    rng :
        Random number generator.
    """
    a0, b0, _ = fun_params0
    sample = randvars.Normal(0, np.trace(fun_params0.cov)).sample(rng=rng)
    return -b0.mean / a0.mean + sample
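
The exploration term above samples zero-mean noise whose variance is the total parameter uncertainty, i.e. the trace of the belief covariance. A minimal sketch of that step, assuming only numpy and probnum's Normal:

import numpy as np
from probnum import randvars

rng = np.random.default_rng(0)
cov = np.diag([1.0, 0.5, 0.1])  # belief covariance over the parameters (a, b, c)
noise = randvars.Normal(0.0, np.trace(cov)).sample(rng=rng)
# The more uncertain the belief, the larger np.trace(cov), hence wider exploration.
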
Example no. 6
def stochastic_policy(
    fun: Callable[[FloatArgType], FloatArgType],
    fun_params0: randvars.RandomVariable,
    random_state: RandomStateArgType = None,
) -> float:
    """Policy returning a random action.

    Parameters
    ----------
    fun :
        One-dimensional objective function.
    fun_params0 :
        Belief over the parameters of the quadratic objective.
    random_state :
        Random state of the policy. If None (or np.random), the global np.random state
        is used. If integer, it is used to seed the local
        :class:`~numpy.random.RandomState` instance.
    """
    return randvars.Normal(mean=0.0, cov=1.0,
                           random_state=random_state).sample()
Example no. 7
    def test_cov_cholesky_cov_cholesky_not_passed(self):
        """No cov_cholesky is passed in init.

        In this case, the "is_precomputed" flag is False, a cov_cholesky is computed on
        demand, but can also be computed manually with any damping factor.
        """
        rv = randvars.Normal(
            mean=self.rng.uniform(size=(2, 2)),
            cov=random_spd_matrix(rng=self.rng, dim=4),
        )

        with self.subTest("No Cholesky precomputed"):
            self.assertFalse(rv.cov_cholesky_is_precomputed)

        with self.subTest("Cholesky factor is computed correctly"):
            # The default damping factor 1e-12 does not mess up this test
            self.assertAllClose(rv.cov_cholesky, np.linalg.cholesky(rv.cov))

        with self.subTest("Cholesky is precomputed"):
            self.assertTrue(rv.cov_cholesky_is_precomputed)
Example no. 8
    def test_as_transition(self, fitzhughnagumo):
        # Nothing happens unless an ODE has been incorporated
        with pytest.raises(ValueError):
            self.info_op.as_transition()

        # Basic functionality works: default arguments
        self.info_op.incorporate_ode(ode=fitzhughnagumo)
        transition = self.info_op.as_transition()
        assert isinstance(transition,
                          randprocs.markov.discrete.NonlinearGaussian)

        noise_fun = lambda t: randvars.Normal(
            mean=np.zeros(self.info_op.output_dim),
            cov=np.eye(self.info_op.output_dim))
        transition = self.info_op.as_transition(noise_fun=noise_fun)
        noise = transition.noise_fun(0.0)
        assert isinstance(transition,
                          randprocs.markov.discrete.NonlinearGaussian)
        assert np.linalg.norm(noise.cov) > 0.0
        assert np.linalg.norm(noise.cov_cholesky) > 0.0
Example no. 9
    def setup(self, linearization_implementation):
        measvar = 0.1024
        initrv = randvars.Normal(np.ones(2), measvar * np.eye(2))
        rng = np.random.default_rng(seed=1)
        regression_problem, info = filtsmooth_zoo.pendulum(
            rng=rng,
            measurement_variance=measvar,
            timespan=(0.0, 4.0),
            step=0.0075,
            initrv=initrv,
        )
        prior_process = info["prior_process"]

        linearization, implementation = linearization_implementation
        _lin_method = {
            "ekf":
            functools.partial(
                filtsmooth.gaussian.approx.DiscreteEKFComponent,
                forward_implementation=implementation,
                backward_implementation=implementation,
            ),
            "ukf":
            filtsmooth.gaussian.approx.DiscreteUKFComponent,
        }[linearization]

        linearized_dynmod = _lin_method(prior_process.transition)
        linearized_measmod = _lin_method(
            regression_problem.measurement_models[0])
        regression_problem.measurement_models = [linearized_measmod] * len(
            regression_problem.locations)

        prior_process = randprocs.markov.MarkovProcess(
            transition=linearized_dynmod,
            initrv=prior_process.initrv,
            initarg=regression_problem.locations[0],
        )

        self.kalman_filter = filtsmooth.gaussian.Kalman(
            prior_process=prior_process)
        self.filtering_posterior, _ = self.kalman_filter.filter(
            regression_problem)
Example no. 10
    def forward_rv(self,
                   rv,
                   t,
                   compute_gain=False,
                   _diffusion=1.0,
                   _linearise_at=None,
                   **kwargs) -> Tuple[randvars.Normal, Dict]:
        compute_sigmapts_at = _linearise_at if _linearise_at is not None else rv
        self.sigma_points = self.assemble_sigma_points(
            at_this_rv=compute_sigmapts_at)

        proppts = self.ut.propagate(t, self.sigma_points,
                                    self.non_linear_model.state_trans_fun)
        meascov = _diffusion * self.non_linear_model.proc_noise_cov_mat_fun(t)
        mean, cov, crosscov = self.ut.estimate_statistics(
            proppts, self.sigma_points, meascov, rv.mean)
        info = {"crosscov": crosscov}
        if compute_gain:
            gain = crosscov @ np.linalg.inv(cov)
            info["gain"] = gain
        return randvars.Normal(mean, cov), info
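
For reference, the explicit inverse in the gain computation can be replaced by a symmetric solve, as the classic implementations later in this collection do; the two are equivalent:

import numpy as np
import scipy.linalg

rng = np.random.default_rng(1)
A = rng.normal(size=(3, 3))
cov = A @ A.T + 3.0 * np.eye(3)  # symmetric positive definite
crosscov = rng.normal(size=(3, 3))

gain_inv = crosscov @ np.linalg.inv(cov)
gain_solve = scipy.linalg.solve(cov.T, crosscov.T, assume_a="sym").T
np.testing.assert_allclose(gain_inv, gain_solve)
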
Example no. 11
def test_perfect_information(solver: ProbabilisticLinearSolver,
                             problem: problems.LinearSystem, ncols: int):
    """Test whether a solver given perfect information converges instantly."""

    # Construct prior belief with perfect information
    belief = beliefs.LinearSystemBelief(
        x=randvars.Normal(mean=problem.solution,
                          cov=linops.Scaling(factors=0.0,
                                             shape=(ncols, ncols))),
        A=randvars.Constant(problem.A),
        Ainv=randvars.Constant(np.linalg.inv(problem.A @ np.eye(ncols))),
    )

    # Run solver
    belief, solver_state = solver.solve(prior=belief,
                                        problem=problem,
                                        rng=np.random.default_rng(1))

    # Check for instant convergence
    assert solver_state.step == 0
    np.testing.assert_allclose(belief.x.mean, problem.solution)
Example no. 12
    def test_symmetric_samples(self):
        """Samples from a normal distribution with symmetric Kronecker kernels of two
        symmetric matrices are symmetric."""

        n = 3
        A = self.rng.uniform(size=(n, n))
        A = 0.5 * (A + A.T) + n * np.eye(n)
        rv = randvars.Normal(
            mean=np.eye(A.shape[0]),
            cov=linops.SymmetricKronecker(A=A),
        )
        rv = rv.sample(rng=self.rng, size=10)
        for i, B in enumerate(rv):
            self.assertAllClose(
                B,
                B.T,
                atol=1e-5,
                rtol=1e-5,
                msg=f"Sample {i} from symmetric Kronecker distribution is not symmetric.",
            )
Example no. 13
    def test_reshape(self):
        rv = randvars.Normal(
            mean=np.random.uniform(size=(4, 3)),
            cov=linops.Kronecker(A=random_spd_matrix(4),
                                 B=random_spd_matrix(3)).todense(),
        )

        newshape = (2, 6)
        reshaped_rv = rv.reshape(newshape)

        self.assertArrayEqual(reshaped_rv.mean, rv.mean.reshape(newshape))
        self.assertArrayEqual(reshaped_rv.cov, rv.cov)

        # Test sampling
        rv.random_state = 42
        dist_sample = rv.sample(size=5)

        reshaped_rv.random_state = 42
        dist_reshape_sample = reshaped_rv.sample(size=5)

        self.assertArrayEqual(dist_reshape_sample,
                              dist_sample.reshape((-1, ) + newshape))
Example no. 14
def explore_exploit_policy(
    fun: Callable[[FloatArgType], FloatArgType],
    fun_params0: randvars.RandomVariable,
    random_state: RandomStateArgType = None,
) -> float:
    """Policy exploring around the estimate of the minimum based on the certainty about
    the parameters.

    Parameters
    ----------
    fun :
        One-dimensional objective function.
    fun_params0 :
        Belief over the parameters of the quadratic objective.
    random_state :
        Random state of the policy. If None (or np.random), the global np.random state
        is used. If integer, it is used to seed the local
        :class:`~numpy.random.RandomState` instance.
    """
    a0, b0, _ = fun_params0
    return (-b0.mean / a0.mean + randvars.Normal(
        0, np.trace(fun_params0.cov), random_state=random_state).sample())
Example no. 15
    def _forward_rv_sqrt(
        self, rv, t, compute_gain=False, _diffusion=1.0
    ) -> typing.Tuple[randvars.RandomVariable, typing.Dict]:

        H = self.state_trans_mat_fun(t)
        SR = self.proc_noise_cov_cholesky_fun(t)
        shift = self.shift_vec_fun(t)

        new_mean = H @ rv.mean + shift
        new_cov_cholesky = cholesky_update(
            H @ rv.cov_cholesky, np.sqrt(_diffusion) * SR
        )
        new_cov = new_cov_cholesky @ new_cov_cholesky.T
        crosscov = rv.cov @ H.T
        info = {"crosscov": crosscov}
        if compute_gain:
            gain = crosscov @ np.linalg.inv(new_cov)
            info["gain"] = gain
        return (
            randvars.Normal(new_mean, cov=new_cov, cov_cholesky=new_cov_cholesky),
            info,
        )
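
The square-root update never forms the covariance explicitly: stacking the two factors and taking a QR decomposition yields a Cholesky factor of H P H^T + diffusion * R. A self-contained sketch of that identity (a stand-in for the ``cholesky_update`` used above):

import numpy as np

def cholesky_update_sketch(S1, S2):
    """Lower-triangular L with L @ L.T == S1 @ S1.T + S2 @ S2.T."""
    R = np.linalg.qr(np.concatenate((S1.T, S2.T), axis=0), mode="r")
    return R.T * np.sign(np.diag(R))  # flip column signs for a nonnegative diagonal

rng = np.random.default_rng(2)
S1, S2 = rng.normal(size=(4, 4)), rng.normal(size=(4, 4))
L = cholesky_update_sketch(S1, S2)
np.testing.assert_allclose(L @ L.T, S1 @ S1.T + S2 @ S2.T)
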
Example no. 16
    def _solve_mde_backward(self, rv_obtained, rv, t, dt, _diffusion=1.0):
        """Solve backward moment differential equations (MDEs)."""
        _, mde_forward_info = self._mde_forward_implementation(
            rv, t, dt, _diffusion=_diffusion)

        mde_forward_sol_mean = mde_forward_info["sol_mean"]
        mde_forward_sol_cov = mde_forward_info["sol_cov"]

        mde, y0 = self._setup_vectorized_mde_backward(
            rv_obtained,
            _diffusion=_diffusion,
        )
        # Pass the forward solution's mean and covariance into the backward MDE;
        # dense output is required for the lambda expressions below.
        sol = scipy.integrate.solve_ivp(
            mde,
            (t + dt, t),
            y0,
            method=self._mde_solver,
            atol=self._mde_atol,
            rtol=self._mde_rtol,
            args=(mde_forward_sol_mean, mde_forward_sol_cov),
            dense_output=True,
        )
        dim = rv.mean.shape[0]
        y_end = sol.y[:, -1]
        new_mean = y_end[:dim]
        new_cov = y_end[dim:].reshape((dim, dim))

        # Useful for backward transitions, i.e., continuous-time smoothing.
        sol_mean = lambda t: sol.sol(t)[:dim]
        sol_cov = lambda t: sol.sol(t)[dim:].reshape((dim, dim))

        return randvars.Normal(mean=new_mean, cov=new_cov), {
            "sol": sol,
            "sol_mean": sol_mean,
            "sol_cov": sol_cov,
        }
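
Note the decreasing time span ``(t + dt, t)``: scipy integrates backwards in time when the interval is reversed. A minimal check, assuming only scipy:

import numpy as np
import scipy.integrate

# Integrate dy/dt = y backwards from t=1 to t=0; starting at y(1) = e gives y(0) = 1.
sol = scipy.integrate.solve_ivp(lambda t, y: y, (1.0, 0.0), [np.e], dense_output=True)
np.testing.assert_allclose(sol.y[:, -1], [1.0], rtol=1e-2)
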
Example no. 17
def test_bad_args_shape():
    rng = np.random.default_rng(seed=1)
    time_domain = (0.0, 10.0)
    time_grid = np.arange(*time_domain)

    order = 2
    spatialdim = 2

    dynamics = randprocs.markov.integrator.IntegratedWienerTransition(
        num_derivatives=order,
        wiener_process_dimension=spatialdim,
    )
    initrv = randvars.Normal(
        np.ones(dynamics.state_dimension),
        np.eye(dynamics.state_dimension),
    )
    prior_process = randprocs.markov.MarkovProcess(initarg=time_domain[0],
                                                   initrv=initrv,
                                                   transition=dynamics)

    with pytest.raises(ValueError):
        prior_process.sample(rng=rng, args=time_grid.reshape(-1, 1))
Example no. 18
    def _estimate_local_error(self, pred_rv, t_new, calibrated_proc_noise_cov,
                              calibrated_proc_noise_cov_cholesky, **kwargs):
        """Estimate the local errors.

        This corresponds to the approach in [1], implemented such that it is compatible
        with the EKF1 and UKF.

        References
        ----------
        .. [1] Schober, M., Särkkä, S. and Hennig, P..
            A probabilistic model for the numerical solution of initial
            value problems.
            Statistics and Computing, 2019.
        """
        local_pred_rv = randvars.Normal(
            pred_rv.mean,
            calibrated_proc_noise_cov,
            cov_cholesky=calibrated_proc_noise_cov_cholesky,
        )
        local_meas_rv, _ = self.gfilt.measure(local_pred_rv, t_new)
        error = local_meas_rv.cov.diagonal()
        return np.sqrt(np.abs(error))
Example no. 19
def gaussian_belief_update(
    fun_params0: pn.RandomVariable,
    action: FloatArgType,
    observation: FloatArgType,
    noise_cov: Union[np.ndarray, linops.LinearOperator],
) -> pn.RandomVariable:
    """Update the belief over the parameters with an observation.

    Parameters
    ----------
    fun_params0 :
        Belief over the parameters of the quadratic objective.
    action :
        Action of the probabilistic quadratic optimizer.
    observation :
        Observation of the problem corresponding to the given `action`.
    noise_cov :
        *shape=(3, 3)* -- Covariance of the noise on the parameters of the quadratic
        objective given by the assumed observation model.
    """
    # Feature vector
    x = np.asarray(action).reshape(1, -1)
    Phi = np.vstack((0.5 * x ** 2, x, np.ones_like(x)))

    # Mean and covariance
    mu = fun_params0.mean
    Sigma = fun_params0.cov

    # Gram matrix
    gram = Phi.T @ (Sigma + noise_cov) @ Phi

    # Posterior Mean
    m = mu + Sigma @ Phi @ np.linalg.solve(gram, observation - Phi.T @ mu)

    # Posterior Covariance
    S = Sigma - Sigma @ Phi @ np.linalg.solve(gram, Phi.T @ Sigma)

    return randvars.Normal(mean=m, cov=S)
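
A minimal usage sketch: with near-noiseless observations of a known quadratic f(x) = 0.5*a*x**2 + b*x + c, three updates recover the parameters (the objective and prior below are illustrative):

import numpy as np
from probnum import randvars

a, b, c = 2.0, -1.0, 0.5
fun = lambda x: 0.5 * a * x**2 + b * x + c

belief = randvars.Normal(mean=np.zeros(3), cov=np.eye(3))
for action in [-1.0, 0.0, 1.0]:
    belief = gaussian_belief_update(
        belief, action, fun(action), noise_cov=1e-8 * np.eye(3)
    )
print(belief.mean)  # should approach (a, b, c) = (2.0, -1.0, 0.5)
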
Example no. 20
    def discretise(self, dt):
        """Return a discrete transition model (i.e. mild solution to SDE) using matrix
        fraction decomposition.

        That is, matrices A(h) and Q(h) and vector s(h) such
        that the transition is

        .. math:: x | x_\\text{old} \\sim \\mathcal{N}(A(h) x_\\text{old} + s(h), Q(h)),

        which is the transition of the mild solution to the LTI SDE.
        """

        if np.linalg.norm(self.force_vector) > 0:
            zeros = np.zeros((self.state_dimension, self.state_dimension))
            eye = np.eye(self.state_dimension)
            drift_matrix = np.block([[self.drift_matrix, eye], [zeros, zeros]])
            dispersion_matrix = np.concatenate(
                (self.dispersion_matrix, np.zeros(self.dispersion_matrix.shape))
            )
            ah_stack, qh_stack, _ = _mfd.matrix_fraction_decomposition(
                drift_matrix, dispersion_matrix, dt
            )
            proj = np.eye(self.state_dimension, 2 * self.state_dimension)
            proj_rev = np.flip(proj, axis=1)
            ah = proj @ ah_stack @ proj.T
            sh = proj @ ah_stack @ proj_rev.T @ self.force_vector
            qh = proj @ qh_stack @ proj.T
        else:
            ah, qh, _ = _mfd.matrix_fraction_decomposition(
                self.drift_matrix, self.dispersion_matrix, dt
            )
            sh = np.zeros(len(ah))
        return discrete.LTIGaussian(
            transition_matrix=ah,
            noise=randvars.Normal(mean=sh, cov=qh),
            forward_implementation=self._forward_implementation_string,
            backward_implementation=self._backward_implementation_string,
        )
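
For context, matrix fraction decomposition obtains A(h) and Q(h) from a single matrix exponential of a block matrix (Van Loan's trick). A minimal self-contained sketch (a stand-in for ``_mfd.matrix_fraction_decomposition``), checked against the closed form for the once-integrated Wiener process:

import numpy as np
import scipy.linalg

def mfd_sketch(F, L, h):
    """Return A(h) = exp(F h) and Q(h) = int_0^h exp(F s) L L^T exp(F s)^T ds."""
    n = F.shape[0]
    blocked = np.block([[F, L @ L.T], [np.zeros((n, n)), -F.T]])
    M = scipy.linalg.expm(blocked * h)
    Ah = M[:n, :n]
    Qh = M[:n, n:] @ Ah.T
    return Ah, Qh

h = 0.5
F = np.array([[0.0, 1.0], [0.0, 0.0]])  # once-integrated Wiener process
L = np.array([[0.0], [1.0]])
Ah, Qh = mfd_sketch(F, L, h)
np.testing.assert_allclose(Ah, [[1.0, h], [0.0, 1.0]])
np.testing.assert_allclose(Qh, [[h**3 / 3, h**2 / 2], [h**2 / 2, h]])
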
Example no. 21
    def test_cov_cholesky_cov_cholesky_passed(self):
        """A value for cov_cholesky is passed in init.

        In this case, the "is_precomputed" flag is True, cov_cholesky returns the
        argument that has been passed, and calling precompute_cov_cholesky again
        raises an exception.
        """
        mean, cov = self.params

        # This is purposely not the correct Cholesky factor for test reasons
        cov_cholesky = np.random.rand(*cov.shape)

        rv = randvars.Normal(mean, cov, cov_cholesky=cov_cholesky)

        with self.subTest("Cholesky precomputed"):
            self.assertTrue(rv.cov_cholesky_is_precomputed)

        with self.subTest("Returns correct cov_cholesky"):
            self.assertAllClose(rv.cov_cholesky, cov_cholesky)

        with self.subTest("self.precompute raises exception"):
            with self.assertRaises(Exception):
                rv.precompute_cov_cholesky()
Example no. 22
    def _forward_rv_classic(
            self,
            rv,
            t,
            compute_gain=False,
            _diffusion=1.0) -> Tuple[randvars.RandomVariable, typing.Dict]:
        H = self.transition_matrix_fun(t)
        noise = self.noise_fun(t)
        shift, R = noise.mean, noise.cov

        new_mean = H @ rv.mean + shift
        crosscov = rv.cov @ H.T
        new_cov = H @ crosscov + _diffusion * R
        info = {"crosscov": crosscov}
        if compute_gain:
            if config.matrix_free:
                gain = crosscov @ new_cov.inv()
            else:
                gain = scipy.linalg.solve(new_cov.T,
                                          crosscov.T,
                                          assume_a="sym").T
            info["gain"] = gain
        return randvars.Normal(new_mean, cov=new_cov), info
Example no. 23
    def test_reshape(self):
        rv = randvars.Normal(
            mean=np.random.uniform(size=(4, 3)),
            cov=linops.Kronecker(
                A=random_spd_matrix(rng=self.rng, dim=4),
                B=random_spd_matrix(rng=self.rng, dim=3),
            ).todense(),
        )

        newshape = (2, 6)
        reshaped_rv = rv.reshape(newshape)

        self.assertArrayEqual(reshaped_rv.mean, rv.mean.reshape(newshape))
        self.assertArrayEqual(reshaped_rv.cov, rv.cov)

        # Test sampling
        fixed_rng = np.random.default_rng(seed=self.seed)
        dist_sample = rv.sample(rng=fixed_rng, size=5)
        fixed_rng = np.random.default_rng(seed=self.seed)
        dist_reshape_sample = reshaped_rv.sample(rng=fixed_rng, size=5)

        self.assertArrayEqual(dist_reshape_sample,
                              dist_sample.reshape((-1, ) + newshape))
Example no. 24
    def _forward_rv_classic(
            self,
            rv,
            t,
            compute_gain=False,
            _diffusion=1.0) -> Tuple[randvars.RandomVariable, typing.Dict]:
        H = self.state_trans_mat_fun(t)
        R = self.proc_noise_cov_mat_fun(t)
        shift = self.shift_vec_fun(t)

        new_mean = H @ rv.mean + shift
        crosscov = rv.cov @ H.T
        new_cov = H @ crosscov + _diffusion * R
        info = {"crosscov": crosscov}
        if compute_gain:
            if config.matrix_free:
                gain = (new_cov.T.inv() @ crosscov.T).T
            else:
                gain = scipy.linalg.solve(new_cov.T,
                                          crosscov.T,
                                          assume_a="sym").T
            info["gain"] = gain
        return randvars.Normal(new_mean, cov=new_cov), info
Example no. 25
    def __call__(
        self, solver_state: "probnum.linalg.solvers.LinearSolverState"
    ) -> LinearSystemBelief:

        action_A = solver_state.action @ solver_state.problem.A
        pred = action_A @ solver_state.belief.x.mean
        proj_resid = solver_state.observation - pred
        cov_xy = solver_state.belief.x.cov @ action_A.T
        gram = action_A @ cov_xy + self._noise_var
        gram_pinv = 1.0 / gram if gram > 0.0 else 0.0
        gain = cov_xy * gram_pinv
        cov_update = gain @ cov_xy.T

        x = randvars.Normal(
            mean=solver_state.belief.x.mean + gain * proj_resid,
            cov=solver_state.belief.x.cov - cov_update,
        )
        Ainv = solver_state.belief.Ainv + cov_update

        return LinearSystemBelief(x=x,
                                  A=solver_state.belief.A,
                                  Ainv=Ainv,
                                  b=solver_state.belief.b)
Example no. 26
def test_ioup_construction(driftspeed, initarg, num_derivatives,
                           wiener_process_dimension, use_initrv, diffuse):
    if use_initrv:
        d = (num_derivatives + 1) * wiener_process_dimension
        initrv = randvars.Normal(np.arange(d), np.diag(np.arange(1, d + 1)))
    else:
        initrv = None
    if use_initrv and diffuse:
        with pytest.warns(Warning):
            randprocs.markov.integrator.IntegratedOrnsteinUhlenbeckProcess(
                driftspeed=driftspeed,
                initarg=initarg,
                num_derivatives=num_derivatives,
                wiener_process_dimension=wiener_process_dimension,
                initrv=initrv,
                diffuse=diffuse,
            )

    else:
        ioup = randprocs.markov.integrator.IntegratedOrnsteinUhlenbeckProcess(
            driftspeed=driftspeed,
            initarg=initarg,
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
            initrv=initrv,
            diffuse=diffuse,
        )

        assert isinstance(
            ioup,
            randprocs.markov.integrator.IntegratedOrnsteinUhlenbeckProcess,
        )
        assert isinstance(ioup, randprocs.markov.MarkovProcess)
        assert isinstance(
            ioup.transition,
            randprocs.markov.integrator.IntegratedOrnsteinUhlenbeckTransition,
        )
Example no. 27
    def __init__(
        self,
        lengthscale,
        initarg,
        num_derivatives=1,
        wiener_process_dimension=1,
        initrv=None,
        diffuse=False,
        forward_implementation="classic",
        backward_implementation="classic",
    ):
        matern_transition = MaternTransition(
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
            lengthscale=lengthscale,
            forward_implementation=forward_implementation,
            backward_implementation=backward_implementation,
        )
        if initrv is not None and diffuse:
            warnings.warn(
                "Parameter `diffuse` has no effect, because an `initrv` has been provided."
            )
        if initrv is None:
            if diffuse:
                scale_cholesky = 1e3
            else:
                scale_cholesky = 1.0
            zeros = np.zeros(matern_transition.state_dimension)
            cov_cholesky = scale_cholesky * np.eye(
                matern_transition.state_dimension)
            initrv = randvars.Normal(mean=zeros,
                                     cov=cov_cholesky**2,
                                     cov_cholesky=cov_cholesky)

        super().__init__(transition=matern_transition,
                         initrv=initrv,
                         initarg=initarg)
Example no. 28
    def __init__(
        self,
        driftspeed,
        initarg,
        num_derivatives=1,
        wiener_process_dimension=1,
        initrv=None,
        diffuse=False,
        forward_implementation="classic",
        backward_implementation="classic",
    ):
        ioup_transition = IntegratedOrnsteinUhlenbeckTransition(
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
            driftspeed=driftspeed,
            forward_implementation=forward_implementation,
            backward_implementation=backward_implementation,
        )
        if initrv is not None and diffuse:
            warnings.warn("Parameter `diffuse` has no effect, "
                          "because an `initrv` has been provided.")
        if initrv is None:
            if diffuse:
                scale_cholesky = 1e3
            else:
                scale_cholesky = 1.0
            zeros = np.zeros(ioup_transition.state_dimension)
            cov_cholesky = scale_cholesky * np.eye(
                ioup_transition.state_dimension)
            initrv = randvars.Normal(mean=zeros,
                                     cov=cov_cholesky**2,
                                     cov_cholesky=cov_cholesky)

        super().__init__(transition=ioup_transition,
                         initrv=initrv,
                         initarg=initarg)
Example no. 29
def initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv):
    """Initialize an ODE filter with Taylor-mode automatic differentiation.

    This requires JAX. For an explanation of what happens ``under the hood``, see [1]_.

    References
    ----------
    .. [1] Krämer, N. and Hennig, P., Stable implementation of probabilistic ODE solvers,
       *arXiv:2012.10106*, 2020.


    This implementation is inspired by
    https://github.com/jacobjinkelly/easy-neural-ode/blob/master/latent_ode.py

    Parameters
    ----------
    f
        ODE vector field.
    y0
        Initial value.
    t0
        Initial time point.
    prior
        Prior distribution used for the ODE solver. For instance an integrated Brownian motion prior (``IBM``).
    initrv
        Initial random variable.

    Returns
    -------
    Normal
        Estimated initial random variable. Compatible with the specified prior.


    Examples
    --------

    >>> import sys, pytest
    >>> if not sys.platform.startswith('linux'):
    ...     pytest.skip()

    >>> from dataclasses import astuple
    >>> from probnum.randvars import Normal
    >>> from probnum.problems.zoo.diffeq import threebody_jax, vanderpol_jax
    >>> from probnum.statespace import IBM

    Compute the initial values of the restricted three-body problem as follows

    >>> f, t0, tmax, y0, df, *_ = astuple(threebody_jax())
    >>> print(y0)
    [ 0.994       0.          0.         -2.00158511]

    >>> prior = IBM(ordint=3, spatialdim=4)
    >>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
    >>> improved_initrv = initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv)
    >>> print(prior.proj2coord(0) @ improved_initrv.mean)
    [ 0.994       0.          0.         -2.00158511]
    >>> print(improved_initrv.mean)
    [ 9.94000000e-01  0.00000000e+00 -3.15543023e+02  0.00000000e+00
      0.00000000e+00 -2.00158511e+00  0.00000000e+00  9.99720945e+04
      0.00000000e+00 -3.15543023e+02  0.00000000e+00  6.39028111e+07
     -2.00158511e+00  0.00000000e+00  9.99720945e+04  0.00000000e+00]

    Compute the initial values of the van-der-Pol oscillator as follows

    >>> f, t0, tmax, y0, df, *_ = astuple(vanderpol_jax())
    >>> print(y0)
    [2. 0.]
    >>> prior = IBM(ordint=3, spatialdim=2)
    >>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
    >>> improved_initrv = initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv)
    >>> print(prior.proj2coord(0) @ improved_initrv.mean)
    [2. 0.]
    >>> print(improved_initrv.mean)
    [    2.     0.    -2.    60.     0.    -2.    60. -1798.]
    >>> print(improved_initrv.std)
    [0. 0. 0. 0. 0. 0. 0. 0.]
    """

    try:
        import jax.numpy as jnp
        from jax.config import config
        from jax.experimental.jet import jet

        config.update("jax_enable_x64", True)
    except ImportError as err:
        raise ImportError(
            "Cannot perform Taylor-mode initialisation without optional "
            "dependencies jax and jaxlib. Try installing them via `pip install jax jaxlib`."
        ) from err

    order = prior.ordint

    def total_derivative(z_t):
        """Total derivative."""
        z, t = jnp.reshape(z_t[:-1], z_shape), z_t[-1]
        dz = jnp.ravel(f(t, z))
        dt = jnp.array([1.0])
        dz_t = jnp.concatenate((dz, dt))
        return dz_t

    z_shape = y0.shape
    z_t = jnp.concatenate((jnp.ravel(y0), jnp.array([t0])))

    derivs = []

    derivs.extend(y0)
    if order == 0:
        all_derivs = statespace.Integrator._convert_derivwise_to_coordwise(
            np.asarray(jnp.array(derivs)), ordint=0, spatialdim=len(y0))

        return randvars.Normal(
            np.asarray(all_derivs),
            cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
            cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
        )

    (dy0, [*yns]) = jet(total_derivative, (z_t, ), ((jnp.ones_like(z_t), ), ))
    derivs.extend(dy0[:-1])
    if order == 1:
        all_derivs = statespace.Integrator._convert_derivwise_to_coordwise(
            np.asarray(jnp.array(derivs)), ordint=1, spatialdim=len(y0))

        return randvars.Normal(
            np.asarray(all_derivs),
            cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
            cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
        )

    for _ in range(1, order):
        (dy0, [*yns]) = jet(total_derivative, (z_t, ), ((dy0, *yns), ))
        derivs.extend(yns[-2][:-1])

    all_derivs = statespace.Integrator._convert_derivwise_to_coordwise(
        jnp.array(derivs), ordint=order, spatialdim=len(y0))

    return randvars.Normal(
        np.asarray(all_derivs),
        cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
        cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
    )
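
For intuition about the core primitive: ``jax.experimental.jet`` propagates truncated Taylor series through a function. A minimal sketch, independent of the initializer above:

import jax.numpy as jnp
from jax.experimental.jet import jet

# Taylor expansion of f(x) = x * x along the path x(t) = 3 + t:
# f(x(t)) = 9 + 6 t + t**2, so the zeroth coefficient is 9 and the first is 6.
primal_out, series_out = jet(lambda x: x * x, (jnp.array(3.0),), ((jnp.array(1.0),),))
print(primal_out)  # 9.0
print(series_out)  # [6.0]
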
Example no. 30
def initialize_odefilter_with_rk(f,
                                 y0,
                                 t0,
                                 prior,
                                 initrv,
                                 df=None,
                                 h0=1e-2,
                                 method="DOP853"):
    r"""Initialize an ODE filter by fitting the prior process to a few steps of an approximate ODE solution computed with Scipy's RK.

    It goes as follows:

    1. The ODE integration problem is set up on the interval ``[t0, t0 + (2*order+1)*h0]``
    and solved with a call to ``scipy.integrate.solve_ivp``. The solver uses adaptive steps with ``atol=rtol=1e-12``,
    but is forced to pass through the
    events ``(t0, t0+h0, t0 + 2*h0, ..., t0 + (2*order+1)*h0)``.
    The result is a vector of time points and states with at least ``(2*order+1)`` entries.
    The adaptive solver may select many more steps, but because of the enforced events, it cannot take fewer.

    2. A prescribed prior is fitted to the first ``(2*order+1)`` (t, y) pairs of the solution. ``order`` is the order of the prior.

    3. The value of the resulting posterior at time ``t=t0`` is an estimate of the state and all its derivatives.
    The resulting marginal standard deviations estimate the error. This random variable is returned.

    Parameters
    ----------
    f
        ODE vector field.
    y0
        Initial value.
    t0
        Initial time point.
    prior
        Prior distribution used for the ODE solver. For instance an integrated Brownian motion prior (``IBM``).
    initrv
        Initial random variable.
    df
        Jacobian of the ODE vector field. Optional. If specified, more components of the result will be exact.
    h0
        Maximum step-size to use for computing the approximate ODE solution. The smaller, the more accurate, but also, the smaller, the less stable.
        The best value here depends on the ODE problem, and probably the chosen method. Optional. Default is ``1e-2``.
    method
        Which solver to use. This is communicated as a string that is compatible with ``scipy.integrate.solve_ivp(..., method=method)``.
        Optional. Default is `DOP853`.

    Returns
    -------
    Normal
        Estimated (improved) initial random variable. Compatible with the specified prior.

    Examples
    --------

    >>> from dataclasses import astuple
    >>> from probnum.randvars import Normal
    >>> from probnum.statespace import IBM
    >>> from probnum.problems.zoo.diffeq import vanderpol

    Compute the initial values of the van-der-Pol problem as follows

    >>> f, t0, tmax, y0, df, *_ = astuple(vanderpol())
    >>> print(y0)
    [2. 0.]
    >>> prior = IBM(ordint=3, spatialdim=2)
    >>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
    >>> improved_initrv = initialize_odefilter_with_rk(f, y0, t0, prior=prior, initrv=initrv, df=df)
    >>> print(prior.proj2coord(0) @ improved_initrv.mean)
    [2. 0.]
    >>> print(np.round(improved_initrv.mean, 1))
    [    2.      0.     -2.     58.2     0.     -2.     60.  -1745.7]
    >>> print(np.round(np.log10(improved_initrv.std), 1))
    [-13.8 -11.3  -9.   -1.5 -13.8 -11.3  -9.   -1.5]
    """
    y0 = np.asarray(y0)
    ode_dim = y0.shape[0] if y0.ndim > 0 else 1
    order = prior.ordint
    proj_to_y = prior.proj2coord(0)
    zeros_shift = np.zeros(ode_dim)
    zeros_cov = np.zeros((ode_dim, ode_dim))
    measmod = statespace.DiscreteLTIGaussian(
        proj_to_y,
        zeros_shift,
        zeros_cov,
        proc_noise_cov_cholesky=zeros_cov,
        forward_implementation="sqrt",
        backward_implementation="sqrt",
    )

    # order + 1 would suffice in theory, 2*order + 1 is for good measure
    # (the "+1" is a safety factor for order=1)
    num_steps = 2 * order + 1
    t_eval = np.arange(t0, t0 + (num_steps + 1) * h0, h0)
    sol = sci.solve_ivp(
        f,
        (t0, t0 + (num_steps + 1) * h0),
        y0=y0,
        atol=1e-12,
        rtol=1e-12,
        t_eval=t_eval,
        method=method,
    )

    ts = sol.t[:num_steps]
    ys = sol.y[:, :num_steps].T

    initmean = initrv.mean.copy()
    initmean[0::(order + 1)] = y0
    initmean[1::(order + 1)] = f(t0, y0)

    initcov_diag = np.diag(initrv.cov).copy()
    initcov_diag[0::(order + 1)] = SMALL_VALUE
    initcov_diag[1::(order + 1)] = SMALL_VALUE

    if df is not None:
        if order > 1:
            initmean[2::(order + 1)] = df(t0, y0) @ f(t0, y0)
            initcov_diag[2::(order + 1)] = SMALL_VALUE

    initcov = np.diag(initcov_diag)
    initcov_cholesky = np.diag(np.sqrt(initcov_diag))
    initrv = randvars.Normal(initmean, initcov, cov_cholesky=initcov_cholesky)
    kalman = filtsmooth.Kalman(prior, measmod, initrv)

    regression_problem = problems.RegressionProblem(observations=ys,
                                                    locations=ts)
    out, _ = kalman.filtsmooth(regression_problem)

    estimated_initrv = out.states[0]

    return estimated_initrv