Example 1
    def setUp(self):
        """Resources for tests."""
        # Random Seed
        np.random.seed(42)

        # Scalars, arrays and operators
        self.scalars = [0, int(1), 0.1, -4.2, np.nan, np.inf]
        self.arrays = [np.random.normal(size=[5, 4]), np.array([[3, 4], [1, 5]])]

        def mv(v):
            return np.array([2 * v[0], v[0] + 3 * v[1]])

        self.mv = mv
        self.ops = [
            linops.MatrixMult(np.array([[-1.5, 3], [0, -230]])),
            linops.LinearOperator(shape=(2, 2), matvec=mv),
            linops.Identity(shape=4),
            linops.Kronecker(
                A=linops.MatrixMult(np.array([[2, -3.5], [12, 6.5]])),
                B=linops.Identity(shape=3),
            ),
            linops.SymmetricKronecker(
                A=linops.MatrixMult(np.array([[1, -2], [-2.2, 5]])),
                B=linops.MatrixMult(np.array([[1, -3], [0, -0.5]])),
            ),
        ]
Example 2
    def test_posterior_mean_CG_equivalency(self):
        """The probabilistic linear solver(s) should recover CG iterates as a posterior
        mean for specific covariances."""

        # Linear system
        A, b = self.poisson_linear_system

        # Callback function to return CG iterates
        cg_iterates = []

        def callback_iterates_CG(xk):
            cg_iterates.append(
                np.eye(np.shape(A)[0]) @ xk
            )  # identity hack to actually save different iterations

        # Solve linear system

        # Initial guess as chosen by PLS: x0 = Ainv.mean @ b
        x0 = b

        # Conjugate gradient method
        xhat_cg, info_cg = scipy.sparse.linalg.cg(
            A=A, b=b, x0=x0, tol=10 ** -6, callback=callback_iterates_CG
        )
        cg_iters_arr = np.array([x0] + cg_iterates)

        # Matrix priors (encoding weak symmetric posterior correspondence)
        Ainv0 = rvs.Normal(
            mean=linops.Identity(A.shape[1]),
            cov=linops.SymmetricKronecker(A=linops.Identity(A.shape[1])),
        )
        A0 = rvs.Normal(
            mean=linops.Identity(A.shape[1]),
            cov=linops.SymmetricKronecker(A),
        )
        for kwargs in [{"assume_A": "sympos", "rtol": 10 ** -6}]:
            with self.subTest():
                # Define callback function to obtain search directions
                pls_iterates = []

                # pylint: disable=cell-var-from-loop
                def callback_iterates_PLS(
                    xk, Ak, Ainvk, sk, yk, alphak, resid, **kwargs
                ):
                    pls_iterates.append(xk.mean)

                # Probabilistic linear solver
                xhat_pls, _, _, info_pls = linalg.problinsolve(
                    A=A,
                    b=b,
                    Ainv0=Ainv0,
                    A0=A0,
                    callback=callback_iterates_PLS,
                    **kwargs
                )
                pls_iters_arr = np.array([x0] + pls_iterates)

                self.assertAllClose(xhat_pls.mean, xhat_cg, rtol=10 ** -12)
                self.assertAllClose(pls_iters_arr, cg_iters_arr, rtol=10 ** -12)
Example 3
    def setUp(self):
        """Resources for tests."""
        # Seed
        np.random.seed(seed=42)

        # Parameters
        m = 7
        n = 3
        self.constants = [-1, -2.4, 0, 200, np.pi]
        sparsemat = scipy.sparse.rand(m=m, n=n, density=0.1, random_state=1)
        self.normal_params = [
            # Univariate
            (-1.0, 3.0),
            (1, 3),
            # Multivariate
            (np.random.uniform(size=10), np.eye(10)),
            (np.random.uniform(size=10), random_spd_matrix(10)),
            # Matrixvariate
            (
                np.random.uniform(size=(2, 2)),
                linops.SymmetricKronecker(
                    A=np.array([[1.0, 2.0], [2.0, 1.0]]),
                    B=np.array([[5.0, -1.0], [-1.0, 10.0]]),
                ).todense(),
            ),
            # Operatorvariate
            (
                np.array([1.0, -5.0]),
                linops.Matrix(A=np.array([[2.0, 1.0], [1.0, -0.1]])),
            ),
            (
                linops.Matrix(A=np.array([[0.0, -5.0]])),
                linops.Identity(shape=(2, 2)),
            ),
            (
                np.array([[1.0, 2.0], [-3.0, -0.4], [4.0, 1.0]]),
                linops.Kronecker(A=np.eye(3), B=5 * np.eye(2)),
            ),
            (
                linops.Matrix(A=sparsemat.todense()),
                linops.Kronecker(0.1 * linops.Identity(m), linops.Identity(n)),
            ),
            (
                linops.Matrix(A=np.random.uniform(size=(2, 2))),
                linops.SymmetricKronecker(
                    A=np.array([[1.0, 2.0], [2.0, 1.0]]),
                    B=np.array([[5.0, -1.0], [-1.0, 10.0]]),
                ),
            ),
            # Symmetric Kronecker Identical Factors
            (
                linops.Identity(shape=25),
                linops.SymmetricKronecker(A=linops.Identity(25)),
            ),
        ]
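A minimal usage sketch for the parameter pairs collected above, assuming only the probnum randvars API used in the other examples; the chosen pair is the multivariate one:

import numpy as np
from probnum import randvars

# Illustrative: construct a Normal RV from one (mean, cov) pair of normal_params.
mean, cov = np.random.uniform(size=10), np.eye(10)
rv = randvars.Normal(mean=mean, cov=cov)
print(rv.mean.shape, rv.cov.shape)  # (10,) (10, 10)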
Example 4
    def equivalent_discretisation_preconditioned(self):
        """Discretised IN THE PRECONDITIONED SPACE.

        The preconditioned state transition is the flipped Pascal matrix.
        The preconditioned process noise covariance is the flipped Hilbert matrix.
        The shift is always zero.

        Reference: https://arxiv.org/abs/2012.10106
        """

        state_transition_1d = np.flip(
            scipy.linalg.pascal(self.num_derivatives + 1, kind="lower", exact=False)
        )
        if config.matrix_free:
            state_transition = linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.aslinop(state_transition_1d),
            )
        else:
            state_transition = np.kron(
                np.eye(self.wiener_process_dimension), state_transition_1d
            )
        process_noise_1d = np.flip(scipy.linalg.hilbert(self.num_derivatives + 1))
        if config.matrix_free:
            process_noise = linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.aslinop(process_noise_1d),
            )
        else:
            process_noise = np.kron(
                np.eye(self.wiener_process_dimension), process_noise_1d
            )
        empty_shift = np.zeros(
            self.wiener_process_dimension * (self.num_derivatives + 1)
        )

        process_noise_cholesky_1d = np.linalg.cholesky(process_noise_1d)
        if config.matrix_free:
            process_noise_cholesky = linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.aslinop(process_noise_cholesky_1d),
            )
        else:
            process_noise_cholesky = np.kron(
                np.eye(self.wiener_process_dimension), process_noise_cholesky_1d
            )

        return discrete.LTIGaussian(
            state_trans_mat=state_transition,
            shift_vec=empty_shift,
            proc_noise_cov_mat=process_noise,
            proc_noise_cov_cholesky=process_noise_cholesky,
            forward_implementation=self.forward_implementation,
            backward_implementation=self.backward_implementation,
        )
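To make the two 1D building blocks concrete, here is a small numpy/scipy sketch printing the flipped Pascal and Hilbert factors; q = 2 is an illustrative value, not taken from the example:

import numpy as np
import scipy.linalg

q = 2  # illustrative num_derivatives
print(np.flip(scipy.linalg.pascal(q + 1, kind="lower", exact=False)))
# [[1. 2. 1.]
#  [0. 1. 1.]
#  [0. 0. 1.]]
print(np.flip(scipy.linalg.hilbert(q + 1)))
# approx. [[0.2  0.25 0.33]
#          [0.25 0.33 0.5 ]
#          [0.33 0.5  1.  ]]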
Example 5
def _matmul_normal_constant(norm_rv: _Normal, constant_rv: _Constant) -> _Normal:
    if norm_rv.ndim == 1 or (norm_rv.ndim == 2 and norm_rv.shape[0] == 1):
        return _Normal(
            mean=norm_rv.mean @ constant_rv.support,
            cov=constant_rv.support.T @ (norm_rv.cov @ constant_rv.support),
            random_state=_utils.derive_random_seed(
                norm_rv.random_state, constant_rv.random_state
            ),
        )
    elif norm_rv.ndim == 2 and norm_rv.shape[0] > 1:
        cov_update = _linear_operators.Kronecker(
            _linear_operators.Identity(constant_rv.shape[0]), constant_rv.support
        )

        return _Normal(
            mean=norm_rv.mean @ constant_rv.support,
            cov=cov_update.T @ (norm_rv.cov @ cov_update),
            random_state=_utils.derive_random_seed(
                norm_rv.random_state, constant_rv.random_state
            ),
        )
    else:
        raise TypeError(
            "Currently, matrix multiplication is only supported for vector- and "
            "matrix-variate Gaussians."
        )
Example 6
def _matmul_normal_constant(norm_rv: _Normal,
                            constant_rv: _Constant) -> _Normal:
    if norm_rv.ndim == 1 or (norm_rv.ndim == 2 and norm_rv.shape[0] == 1):
        if norm_rv.cov_cholesky_is_precomputed:
            cov_cholesky = _utils.linalg.cholesky_update(
                constant_rv.support.T @ norm_rv.cov_cholesky)
        else:
            cov_cholesky = None
        return _Normal(
            mean=norm_rv.mean @ constant_rv.support,
            cov=constant_rv.support.T @ (norm_rv.cov @ constant_rv.support),
            cov_cholesky=cov_cholesky,
            random_state=_utils.derive_random_seed(norm_rv.random_state,
                                                   constant_rv.random_state),
        )
    elif norm_rv.ndim == 2 and norm_rv.shape[0] > 1:
        # This part does not do the Cholesky update for performance reasons:
        # currently there is no way to switch the Cholesky updates off, which
        # might affect (large, potentially sparse) covariance matrices of
        # matrix-variate Normal RVs. See Issue #335.
        cov_update = _linear_operators.Kronecker(
            _linear_operators.Identity(constant_rv.shape[0]),
            constant_rv.support)

        return _Normal(
            mean=norm_rv.mean @ constant_rv.support,
            cov=cov_update.T @ (norm_rv.cov @ cov_update),
            random_state=_utils.derive_random_seed(norm_rv.random_state,
                                                   constant_rv.random_state),
        )
    else:
        raise TypeError(
            "Currently, matrix multiplication is only supported for vector- and "
            "matrix-variate Gaussians.")
Example 7
    def proj2coord(self, coord: int) -> np.ndarray:
        """Projection matrix to :math:`i` th coordinates.

        Computes the matrix

        .. math:: H_i = \\left[ I_d \\otimes e_i \\right] P^{-1},

        where :math:`e_i` is the :math:`i` th unit vector. :math:`H_i` projects
        to the :math:`i` th coordinate of a vector. If the ODE is
        multidimensional, it projects to the :math:`i` th coordinate of
        **each** ODE dimension.

        Parameters
        ----------
        coord : int
            Coordinate index :math:`i` to project to.
            Expected to be in range :math:`0 \\leq i \\leq q + 1`.

        Returns
        -------
        np.ndarray, shape=(d, d*(q+1))
            Projection matrix :math:`H_i`.
        """
        projvec1d = np.eye(self.num_derivatives + 1)[:, coord]
        projmat1d = projvec1d.reshape((1, self.num_derivatives + 1))
        if config.matrix_free:
            return linops.Kronecker(
                linops.Identity(self.wiener_process_dimension), projmat1d)

        return np.kron(np.eye(self.wiener_process_dimension), projmat1d)
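A numpy-only sketch of what the dense branch computes, with illustrative values d = 2, q = 2:

import numpy as np

d, q = 2, 2  # illustrative dimensions
state = np.arange(d * (q + 1), dtype=float)  # [x1, x1', x1'', x2, x2', x2'']
projmat1d = np.eye(q + 1)[:, 0].reshape((1, q + 1))  # selects the 0th coordinate
H0 = np.kron(np.eye(d), projmat1d)  # shape (d, d*(q+1))
print(H0 @ state)  # [0. 3.] -- the 0th coordinate of each ODE dimension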
Example 8
    def setup(self, matrix_free, len_trajectory, num_derivatives, dimension):
        with config(matrix_free=matrix_free):

            dynamics = randprocs.markov.integrator.IntegratedWienerTransition(
                num_derivatives=num_derivatives,
                wiener_process_dimension=dimension,
                forward_implementation="classic",
                backward_implementation="classic",
            )

            measvar = 0.1024
            initrv = randvars.Normal(
                np.ones(dynamics.state_dimension),
                measvar * linops.Identity(dynamics.state_dimension),
            )

            time_domain = (0.0, float(len_trajectory))
            self.time_grid = np.arange(*time_domain)
            self.markov_process = randprocs.markov.MarkovProcess(
                initarg=time_domain[0], initrv=initrv, transition=dynamics)

            rng = np.random.default_rng(seed=1)
            self.base_measure_realization = scipy.stats.norm.rvs(
                size=(self.time_grid.shape + initrv.shape),
                random_state=rng,
            )
Example 9
    def __call__(self, step):
        scaling_vector = np.abs(step) ** self.powers / self.scales
        if config.matrix_free:
            return linops.Kronecker(
                A=linops.Identity(self.dimension),
                B=linops.Scaling(factors=scaling_vector),
            )
        return np.kron(np.eye(self.dimension), np.diag(scaling_vector))
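Assuming the probnum linops API used above (Kronecker, Identity and Scaling all expose .todense()), a quick consistency check that the matrix-free branch agrees with the dense one; all values are illustrative:

import numpy as np
from probnum import linops

dimension = 2
powers = np.array([0.0, 1.0])
scales = np.array([1.0, 2.0])
step = 0.5

scaling_vector = np.abs(step) ** powers / scales
op = linops.Kronecker(
    A=linops.Identity(dimension),
    B=linops.Scaling(factors=scaling_vector),
)
dense = np.kron(np.eye(dimension), np.diag(scaling_vector))
np.testing.assert_allclose(op.todense(), dense)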
Example 10
    def _drift_matrix(self):
        drift_matrix_1d = np.diag(np.ones(self.num_derivatives), 1)
        if config.matrix_free:
            return linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.Matrix(A=drift_matrix_1d),
            )
        return np.kron(np.eye(self.wiener_process_dimension), drift_matrix_1d)
Example 11
    def _dispersion_matrix(self):
        dispersion_matrix_1d = np.zeros(self.num_derivatives + 1)
        dispersion_matrix_1d[-1] = 1.0  # Unit diffusion

        if config.matrix_free:
            return linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.Matrix(A=dispersion_matrix_1d.reshape(-1, 1)),
            )
        return np.kron(np.eye(self.wiener_process_dimension), dispersion_matrix_1d).T
Example 12
def case_state_symmetric_matrix_based(rng: np.random.Generator):
    """State of a symmetric matrix-based linear solver."""
    prior = linalg.solvers.beliefs.LinearSystemBelief(
        A=randvars.Normal(
            mean=linops.Matrix(linsys.A),
            cov=linops.SymmetricKronecker(A=linops.Identity(n)),
        ),
        x=(Ainv @ b[:, None]).reshape((n, )),
        Ainv=randvars.Normal(
            mean=linops.Identity(n),
            cov=linops.SymmetricKronecker(A=linops.Identity(n)),
        ),
        b=b,
    )
    state = linalg.solvers.LinearSolverState(problem=linsys, prior=prior)
    state.action = rng.standard_normal(size=state.problem.A.shape[1])
    state.observation = rng.standard_normal(size=state.problem.A.shape[1])

    return state
Example 13
def test_induced_solution_belief(rng: np.random.Generator):
    """Test whether a consistent belief over the solution is inferred from a belief over
    the inverse."""
    n = 5
    A = randvars.Constant(random_spd_matrix(dim=n, rng=rng))
    Ainv = randvars.Normal(
        mean=linops.Scaling(factors=1 / np.diag(A.mean)),
        cov=linops.SymmetricKronecker(linops.Identity(n)),
    )
    b = randvars.Constant(rng.normal(size=(n, 1)))
    prior = LinearSystemBelief(A=A, Ainv=Ainv, x=None, b=b)

    x_infer = Ainv @ b
    np.testing.assert_allclose(prior.x.mean, x_infer.mean)
    np.testing.assert_allclose(prior.x.cov.todense(), x_infer.cov.todense())
Example 14
def _matmul_normal_constant(norm_rv: _Normal, constant_rv: _Constant) -> _Normal:
    """Normal random variable multiplied with a vector or matrix.

    Computes the distribution of the random variable :math:`Y = XA`, where :math:`X`
    is a matrix- or multi-variate normal random variable and :math:`A` a constant.
    """
    if norm_rv.ndim == 1 or (norm_rv.ndim == 2 and norm_rv.shape[0] == 1):
        if norm_rv.cov_cholesky_is_precomputed:
            cov_cholesky = _utils.linalg.cholesky_update(
                constant_rv.support.T @ norm_rv.cov_cholesky
            )
        else:
            cov_cholesky = None

        mean = norm_rv.mean @ constant_rv.support
        cov = constant_rv.support.T @ (norm_rv.cov @ constant_rv.support)

        if cov.shape == () and mean.shape == (1,):
            cov = cov.reshape((1, 1))

        return _Normal(mean=mean, cov=cov, cov_cholesky=cov_cholesky)

    # This part does not do the Cholesky update for performance reasons:
    # currently there is no way to switch the Cholesky updates off, which
    # might affect (large, potentially sparse) covariance matrices of
    # matrix-variate Normal RVs. See Issue #335.
    if constant_rv.support.ndim == 1:
        constant_rv_support = constant_rv.support[:, None]
    else:
        constant_rv_support = constant_rv.support

    cov_update = _linear_operators.Kronecker(
        _linear_operators.Identity(norm_rv.shape[0]), constant_rv_support.T
    )

    # Cov(rvec(XA)) = Cov((I (x) A.T)rvec(X)) = (I (x) A.T)Cov(rvec(X))(I (x) A.T).T
    return _Normal(
        mean=norm_rv.mean @ constant_rv.support,
        cov=cov_update @ (norm_rv.cov @ cov_update.T),
    )
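The covariance comment above rests on the row-major vectorization identity rvec(XA) = (I (x) A.T) rvec(X). A numpy-only sanity check with illustrative shapes:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((3, 4))
A = rng.standard_normal((4, 2))

T = np.kron(np.eye(3), A.T)  # (I (x) A.T), acting on the row-major vec
np.testing.assert_allclose((X @ A).reshape(-1), T @ X.reshape(-1))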
Example 15
def _matmul_constant_normal(constant_rv: _Constant, norm_rv: _Normal) -> _Normal:
    """Matrix-multiplication with a normal random variable.

    Computes the distribution of the random variable :math:`Y = AX`, where :math:`X` is
    a matrix- or multi-variate normal random variable and :math:`A` a constant.
    """
    if norm_rv.ndim == 1 or (norm_rv.ndim == 2 and norm_rv.shape[1] == 1):
        if norm_rv.cov_cholesky_is_precomputed:
            cov_cholesky = _utils.linalg.cholesky_update(
                constant_rv.support @ norm_rv.cov_cholesky
            )
        else:
            cov_cholesky = None
        return _Normal(
            mean=constant_rv.support @ norm_rv.mean,
            cov=constant_rv.support @ (norm_rv.cov @ constant_rv.support.T),
            cov_cholesky=cov_cholesky,
        )

    # This part does not do the Cholesky update for performance reasons:
    # currently there is no way to switch the Cholesky updates off, which
    # might affect (large, potentially sparse) covariance matrices of
    # matrix-variate Normal RVs. See Issue #335.
    if constant_rv.support.ndim == 1:
        constant_rv_support = constant_rv.support[None, :]
    else:
        constant_rv_support = constant_rv.support

    cov_update = _linear_operators.Kronecker(
        constant_rv_support,
        _linear_operators.Identity(norm_rv.shape[1]),
    )

    # Cov(rvec(AX)) = Cov((A (x) I)rvec(X)) = (A (x) I)Cov(rvec(X))(A (x) I).T
    return _Normal(
        mean=constant_rv.support @ norm_rv.mean,
        cov=cov_update @ (norm_rv.cov @ cov_update.T),
    )
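Analogously, the comment here uses rvec(AX) = (A (x) I) rvec(X); the corresponding numpy check (shapes again illustrative):

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((3, 4))
A = rng.standard_normal((2, 3))

T = np.kron(A, np.eye(4))  # (A (x) I), acting on the row-major vec
np.testing.assert_allclose((A @ X).reshape(-1), T @ X.reshape(-1))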
Example 16
"""Probabilistic linear solver state test cases."""

import numpy as np
from pytest_cases import case

from probnum import linalg, linops, randvars
from probnum.problems.zoo.linalg import random_linear_system, random_spd_matrix

# Problem
n = 10
linsys = random_linear_system(rng=np.random.default_rng(42),
                              matrix=random_spd_matrix,
                              dim=n)

# Prior
Ainv = randvars.Normal(mean=linops.Identity(n),
                       cov=linops.SymmetricKronecker(linops.Identity(n)))
b = randvars.Constant(linsys.b)
prior = linalg.solvers.beliefs.LinearSystemBelief(
    A=randvars.Constant(linsys.A),
    Ainv=Ainv,
    x=Ainv @ b,
    b=b,
)


@case(tags=["initial"])
def case_initial_state():
    """Initial state of a linear solver."""
    return linalg.solvers.LinearSolverState(problem=linsys, prior=prior)
Example 17
import numpy as np
from pytest_cases import case

from probnum import linalg, linops, randvars
from probnum.problems.zoo.linalg import random_linear_system, random_spd_matrix

# Problem
n = 10
linsys = random_linear_system(
    rng=np.random.default_rng(42), matrix=random_spd_matrix, dim=n
)

# Prior
Ainv = randvars.Normal(
    mean=linops.Identity(n), cov=linops.SymmetricKronecker(linops.Identity(n))
)
b = randvars.Constant(linsys.b)
prior = linalg.solvers.beliefs.LinearSystemBelief(
    A=randvars.Constant(linsys.A),
    Ainv=Ainv,
    x=(Ainv @ b[:, None]).reshape(
        (n,)
    ),  # TODO: This can be replaced by Ainv @ b once https://github.com/probabilistic-numerics/probnum/issues/456 is fixed
    b=b,
)


@case(tags=["initial"])
def case_initial_state(
    rng: np.random.Generator,
Example 18
def vector() -> np.ndarray:
    rng = np.random.default_rng(526367 + n)
    return rng.standard_normal(size=(n, ))


@pytest.fixture(scope="module")
def vectors() -> np.ndarray:
    rng = np.random.default_rng(234 + n)
    return rng.standard_normal(size=(2, 10, n))


@pytest.fixture(
    scope="module",
    params=[
        np.eye(n),
        linops.Identity(n),
        linops.Scaling(factors=1.0, shape=(n, n)),
        np.inner,
    ],
)
def inprod(request):
    return request.param


@pytest.fixture(
    scope="module",
    params=[
        partial(double_gram_schmidt, gram_schmidt_fn=gram_schmidt),
        partial(double_gram_schmidt, gram_schmidt_fn=modified_gram_schmidt),
    ],
)
Example 19
    def _get_prior_params(self, A0, Ainv0, x0, b):
        """Get the parameters of the matrix priors on A and H.

        Retrieves and / or initializes prior parameters of ``A0`` and ``Ainv0``.

        Parameters
        ----------
        A0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional
            A square matrix, linear operator or random variable representing the
            prior belief over the linear operator :math:`A`. If an array or linear
            operator is given, a prior distribution is chosen automatically.
        Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional
            A square matrix, linear operator or random variable representing the
            prior belief over the inverse :math:`H = A^{-1}`. This can be viewed as
            taking the form of a pre-conditioner. If an array or linear operator is
            given, a prior distribution is chosen automatically.
        x0 : array-like or RandomVariable, shape=(n,) or (n, nrhs), optional
            Prior belief for the solution of the linear system. Will be ignored if
            ``A0`` or ``Ainv0`` is given.
        b : array-like, shape=(n,) or (n, nrhs)
            Right-hand side vector or matrix in :math:`A x = b`.

        Returns
        -------
        A0_mean : array-like or LinearOperator, shape=(n,n)
            Prior mean of the linear operator :math:`A`.
        A0_covfactor : array-like or LinearOperator, shape=(n,n)
            Factor :math:`W^A` of the symmetric Kronecker product prior covariance
            :math:`W^A \\otimes_s W^A` of :math:`A`.
        Ainv0_mean : array-like or LinearOperator, shape=(n,n)
            Prior mean of the linear operator :math:`H`.
        Ainv0_covfactor : array-like or LinearOperator, shape=(n,n)
            Factor :math:`W^H` of the symmetric Kronecker product prior covariance
            :math:`W^H \\otimes_s W^H` of :math:`H`.
        """
        self.is_calib_covclass = False
        # No matrix priors specified
        if A0 is None and Ainv0 is None:
            self.is_calib_covclass = True
            # No prior information given
            if x0 is None:
                Ainv0_mean = linops.Identity(shape=self.n)
                Ainv0_covfactor = linops.Identity(shape=self.n)
                # Symmetric posterior correspondence
                A0_mean = linops.Identity(shape=self.n)
                A0_covfactor = self.A
                return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor
            # Construct matrix priors from initial guess x0
            elif isinstance(x0, np.ndarray):
                A0_mean, Ainv0_mean = self._construct_symmetric_matrix_prior_means(
                    A=self.A, x0=x0, b=b)
                Ainv0_covfactor = Ainv0_mean
                # Symmetric posterior correspondence
                A0_covfactor = self.A
                return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor
            elif isinstance(x0, randvars.RandomVariable):
                raise NotImplementedError

        # Prior on Ainv specified
        if not isinstance(A0, randvars.RandomVariable) and Ainv0 is not None:
            if isinstance(Ainv0, randvars.RandomVariable):
                Ainv0_mean = Ainv0.mean
                Ainv0_covfactor = Ainv0.cov.A
            else:
                self.is_calib_covclass = True
                Ainv0_mean = Ainv0
                Ainv0_covfactor = Ainv0  # Symmetric posterior correspondence
            try:
                if A0 is not None:
                    A0_mean = A0
                elif isinstance(Ainv0, randvars.RandomVariable):
                    A0_mean = Ainv0.mean.inv()
                else:
                    A0_mean = Ainv0.inv()
            except AttributeError:
                warnings.warn(
                    "Prior specified only for Ainv. Inverting prior mean naively. "
                    "This operation is computationally costly! Specify an inverse "
                    "prior (mean) instead.")
                A0_mean = np.linalg.inv(Ainv0.mean)
            except NotImplementedError:
                A0_mean = linops.Identity(self.n)
                warnings.warn(
                    "Prior specified only for Ainv. Automatic prior mean inversion "
                    "not implemented, falling back to standard normal prior.")
            # Symmetric posterior correspondence
            A0_covfactor = self.A
            return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor

        # Prior on A specified
        elif A0 is not None and not isinstance(Ainv0, randvars.RandomVariable):
            if isinstance(A0, randvars.RandomVariable):
                A0_mean = A0.mean
                A0_covfactor = A0.cov.A
            else:
                self.is_calib_covclass = True
                A0_mean = A0
                A0_covfactor = A0  # Symmetric posterior correspondence
            try:
                if Ainv0 is not None:
                    Ainv0_mean = Ainv0
                elif isinstance(A0, randvars.RandomVariable):
                    Ainv0_mean = A0.mean.inv()
                else:
                    Ainv0_mean = A0.inv()
            except AttributeError:
                warnings.warn(
                    "Prior specified only for A. Inverting prior mean naively. "
                    "This operation is computationally costly! "
                    "Specify an inverse prior (mean).")
                Ainv0_mean = np.linalg.inv(A0.mean)
            except NotImplementedError:
                Ainv0_mean = linops.Identity(self.n)
                warnings.warn(
                    "Prior specified only for A. Automatic prior mean inversion "
                    "failed, falling back to standard normal prior.")
            # Symmetric posterior correspondence
            Ainv0_covfactor = Ainv0_mean
            return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor
        # Both matrix priors on A and H specified via random variables
        elif isinstance(A0, randvars.RandomVariable) and isinstance(
                Ainv0, randvars.RandomVariable):
            A0_mean = A0.mean
            A0_covfactor = A0.cov.A
            Ainv0_mean = Ainv0.mean
            Ainv0_covfactor = Ainv0.cov.A
            return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor
        else:
            raise NotImplementedError
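For reference, the defaults returned by the first branch (no matrix priors, no x0) encode the following random variables; this sketch only reuses the probnum API seen in the other examples, with n = 4 and a stand-in system matrix chosen for illustration:

import numpy as np
from probnum import linops, randvars

n = 4  # illustrative system size
A = np.diag(np.arange(1.0, n + 1))  # stand-in for self.A

# A0: mean I, symmetric Kronecker covariance with factor W^A = A
A0 = randvars.Normal(
    mean=linops.Identity(shape=n),
    cov=linops.SymmetricKronecker(A=A),
)
# Ainv0: mean I, covariance factor W^H = I
Ainv0 = randvars.Normal(
    mean=linops.Identity(shape=n),
    cov=linops.SymmetricKronecker(A=linops.Identity(shape=n)),
)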