Example #1
def test_symmkronecker_todense_symmetric():
    """Dense matrix from symmetric Kronecker product of two symmetric matrices must be symmetric."""
    C = np.array([[5, 1], [1, 10]])
    D = np.array([[-2, .1], [.1, 8]])
    Ws = linear_operators.SymmetricKronecker(A=C, B=C)
    Ws_dense = Ws.todense()
    np.testing.assert_array_equal(Ws_dense, Ws_dense.T,
                                  err_msg="Symmetric Kronecker product of symmetric matrices is not symmetric.")
Example #2
def test_matrixprior(matlinsolve):
    """Solve random linear system with a matrix-based linear solver."""
    np.random.seed(1)
    # Linear system
    n = 10
    A = np.random.rand(n, n)
    A = A.dot(A.T) + n * np.eye(n)  # make symmetric positive definite and well-conditioned
    b = np.random.rand(n, 1)

    # Prior distribution on A
    covA = linear_operators.SymmetricKronecker(A=np.eye(n), B=np.eye(n))
    Ainv0 = probability.RandomVariable(distribution=probability.Normal(mean=np.eye(n), cov=covA))

    x, Ahat, Ainvhat, info = matlinsolve(A=A, Ainv0=Ainv0, b=b)
    xnp = np.linalg.solve(A, b).ravel()

    np.testing.assert_allclose(x.mean(), xnp, rtol=1e-4,
                               err_msg="Solution does not match np.linalg.solve.")
Example #3
def test_symmetric_samples():
    """Samples from a normal distribution with symmetric Kronecker covariance of two symmetric matrices are
    symmetric."""
    np.random.seed(42)
    n = 3
    A = np.random.uniform(size=(n, n))
    A = 0.5 * (A + A.T) + n * np.eye(n)
    dist = probability.Normal(mean=np.eye(A.shape[0]),
                              cov=linear_operators.SymmetricKronecker(A=A))
    dist_sample = dist.sample(size=10)
    for i, B in enumerate(dist_sample):
        np.testing.assert_allclose(
            B, B.T, atol=1e-5, rtol=1e-5,
            err_msg="Sample {} from symmetric Kronecker distribution is not symmetric.".format(i))
Example #4
def test_posterior_distribution_parameters(matblinsolve, poisson_linear_system):
    """Compute the posterior parameters of the matrix-based probabilistic linear solvers directly and compare."""
    # Initialization
    A, f = poisson_linear_system
    S = []  # search directions
    Y = []  # observations

    # Priors
    H0 = linear_operators.Identity(A.shape[0])  # inverse prior mean
    A0 = linear_operators.Identity(A.shape[0])  # prior mean
    WH0 = H0  # inverse prior Kronecker factor
    WA0 = A  # prior Kronecker factor
    covH = linear_operators.SymmetricKronecker(WH0, WH0)
    covA = linear_operators.SymmetricKronecker(WA0, WA0)
    Ahat0 = probability.RandomVariable(distribution=probability.Normal(mean=A0, cov=covA))
    Ainvhat0 = probability.RandomVariable(distribution=probability.Normal(mean=H0, cov=covH))

    # Define callback function to obtain search directions
    def callback_postparams(xk, Ak, Ainvk, sk, yk, alphak, resid):
        S.append(sk)
        Y.append(yk)

    # Solve linear system
    u_solver, Ahat, Ainvhat, info = matblinsolve(A=A, b=f, A0=Ahat0, Ainv0=Ainvhat0,
                                                 callback=callback_postparams, calibrate=False)

    # Create arrays from lists
    S = np.squeeze(np.array(S)).T
    Y = np.squeeze(np.array(Y)).T

    # E[A] and E[A^-1]
    def posterior_mean(A0, WA0, S, Y):
        """Compute posterior mean of the symmetric probabilistic linear solver."""
        Delta = (Y - A0 @ S)
        U_T = np.linalg.solve(S.T @ (WA0 @ S), (WA0 @ S).T)
        U = U_T.T
        Ak = A0 + Delta @ U_T + U @ Delta.T - U @ S.T @ Delta @ U_T
        return Ak

    Ak = posterior_mean(A0.todense(), WA0, S, Y)
    Hk = posterior_mean(H0.todense(), WH0, Y, S)

    np.testing.assert_allclose(Ahat.mean().todense(), Ak, rtol=1e-5,
                               err_msg="The matrix estimated by the probabilistic linear solver does not match the " +
                                       "directly computed one.")
    np.testing.assert_allclose(Ainvhat.mean().todense(), Hk, rtol=1e-5,
                               err_msg="The inverse matrix estimated by the probabilistic linear solver does not " +
                                       "match the directly computed one.")

    # Cov[A] and Cov[A^-1]
    def posterior_cov_kronfac(WA0, S):
        """Compute the covariance symmetric Kronecker factor of the probabilistic linear solver."""
        U_AT = np.linalg.solve(S.T @ (WA0 @ S), (WA0 @ S).T)
        covfac = WA0 @ (np.identity(np.shape(WA0)[0]) - S @ U_AT)
        return covfac

    A_covfac = posterior_cov_kronfac(WA0, S)
    H_covfac = posterior_cov_kronfac(WH0, Y)

    np.testing.assert_allclose(Ahat.cov().A.todense(), A_covfac, rtol=1e-5,
                               err_msg="The covariance estimated by the probabilistic linear solver does not match the " +
                                       "directly computed one.")
    np.testing.assert_allclose(Ainvhat.cov().A.todense(), H_covfac, rtol=1e-5,
                               err_msg="The covariance estimated by the probabilistic linear solver does not " +
                                       "match the directly computed one.")
Example #5
    def _create_output_randvars(self, S=None, Y=None, Phi=None, Psi=None):
        """Return output random variables x, A, Ainv from their means and covariances."""

        _A_covfactor = self.A_covfactor
        _Ainv_covfactor = self.Ainv_covfactor

        # Set degrees of freedom based on uncertainty calibration in unexplored space
        if Phi is not None:

            def _mv(x):
                def _I_S_fun(x):
                    return x - S @ np.linalg.solve(S.T @ S, S.T @ x)

                return _I_S_fun(Phi @ _I_S_fun(x))

            I_S_Phi_I_S_op = linear_operators.LinearOperator(
                shape=self.A.shape, matvec=_mv)
            _A_covfactor = self.A_covfactor + I_S_Phi_I_S_op

        if Psi is not None:

            def _mv(x):
                def _I_Y_fun(x):
                    return x - Y @ np.linalg.solve(Y.T @ Y, Y.T @ x)

                return _I_Y_fun(Psi @ _I_Y_fun(x))

            I_Y_Psi_I_Y_op = linear_operators.LinearOperator(
                shape=self.A.shape, matvec=_mv)
            _Ainv_covfactor = self.Ainv_covfactor + I_Y_Psi_I_Y_op

        # Create output random variables
        A = probability.RandomVariable(
            shape=self.A_mean.shape,
            dtype=float,
            distribution=probability.Normal(
                mean=self.A_mean,
                cov=linear_operators.SymmetricKronecker(A=_A_covfactor)))
        cov_Ainv = linear_operators.SymmetricKronecker(A=_Ainv_covfactor)
        Ainv = probability.RandomVariable(shape=self.Ainv_mean.shape,
                                          dtype=float,
                                          distribution=probability.Normal(
                                              mean=self.Ainv_mean,
                                              cov=cov_Ainv))
        # Induced distribution on x via Ainv
        # E[x] = A^{-1} b,  Cov[x] = 1/2 * (b'Wb * W + W b b' W)  with W = _Ainv_covfactor
        Wb = _Ainv_covfactor @ self.b
        bWb = np.squeeze(Wb.T @ self.b)

        def _mv(x):
            return 0.5 * (bWb * _Ainv_covfactor @ x + Wb @ (Wb.T @ x))

        cov_op = linear_operators.LinearOperator(
            shape=np.shape(_Ainv_covfactor),
            dtype=float,
            matvec=_mv,
            matmat=_mv)

        x = probability.RandomVariable(shape=(self.A_mean.shape[0], ),
                                       dtype=float,
                                       distribution=probability.Normal(
                                           mean=self.x.ravel(), cov=cov_op))
        return x, A, Ainv
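The induced covariance on the solution is assembled from the formula in the comment above, Cov[x] = 1/2 * (b'Wb * W + W b b' W), with W the Ainv covariance factor. A dense NumPy sketch of the same quantity (W here is a small symmetric positive definite stand-in, not an operator produced by the solver):

import numpy as np

n = 4
rng = np.random.default_rng(0)
M = rng.standard_normal((n, n))
W = M @ M.T + n * np.eye(n)           # stand-in for a dense Ainv covariance factor
b = rng.standard_normal((n, 1))

Wb = W @ b
bWb = (b.T @ Wb).item()
cov_x = 0.5 * (bWb * W + Wb @ Wb.T)   # what the matvec _mv above computes column-wise
assert np.allclose(cov_x, cov_x.T)    # the induced covariance is symmetric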
Example #6
def _preprocess_linear_system(A, b, assume_A, A0=None, Ainv0=None, x0=None):
    """
    Transform the linear system to linear operator and random variable form.

    Parameters
    ----------
    A : array-like or LinearOperator or RandomVariable
        A square matrix, linear operator or random variable representing the prior belief over :math:`A`.
    b : array_like, shape=(n,) or (n, nrhs)
        Right-hand side vector or matrix in :math:`A x = b`.
    assume_A : str, default="sympos"
        Assumptions on the matrix, which can influence solver choice or behavior. The available options are

        ====================  =========
         generic matrix       ``gen``
         symmetric            ``sym``
         positive definite    ``pos``
         symmetric pos. def.  ``sympos``
        ====================  =========

        If ``A`` or ``Ainv`` are random variables, then the encoded assumptions in the distribution are used
        automatically.
    A0 : RandomVariable, shape=(n,n)
        Random variable representing the prior belief over the linear operator :math:`A`.
    Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n)
        Optional. A square matrix, linear operator or random variable representing the prior belief over the inverse
        :math:`H=A^{-1}`.
    x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs)
        Optional. Prior belief for the solution of the linear system. Will be ignored if ``Ainv`` is given.

    Returns
    -------
    A : RandomVariable, shape=(n,n)
        Prior belief over the linear operator :math:`A`.
    b : array-like, shape=(n,) or (n, nrhs)
        Right-hand-side of the linear system.
    A0 : RandomVariable, shape=(n,n)
        Prior belief over the linear operator :math:`A`.
    Ainv0 : RandomVariable, shape=(n,n)
        Prior belief over the linear operator inverse :math:`H=A^{-1}`.
    x : array-like or RandomVariable, shape=(n,) or (n, nrhs)
        Prior belief over the solution :math:`x` to the linear system.
    """
    # Choose matrix based view if not clear from arguments
    if (Ainv0 is not None or A0 is not None) and x0 is not None:
        warnings.warn(
            "Cannot use prior information on both the matrix (inverse) and the solution. The latter will be ignored."
        )
        x = None
    else:
        x = x0

    # Check matrix assumptions
    if assume_A not in ["gen", "sym", "pos", "sympos"]:
        raise ValueError(
            '\'{}\' is not a recognized linear operator assumption.'.format(
                assume_A))

    # Choose priors for A and Ainv if not specified, based on matrix assumptions in "assume_A"
    if assume_A == "sympos":
        # No priors specified
        if A0 is None and Ainv0 is None:
            dist = probability.Normal(
                mean=linear_operators.Identity(shape=A.shape[0]),
                cov=linear_operators.SymmetricKronecker(
                    linear_operators.Identity(shape=A.shape[0])))
            Ainv0 = probability.RandomVariable(distribution=dist)

            dist = probability.Normal(
                mean=linear_operators.Identity(shape=A.shape[0]),
                cov=linear_operators.SymmetricKronecker(
                    linear_operators.Identity(shape=A.shape[0])))
            A0 = probability.RandomVariable(distribution=dist)
        # Only prior on Ainv specified
        elif A0 is None and Ainv0 is not None:
            try:
                if isinstance(Ainv0, probability.RandomVariable):
                    A0_mean = Ainv0.mean().inv()
                else:
                    A0_mean = Ainv0.inv()
            except AttributeError:
                warnings.warn(
                    message=
                    "Prior specified only for Ainv. Inverting prior mean naively. "
                    +
                    "This operation is computationally costly! Specify an inverse prior (mean) instead."
                )
                A0_mean = np.linalg.inv(Ainv0.mean())
            except NotImplementedError:
                A0_mean = linear_operators.Identity(A.shape[0])
                warnings.warn(
                    message=
                    "Prior specified only for Ainv. Automatic prior mean inversion not implemented, "
                    + "falling back to standard normal prior.")
            # hereditary positive definiteness
            A0_covfactor = A

            dist = probability.Normal(
                mean=A0_mean,
                cov=linear_operators.SymmetricKronecker(A=A0_covfactor))
            A0 = probability.RandomVariable(distribution=dist)
        # Only prior on A specified
        if A0 is not None and Ainv0 is None:
            try:
                if isinstance(A0, probability.RandomVariable):
                    Ainv0_mean = A0.mean().inv()
                else:
                    Ainv0_mean = A0.inv()
            except AttributeError:
                warnings.warn(
                    message=
                    "Prior specified only for A. Inverting prior mean naively. "
                    +
                    "This operation is computationally costly! Specify an inverse prior (mean) instead."
                )
                Ainv0_mean = np.linalg.inv(A0.mean())
            except NotImplementedError:
                Ainv0_mean = linear_operators.Identity(A.shape[0])
                warnings.warn(
                    message="Prior specified only for A. " +
                    "Automatic prior mean inversion failed, falling back to standard normal prior."
                )
            # (non-symmetric) posterior correspondence
            Ainv0_covfactor = Ainv0_mean

            dist = probability.Normal(
                mean=Ainv0_mean,
                cov=linear_operators.SymmetricKronecker(A=Ainv0_covfactor))
            Ainv0 = probability.RandomVariable(distribution=dist)

    elif assume_A == "sym":
        raise NotImplementedError
    elif assume_A == "pos":
        raise NotImplementedError
    elif assume_A == "gen":
        # TODO: Implement case where only a pre-conditioner is given as Ainv0
        # TODO: Automatic prior selection based on data scale, matrix trace, etc.
        raise NotImplementedError

    # Transform linear system to correct dimensions
    b = utils.as_colvec(b)  # (n,) -> (n, 1)
    if x0 is not None:
        x = utils.as_colvec(x0)  # (n,) -> (n, 1)

    assert (not (Ainv0 is None
                 and x is None)), "Neither Ainv nor x is specified."

    return A, b, A0, Ainv0, x
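The reshaping step near the end, utils.as_colvec(b), is documented inline as mapping shape (n,) to (n, 1). A minimal NumPy stand-in for that behavior (an assumption made for illustration; the real utility may handle more cases):

import numpy as np

def as_colvec_sketch(v):
    """Hypothetical stand-in for utils.as_colvec: promote a 1-D vector to a column vector."""
    v = np.asarray(v)
    if v.ndim == 1:
        return v[:, np.newaxis]   # (n,) -> (n, 1)
    return v                      # already 2-D, e.g. (n, nrhs)

assert as_colvec_sketch(np.ones(3)).shape == (3, 1)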
Example #7
def test_symmkronecker_commutation(A, B):
    """Symmetric Kronecker products fulfill A (x)_s B = B (x)_s A"""
    W = linear_operators.SymmetricKronecker(A=A, B=B)
    V = linear_operators.SymmetricKronecker(A=B, B=A)

    np.testing.assert_allclose(W.todense(), V.todense())
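This commutation property is what distinguishes the symmetric Kronecker product from the ordinary one. A plain NumPy contrast, assuming the dense symmetrization 0.5 * (A (x) B + B (x) A) as a stand-in for the library operator:

import numpy as np

A = np.array([[5., 1.], [1., 10.]])
B = np.array([[-2., 0.1], [0.1, 8.]])

# The ordinary Kronecker product does not commute in general ...
assert not np.allclose(np.kron(A, B), np.kron(B, A))

# ... but the symmetrized form is invariant under swapping the factors.
W = 0.5 * (np.kron(A, B) + np.kron(B, A))
V = 0.5 * (np.kron(B, A) + np.kron(A, B))
assert np.allclose(W, V)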
Example #8
def test_symmkronecker_transpose(A, B):
    """Kronecker product transpose property: (A (x) B)^T = A^T (x) B^T."""
    W = linear_operators.SymmetricKronecker(A=A, B=B)
    V = linear_operators.SymmetricKronecker(A=A.T, B=B.T)

    np.testing.assert_allclose(W.T.todense(), V.todense())
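The transpose property mirrors the one for the ordinary Kronecker product, (A (x) B)^T = A^T (x) B^T. A plain NumPy check, again using the dense symmetrization only as a stand-in for the library operator:

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((3, 3))
B = rng.standard_normal((3, 3))

assert np.allclose(np.kron(A, B).T, np.kron(A.T, B.T))   # ordinary Kronecker product

W = 0.5 * (np.kron(A, B) + np.kron(B, A))                # symmetrized form
V = 0.5 * (np.kron(A.T, B.T) + np.kron(B.T, A.T))
assert np.allclose(W.T, V)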
Example #9

# Linear operator arithmetic
np.random.seed(42)
scalars = [0, int(1), .1, -4.2, np.nan, np.inf]
arrays = [np.random.normal(size=[5, 4]), np.array([[3, 4],
                                                   [1, 5]])]
ops = [linear_operators.MatrixMult(np.array([[-1.5, 3],
                                             [0, -230]])),
       linear_operators.LinearOperator(shape=(2, 2), matvec=mv),  # mv: matrix-vector product callable defined earlier in the original test module
       linear_operators.Identity(shape=4),
       linear_operators.Kronecker(A=linear_operators.MatrixMult(np.array([[2, -3.5],
                                                                          [12, 6.5]])),
                                  B=linear_operators.Identity(shape=2)),
       linear_operators.SymmetricKronecker(A=linear_operators.MatrixMult(np.array([[1, -2],
                                                                                   [-2.2, 5]])),
                                           B=linear_operators.MatrixMult(np.array([[1, -3],
                                                                                   [0, -.5]])))]


@pytest.mark.parametrize("A, alpha", list(itertools.product(arrays, scalars)))
def test_scalar_mult(A, alpha):
    """Matrix linear operator multiplication with scalars."""
    Aop = linear_operators.MatrixMult(A)

    np.testing.assert_allclose((alpha * Aop).todense(), alpha * A)


@pytest.mark.parametrize("A, B", list(zip(arrays, arrays)))
def test_addition(A, B):
    """Linear operator addition"""
    Aop = linear_operators.MatrixMult(A)
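The listing is cut off at this point. A plausible continuation, following the same assert pattern as test_scalar_mult above (a sketch, not the original test body):

    # Hypothetical continuation -- the original source is truncated here.
    Bop = linear_operators.MatrixMult(B)

    np.testing.assert_allclose((Aop + Bop).todense(), A + B)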
Example #10
arrays2d = [
    np.empty(2),
    np.zeros(2),
    np.array([np.inf, 1]),
    np.array([1, -2.5])
]
matrices2d = [np.array([[1, 2], [3, 2]]), np.array([[0, 0], [1.0, -4.3]])]
linops2d = [linear_operators.MatrixMult(A=np.array([[1, 2], [4, 5]]))]
randvars2d = [
    probability.RandomVariable(distribution=probability.Normal(
        mean=np.array([1, 2]), cov=np.array([[2, 0], [0, 5]])))
]
randvars2x2 = [
    probability.RandomVariable(shape=(2, 2),
                               distribution=probability.Normal(
                                   mean=np.array([[-2, .3], [0, 1]]),
                                   cov=linear_operators.SymmetricKronecker(
                                       A=np.eye(2), B=np.ones((2, 2)))))
]


@pytest.mark.parametrize("x,rv", list(itertools.product(arrays2d, randvars2d)))
def test_rv_addition(x, rv):
    """Addition with random variables."""
    z1 = x + rv
    z2 = rv + x
    assert z1.shape == rv.shape
    assert z2.shape == rv.shape
    assert isinstance(z1, probability.RandomVariable)
    assert isinstance(z2, probability.RandomVariable)


@pytest.mark.parametrize("alpha, rv",