def __call__(self, step):
    # Elementwise scaling factors depending on the step size.
    scaling_vector = np.abs(step) ** self.powers / self.scales
    if config.matrix_free:
        # Return a structured linear operator instead of a dense matrix.
        return linops.IdentityKronecker(
            num_blocks=self.dimension, B=linops.Scaling(factors=scaling_vector)
        )
    return np.kron(np.eye(self.dimension), np.diag(scaling_vector))
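
# Hedged sketch (not part of the original module): both branches above are
# intended to represent the same operator, kron(I_d, diag(|step|**powers / scales)).
# Assuming probnum is installed, the equivalence can be checked directly:
import numpy as np
from probnum import linops

d, q = 2, 3
powers = np.arange(q + 1)
scales = 2.0 ** np.arange(q + 1)
step = 0.1

scaling_vector = np.abs(step) ** powers / scales
matrix_free = linops.IdentityKronecker(
    num_blocks=d, B=linops.Scaling(factors=scaling_vector)
)
dense = np.kron(np.eye(d), np.diag(scaling_vector))
np.testing.assert_allclose(matrix_free.todense(), dense)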
def test_induced_solution_belief(rng: np.random.Generator):
    """Test whether a consistent belief over the solution is inferred from a
    belief over the inverse."""
    n = 5
    A = randvars.Constant(random_spd_matrix(dim=n, rng=rng))
    Ainv = randvars.Normal(
        mean=linops.Scaling(factors=1 / np.diag(A.mean)),
        cov=linops.SymmetricKronecker(linops.Identity(n)),
    )
    b = randvars.Constant(rng.normal(size=(n, 1)))
    prior = LinearSystemBelief(A=A, Ainv=Ainv, x=None, b=b)

    x_infer = Ainv @ b

    np.testing.assert_allclose(prior.x.mean, x_infer.mean)
    np.testing.assert_allclose(prior.x.cov.todense(), x_infer.cov.todense())
def __init__(
    self,
    support: _ValueType,
):
    if np.isscalar(support):
        support = _utils.as_numpy_scalar(support)

    self._support = support

    support_floating = self._support.astype(
        np.promote_types(self._support.dtype, np.float_)
    )

    if config.matrix_free:
        cov = lambda: (
            linops.Scaling(
                0.0,
                shape=(self._support.size, self._support.size),
                dtype=support_floating.dtype,
            )
            if self._support.ndim > 0
            else _utils.as_numpy_scalar(0.0, support_floating.dtype)
        )
    else:
        cov = lambda: np.broadcast_to(
            _utils.as_numpy_scalar(0.0, support_floating.dtype),
            shape=(
                (self._support.size, self._support.size)
                if self._support.ndim > 0
                else ()
            ),
        )

    var = lambda: np.broadcast_to(
        _utils.as_numpy_scalar(0.0, support_floating.dtype),
        shape=self._support.shape,
    )

    super().__init__(
        shape=self._support.shape,
        dtype=self._support.dtype,
        parameters={"support": self._support},
        sample=self._sample,
        in_support=lambda x: np.all(x == self._support),
        pmf=lambda x: np.float_(1.0 if np.all(x == self._support) else 0.0),
        cdf=lambda x: np.float_(1.0 if np.all(x >= self._support) else 0.0),
        mode=lambda: self._support,
        median=lambda: support_floating,
        mean=lambda: support_floating,
        cov=cov,
        var=var,
        std=var,
    )
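
# Hedged usage sketch (not from the original file): with the default
# configuration (config.matrix_free == False), a Constant random variable has
# its support as mean/mode and an all-zero dense covariance. Assumes probnum
# is installed.
import numpy as np
from probnum import randvars

rv = randvars.Constant(support=np.array([1.0, 2.0, 3.0]))
print(rv.mean)  # [1. 2. 3.]
print(rv.cov)   # 3x3 zero matrix
print(rv.var)   # [0. 0. 0.]
print(rv.pmf(np.array([1.0, 2.0, 3.0])))  # 1.0 -- all mass on the support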
def test_perfect_information(
    solver: ProbabilisticLinearSolver,
    problem: problems.LinearSystem,
    ncols: int,
):
    """Test whether a solver given perfect information converges instantly."""
    # Construct prior belief with perfect information
    belief = beliefs.LinearSystemBelief(
        x=randvars.Normal(
            mean=problem.solution,
            cov=linops.Scaling(factors=0.0, shape=(ncols, ncols)),
        ),
        A=randvars.Constant(problem.A),
        Ainv=randvars.Constant(np.linalg.inv(problem.A @ np.eye(ncols))),
    )

    # Run solver
    belief, solver_state = solver.solve(
        prior=belief, problem=problem, rng=np.random.default_rng(1)
    )

    # Check for instant convergence
    assert solver_state.step == 0
    np.testing.assert_allclose(belief.x.mean, problem.solution)
def get_linear_system(name: str, dim: int):
    rng = np.random.default_rng(0)
    if name == "dense":
        if dim > 1000:
            raise NotImplementedError()
        A = random_spd_matrix(rng=rng, dim=dim)
    elif name == "sparse":
        A = random_sparse_spd_matrix(
            rng=rng, dim=dim, density=np.minimum(1.0, 1000 / dim**2)
        )
    elif name == "linop":
        # TODO: Larger benchmarks currently fail. Remove this restriction once
        # the PLS refactor is resolved:
        # https://github.com/probabilistic-numerics/probnum/issues/51
        if dim > 100:
            raise NotImplementedError()
        A = linops.Scaling(factors=rng.normal(size=(dim,)))
    else:
        raise NotImplementedError()

    solution = rng.normal(size=(dim,))
    b = A @ solution
    return problems.LinearSystem(A=A, b=b, solution=solution)
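
# Hedged usage sketch (not part of the benchmark file): each branch produces a
# consistent system, i.e. A @ solution reproduces b by construction.
problem = get_linear_system("dense", dim=50)
np.testing.assert_allclose(problem.A @ problem.solution, problem.b)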
def case_scaling_linop() -> linops.Scaling:
    return linops.Scaling(np.arange(10))
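
# Hedged aside (not part of the original cases file): linops.Scaling behaves
# like a diagonal matrix, so the case above acts like np.diag(np.arange(10)).
import numpy as np
from probnum import linops

np.testing.assert_allclose(
    linops.Scaling(np.arange(10)) @ np.ones(10), np.arange(10)
)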
    rng = np.random.default_rng(526367 + n)
    return rng.standard_normal(size=(n,))


@pytest.fixture(scope="module")
def vectors() -> np.ndarray:
    rng = np.random.default_rng(234 + n)
    return rng.standard_normal(size=(2, 10, n))


@pytest.fixture(
    scope="module",
    params=[
        np.eye(n),
        linops.Identity(n),
        linops.Scaling(factors=1.0, shape=(n, n)),
        np.inner,
    ],
)
def inprod(request):
    return request.param


@pytest.fixture(
    scope="module",
    params=[
        partial(double_gram_schmidt, gram_schmidt_fn=gram_schmidt),
        partial(double_gram_schmidt, gram_schmidt_fn=modified_gram_schmidt),
    ],
)
def orthogonalization_fn(request):
    return request.param
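
# Hedged illustration (plain NumPy, independent of probnum's
# double_gram_schmidt): the fixtures above exercise (re-)orthogonalization
# routines. A minimal modified Gram-Schmidt step against an orthonormal basis
# looks like this:
import numpy as np


def modified_gram_schmidt_sketch(v: np.ndarray, basis: np.ndarray) -> np.ndarray:
    """Orthogonalize ``v`` against the orthonormal columns of ``basis``."""
    v = v.copy()
    for i in range(basis.shape[1]):
        u = basis[:, i]
        v -= (u @ v) * u  # subtract projections one column at a time
    return v


basis, _ = np.linalg.qr(np.random.default_rng(0).standard_normal((5, 3)))
v = np.random.default_rng(1).standard_normal(5)
v_orth = modified_gram_schmidt_sketch(v, basis)
np.testing.assert_allclose(basis.T @ v_orth, np.zeros(3), atol=1e-12)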
def _construct_symmetric_matrix_prior_means(self, A, x0, b):
    """Create matrix prior means from an initial guess for the solution of the
    linear system.

    Constructs a matrix-variate prior mean for H from ``x0`` and ``b`` such
    that :math:`H_0 b = x_0`, :math:`H_0` symmetric positive definite and
    :math:`A_0 = H_0^{-1}`.

    Parameters
    ----------
    A : array-like or LinearOperator, shape=(n,n)
        System matrix assumed to be square.
    x0 : array-like, shape=(n,) or (n, nrhs)
        Optional. Guess for the solution of the linear system.
    b : array_like, shape=(n,) or (n, nrhs)
        Right-hand side vector or matrix in :math:`A x = b`.

    Returns
    -------
    A0_mean : linops.LinearOperator
        Mean of the matrix-variate prior distribution on the system matrix
        :math:`A`.
    Ainv0_mean : linops.LinearOperator
        Mean of the matrix-variate prior distribution on the inverse of the
        system matrix :math:`H = A^{-1}`.
    """
    # Check inner product between x0 and b; if negative or zero, choose a
    # better initialization.
    bx0 = np.squeeze(b.T @ x0)
    bb = np.linalg.norm(b) ** 2
    if bx0 < 0:
        x0 = -x0
        bx0 = -bx0
        print("Better initialization found, setting x0 = - x0.")
    elif bx0 == 0:
        if np.all(b == np.zeros_like(b)):
            print("Right-hand-side is zero. Initializing with solution x0 = 0.")
            x0 = b
        else:
            print("Better initialization found, setting x0 = (b'b/b'Ab) * b.")
            bAb = np.squeeze(b.T @ (A @ b))
            x0 = bb / bAb * b
            bx0 = bb**2 / bAb

    # Construct prior means of A and H.
    alpha = 0.5 * bx0 / bb

    def _matmul(M):
        return (x0 - alpha * b) @ (x0 - alpha * b).T @ M

    Ainv0_mean = linops.Scaling(
        alpha, shape=(self.n, self.n)
    ) + 2 / bx0 * linops.LinearOperator(
        shape=(self.n, self.n),
        dtype=np.result_type(x0.dtype, alpha.dtype, b.dtype),
        matmul=_matmul,
    )
    A0_mean = linops.Scaling(1 / alpha, shape=(self.n, self.n)) - 1 / (
        alpha * np.squeeze((x0 - alpha * b).T @ x0)
    ) * linops.LinearOperator(
        shape=(self.n, self.n),
        dtype=np.result_type(x0.dtype, alpha.dtype, b.dtype),
        matmul=_matmul,
    )
    return A0_mean, Ainv0_mean
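
# Hedged numerical sketch (not part of the original class): a dense analogue
# of the construction above, verifying the docstring's claims H_0 b = x_0 and
# A_0 = H_0^{-1} for a generic x0 with b'x0 > 0. Uses plain NumPy only.
import numpy as np

rng = np.random.default_rng(42)
n = 4
b = rng.normal(size=(n, 1))
x0 = rng.normal(size=(n, 1))
if np.squeeze(b.T @ x0) <= 0:  # mirror the sign flip performed above
    x0 = -x0

bx0 = np.squeeze(b.T @ x0)
bb = np.linalg.norm(b) ** 2
alpha = 0.5 * bx0 / bb
w = x0 - alpha * b  # rank-one direction used in both prior means

# Dense analogues of Ainv0_mean and A0_mean.
H0 = alpha * np.eye(n) + 2 / bx0 * (w @ w.T)
A0 = 1 / alpha * np.eye(n) - 1 / (alpha * np.squeeze(w.T @ x0)) * (w @ w.T)

np.testing.assert_allclose(H0 @ b, x0)          # H_0 b = x_0
np.testing.assert_allclose(A0 @ H0, np.eye(n))  # A_0 = H_0^{-1}, by Sherman-Morrison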