def test_register():
    """Registering, overriding, and temporarily setting a config entry."""
    # A freshly registered entry becomes an attribute with the given default.
    probnum.config.register("some_config", 3.14, "Dummy description.")
    assert hasattr(probnum.config, "some_config")
    assert probnum.config.some_config == 3.14

    # Re-registering under an already existing name must raise.
    with pytest.raises(KeyError):
        probnum.config.register("some_config", 4.2, "Dummy description.")

    # The context manager overrides the value only within its scope ...
    with probnum.config(some_config=9.9):
        assert probnum.config.some_config == 9.9
    # ... and the previous value is restored on exit.
    assert probnum.config.some_config == 3.14

    # Direct attribute assignment changes the value permanently.
    probnum.config.some_config = 4.5
    assert probnum.config.some_config == 4.5

    # Setting an unregistered entry fails — via the context manager ...
    with pytest.raises(AttributeError):
        with probnum.config(unknown_config=False):
            pass
    # ... and via direct attribute access alike.
    with pytest.raises(AttributeError):
        probnum.config.unknown_config = False
def test_cov_linops(self):
    """The `matrix_free` option controls the type of `Constant.cov`."""
    # Skip the scalar support (index 0): linear operators make no sense there.
    for support in self.supports[1:]:
        with self.subTest():
            # Matrix-free mode yields a LinearOperator covariance ...
            with config(matrix_free=True):
                constant_rv = randvars.Constant(support=support)
                assert isinstance(constant_rv.cov, linops.LinearOperator)
            # ... while dense mode yields a plain ndarray.
            with config(matrix_free=False):
                constant_rv = randvars.Constant(support=support)
                assert isinstance(constant_rv.cov, np.ndarray)
def _setup(
    self,
    some_num_derivatives,
    forw_impl_string_linear_gauss,
    backw_impl_string_linear_gauss,
):
    """Construct an integrated Wiener transition plus callable views of its dynamics."""
    self.some_num_derivatives = some_num_derivatives
    # One spatial dimension keeps the fixtures compatible with some_normal_rv1 etc.
    wiener_process_dimension = 1

    with config(matrix_free=True):
        self.transition = randprocs.markov.integrator.IntegratedWienerTransition(
            num_derivatives=self.some_num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
            forward_implementation=forw_impl_string_linear_gauss,
            backward_implementation=backw_impl_string_linear_gauss,
        )

        # Time-dependent views (constant in t) of the system matrices ...
        self.G = lambda t: self.transition.drift_matrix
        self.v = lambda t: self.transition.force_vector
        self.L = lambda t: self.transition.dispersion_matrix
        # ... and their constant counterparts.
        self.G_const = self.transition.drift_matrix
        self.v_const = self.transition.force_vector
        self.L_const = self.transition.dispersion_matrix

        # Affine drift, its Jacobian, and the dispersion as plain functions.
        self.g = lambda t, x: self.G(t) @ x + self.v(t)
        self.dg = lambda t, x: self.G(t)
        self.l = lambda t, x: self.L(t)
def setup(self, matrix_free, len_trajectory, num_derivatives, dimension):
    """Prepare a Markov process and base-measure realizations for the benchmark."""
    with config(matrix_free=matrix_free):
        dynamics = randprocs.markov.integrator.IntegratedWienerTransition(
            num_derivatives=num_derivatives,
            wiener_process_dimension=dimension,
            forward_implementation="classic",
            backward_implementation="classic",
        )

        measurement_variance = 0.1024
        initrv = randvars.Normal(
            np.ones(dynamics.state_dimension),
            measurement_variance * linops.Identity(dynamics.state_dimension),
        )

        time_domain = (0.0, float(len_trajectory))
        self.time_grid = np.arange(*time_domain)
        self.markov_process = randprocs.markov.MarkovProcess(
            initarg=time_domain[0], initrv=initrv, transition=dynamics
        )

        # Fixed seed keeps benchmark runs comparable across invocations.
        rng = np.random.default_rng(seed=1)
        self.base_measure_realization = scipy.stats.norm.rvs(
            size=(self.time_grid.shape + initrv.shape),
            random_state=rng,
        )
def _setup(
    self,
    test_ndim,
    spdmat1,
    spdmat2,
):
    """Create classic- and sqrt-implementation LinearGaussian transitions backed by LinOps."""
    with config(matrix_free=True):
        # Time-invariant noise and transition-matrix callables.
        self.noise_fun = lambda t: randvars.Normal(
            mean=np.arange(test_ndim), cov=linops.aslinop(spdmat2)
        )
        self.transition_matrix_fun = lambda t: linops.aslinop(spdmat1)

        self.transition = randprocs.markov.discrete.LinearGaussian(
            input_dim=test_ndim,
            output_dim=test_ndim,
            transition_matrix_fun=self.transition_matrix_fun,
            noise_fun=self.noise_fun,
            forward_implementation="classic",
            backward_implementation="classic",
        )
        self.sqrt_transition = randprocs.markov.discrete.LinearGaussian(
            input_dim=test_ndim,
            output_dim=test_ndim,
            transition_matrix_fun=self.transition_matrix_fun,
            noise_fun=self.noise_fun,
            forward_implementation="sqrt",
            backward_implementation="sqrt",
        )

        # The same dynamics and Jacobian as plain functions of (t, x).
        self.transition_fun = lambda t, x: self.transition_matrix_fun(t) @ x
        self.transition_fun_jacobian = lambda t, x: self.transition_matrix_fun(t)
def _setup(
    self,
    test_ndim,
    spdmat1,
    spdmat2,
):
    """Create classic- and sqrt-implementation LinearGaussian transitions from LinOps."""
    with config(matrix_free=True):
        # Time-invariant state matrix, noise covariance, and shift callables.
        self.G = lambda t: linops.aslinop(spdmat1)
        self.S = lambda t: linops.aslinop(spdmat2)
        self.v = lambda t: np.arange(test_ndim)

        self.transition = randprocs.markov.discrete.LinearGaussian(
            test_ndim,
            test_ndim,
            self.G,
            self.v,
            self.S,
            forward_implementation="classic",
            backward_implementation="classic",
        )
        self.sqrt_transition = randprocs.markov.discrete.LinearGaussian(
            test_ndim,
            test_ndim,
            self.G,
            self.v,
            self.S,
            forward_implementation="sqrt",
            backward_implementation="sqrt",
        )

        # Affine dynamics and their Jacobian as plain functions of (t, x).
        self.g = lambda t, x: self.G(t) @ x + self.v(t)
        self.dg = lambda t, x: self.G(t)
def test_discretise_values(self, ah_22_ibm, qh_22_ibm, dt):
    """Discretisation reproduces the reference IBM(2) system matrices."""
    with config(matrix_free=True):
        discretised = self.transition.discretise(dt=dt)
        # Compare densified LinOps against the dense reference matrices.
        np.testing.assert_allclose(
            discretised.transition_matrix.todense(), ah_22_ibm
        )
        np.testing.assert_allclose(discretised.noise.cov.todense(), qh_22_ibm)
def _setup(self):
    """Create an IBM(2) transition in matrix-free mode."""
    # One spatial dimension keeps the fixtures compatible with some_normal_rv1 etc.
    wiener_process_dimension = 1
    with config(matrix_free=True):
        self.transition = randprocs.markov.integrator.IntegratedWienerTransition(
            num_derivatives=2,
            wiener_process_dimension=wiener_process_dimension,
            forward_implementation="classic",
            backward_implementation="classic",
        )
def time_sample(self, matrix_free, len_trajectory, num_derivatives, dimension):
    """Benchmark the joint forward transformation of the base-measure realizations."""
    with config(matrix_free=matrix_free):
        transition = self.markov_process.transition
        transition.jointly_transform_base_measure_realization_list_forward(
            base_measure_realizations=self.base_measure_realization,
            t=self.time_grid,
            initrv=self.markov_process.initrv,
            _diffusion_list=np.ones_like(self.time_grid[:-1]),
        )
def test_lazy_matrix_matrix_matmul_option():
    """`lazy_matrix_matrix_matmul` toggles lazy products vs. eager dense matmul."""
    mat1 = get_linop(Matrix)[0]
    mat2 = get_linop(Matrix)[0]
    inv = get_linop(_InverseLinearOperator)
    transposed = get_linop(TransposedLinearOperator)

    # The same operand pairs are checked under both config settings.
    operand_pairs = [
        (mat1, mat2),
        (mat1, inv),
        (inv, mat2),
        (mat1, transposed),
        (transposed, mat2),
    ]

    # Lazy mode: every product stays a ProductLinearOperator.
    with config(lazy_matrix_matrix_matmul=True):
        for left, right in operand_pairs:
            assert isinstance(left @ right, ProductLinearOperator)

    # Eager mode: the same products collapse to a dense Matrix.
    with config(lazy_matrix_matrix_matmul=False):
        for left, right in operand_pairs:
            assert isinstance(left @ right, Matrix)
def test_forward_realization_values(
    self, normal_rv3x3, diffusion, ah_22_ibm, qh_22_ibm, dt
):
    """Forward realization matches the closed-form IBM(2) mean and covariance."""
    with config(matrix_free=True):
        realization = normal_rv3x3.mean
        rv, _ = self.transition.forward_realization(
            realization, t=0.0, dt=dt, _diffusion=diffusion
        )
        np.testing.assert_allclose(ah_22_ibm @ realization, rv.mean)
        np.testing.assert_allclose(diffusion * qh_22_ibm, rv.cov.todense())
def test_backward_realization(self, some_normal_rv1, some_normal_rv2):
    """Backward realization warns on dense covariances and accepts LinOp ones."""
    with config(matrix_free=True):
        array_cov_rv = some_normal_rv2
        linop_cov_rv = randvars.Normal(
            array_cov_rv.mean.copy(), linops.aslinop(array_cov_rv.cov)
        )

        # A dense covariance in matrix-free mode triggers a RuntimeWarning.
        with pytest.warns(RuntimeWarning):
            self.transition.backward_realization(some_normal_rv1.mean, array_cov_rv)

        # A LinearOperator covariance is handled without warning.
        out, _ = self.transition.backward_realization(
            some_normal_rv1.mean, linop_cov_rv
        )
        assert isinstance(out, randvars.Normal)
def test_defaults():
    """Every registered option has its default outside the context manager."""
    # Mapping that temporarily nulls out every registered option at once.
    none_vals = {key: None for (key, _, _) in _DEFAULT_CONFIG_OPTIONS}

    for key, default_val, _ in _DEFAULT_CONFIG_OPTIONS:
        # The default is in effect before entering the context manager.
        assert getattr(probnum.config, key) == default_val

        # Inside the context manager every entry reads as None ...
        with probnum.config(**none_vals):
            assert getattr(probnum.config, key) is None

        # ... and the original (default) value is restored on exit.
        assert getattr(probnum.config, key) == default_val
def test_forward_rv_values(
    self, normal_rv3x3, diffusion, ah_22_ibm, qh_22_ibm, dt
):
    """Forward RV propagation matches the closed-form IBM(2) moments."""
    with config(matrix_free=True):
        rv, _ = self.transition.forward_rv(
            normal_rv3x3, t=0.0, dt=dt, _diffusion=diffusion
        )
        # NOTE(review): the mean is compared on the first three coordinates
        # only (`rv[:3]`) — presumably matching the reference matrix size.
        np.testing.assert_allclose(ah_22_ibm @ normal_rv3x3.mean, rv[:3].mean)
        expected_cov = (
            ah_22_ibm @ normal_rv3x3.cov @ ah_22_ibm.T + diffusion * qh_22_ibm
        )
        np.testing.assert_allclose(expected_cov, rv.cov.todense())
def test_damping_factor_config(self):
    """`covariance_inversion_damping` sets the damping used when damping_factor=None."""
    mean, cov = self.params
    rv = randvars.Normal(mean, cov)

    # With damping_factor=None the default damping applies
    # (1e-12, as asserted by the comparison below).
    chol_standard_damping = rv.dense_cov_cholesky(damping_factor=None)
    self.assertAllClose(chol_standard_damping, np.sqrt(rv.cov + 1e-12))

    # Overriding the config changes the damping that None resolves to.
    with config(covariance_inversion_damping=1e-3):
        chol_altered_damping = rv.dense_cov_cholesky(damping_factor=None)
        self.assertAllClose(chol_altered_damping, np.sqrt(rv.cov + 1e-3))
def test_forward_rv(self, some_normal_rv1):
    """Matrix-free forward_rv warns on dense covs; sqrt implementation is unsupported."""
    array_cov_rv = some_normal_rv1
    linop_cov_rv = randvars.Normal(
        array_cov_rv.mean.copy(), linops.aslinop(array_cov_rv.cov)
    )

    with config(matrix_free=True):
        # A dense covariance in matrix-free mode triggers a RuntimeWarning.
        with pytest.warns(RuntimeWarning):
            self.transition.forward_rv(array_cov_rv, 0.0)

        # A LinOp covariance passes through and stays matrix-free.
        out, _ = self.transition.forward_rv(linop_cov_rv, 0.0)
        assert isinstance(out, randvars.Normal)
        assert isinstance(out.cov, linops.LinearOperator)
        assert isinstance(out.cov_cholesky, linops.LinearOperator)

        # The square-root implementation has no matrix-free path.
        with pytest.raises(NotImplementedError):
            self.sqrt_transition.forward_rv(array_cov_rv, 0.0)
        with pytest.raises(NotImplementedError):
            self.sqrt_transition.forward_rv(linop_cov_rv, 0.0)
def test_precompute_cov_cholesky(self):
    """Precomputing the Cholesky factor caches a damped dense factor."""
    mean, cov = self.params
    rv = randvars.Normal(mean, cov)

    with self.subTest("No Cholesky precomputed"):
        # Nothing is cached before the explicit precompute call.
        self.assertFalse(rv.cov_cholesky_is_precomputed)

    with self.subTest("Damping factor check"):
        with config(matrix_free=False):
            rv.precompute_cov_cholesky(damping_factor=10.0)
            # Dense mode yields a plain ndarray factor of the damped covariance.
            self.assertIsInstance(rv.cov_cholesky, np.ndarray)
            self.assertAllClose(
                rv.cov_cholesky,
                np.linalg.cholesky(rv.cov + 10.0 * np.eye(len(rv.cov))),
            )

    with self.subTest("Cholesky is precomputed"):
        self.assertTrue(rv.cov_cholesky_is_precomputed)
def test_precompute_cov_cholesky_with_linops(self):
    """Matrix-free precompute yields a LinearOperator Cholesky factor."""
    mean, cov = self.params
    rv = randvars.Normal(mean, linops.aslinop(cov))

    with self.subTest("No Cholesky precomputed"):
        # Nothing is cached before the explicit precompute call.
        self.assertFalse(rv.cov_cholesky_is_precomputed)

    with self.subTest("Damping factor check"):
        with config(matrix_free=True):
            rv.precompute_cov_cholesky(damping_factor=10.0)
            # Matrix-free mode keeps the factor as a LinearOperator; densify
            # only for the numerical comparison against the dense reference.
            self.assertIsInstance(rv.cov_cholesky, linops.LinearOperator)
            self.assertAllClose(
                rv.cov_cholesky.todense(),
                np.linalg.cholesky(cov + 10.0 * np.eye(rv.cov.shape[0])),
            )

    with self.subTest("Cholesky is precomputed"):
        self.assertTrue(rv.cov_cholesky_is_precomputed)
def test_backward_rv_classic(self, some_normal_rv1, some_normal_rv2):
    """Classic backward_rv warns whenever any covariance is dense in matrix-free mode."""
    array_cov_rv1 = some_normal_rv1
    linop_cov_rv1 = randvars.Normal(
        array_cov_rv1.mean.copy(), linops.aslinop(array_cov_rv1.cov)
    )
    array_cov_rv2 = some_normal_rv2
    linop_cov_rv2 = randvars.Normal(
        array_cov_rv2.mean.copy(), linops.aslinop(array_cov_rv2.cov)
    )

    with config(matrix_free=True):
        # Any dense covariance in either argument triggers a RuntimeWarning.
        with pytest.warns(RuntimeWarning):
            self.transition.backward_rv(array_cov_rv1, array_cov_rv2)
        with pytest.warns(RuntimeWarning):
            self.transition.backward_rv(linop_cov_rv1, array_cov_rv2)
        with pytest.warns(RuntimeWarning):
            self.transition.backward_rv(array_cov_rv1, linop_cov_rv2)

        # All-LinOp inputs are handled without warning and stay matrix-free.
        out, _ = self.transition.backward_rv(linop_cov_rv1, linop_cov_rv2)
        assert isinstance(out, randvars.Normal)
        assert isinstance(out.cov, linops.LinearOperator)
        assert isinstance(out.cov_cholesky, linops.LinearOperator)

        # The square-root implementation has no matrix-free path.
        with pytest.raises(NotImplementedError):
            self.sqrt_transition.backward_rv(array_cov_rv1, array_cov_rv2)
        with pytest.raises(NotImplementedError):
            self.sqrt_transition.backward_rv(linop_cov_rv1, linop_cov_rv2)