Example #1
    def test_evidence(self):
        # analytic solution based on
        # https://www.econstor.eu/bitstream/10419/85883/1/02084.pdf eq. 3 (wherever that comes from)
        exponent = (
            -1
            / 2
            * (
                np.dot(data, data) / sigma ** 2
                + prior_mean ** 2 / prior_sd ** 2
                - self.mean ** 2 / self.scale ** 2
            )
        )

        z = (
            (2 * np.pi) ** (-n / 2)
            * sigma ** (-n)
            * self.scale
            / prior_sd
            * np.exp(exponent)
        )
        logz = np.log(z)

        prior = bayem.MVN(prior_mean, 1.0 / prior_sd ** 2)
        narrow_gamma = bayem.Gamma.FromMeanStd(1 / sigma ** 2, 1.0e-6)
        result1 = bayem.vba(model_error, prior, narrow_gamma, update_noise=True)
        result2 = bayem.vba(model_error, prior, narrow_gamma, update_noise=False)
        self.assertAlmostEqual(result1.f_max, logz, places=6)
        self.assertAlmostEqual(result2.f_max, logz, places=6)
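
For reference, the analytic evidence above can also be assembled directly in log space, which avoids a possible overflow or underflow in np.exp for larger data sets. This is only a sketch, assuming the same module-level fixtures (data, sigma, n, prior_mean, prior_sd) and the posterior mean/scale used in the test:

def analytic_log_evidence(data, sigma, prior_mean, prior_sd, post_mean, post_sd):
    # Same closed-form evidence as in the test, but the factors
    # (2*pi)**(-n/2), sigma**(-n) and post_sd/prior_sd enter as log terms.
    n = len(data)
    exponent = -0.5 * (
        np.dot(data, data) / sigma ** 2
        + prior_mean ** 2 / prior_sd ** 2
        - post_mean ** 2 / post_sd ** 2
    )
    return (
        -0.5 * n * np.log(2 * np.pi)
        - n * np.log(sigma)
        + np.log(post_sd / prior_sd)
        + exponent
    )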
Example #2
    def test_wrong_number_of_noises(self):
        def me(prm):
            return np.array(prm - 10)

        # That should work:
        correct_noise = bayem.Gamma(1, 2)
        info = bayem.vba(me, bayem.MVN(7, 12), correct_noise)
        self.assertTrue(info.success)

        # Providing a wrong dimension in the noise pattern should fail.
        wrong_noise = bayem.Gamma((1, 1), (2, 2))
        with self.assertRaises(Exception):
            bayem.vba(me, bayem.MVN(7, 12), wrong_noise)
Example #3

def test_skip_exceptions():
    # Failure due to an exception raised at the very first iteration
    result = bayem.vba(f_value_error,
                       x0,
                       noise0=None,
                       allowed_exceptions=ValueError)
    assert not result.success

    # Failure due to an occasional exception at some later iteration
    result = bayem.vba(f_delayed_value_error,
                       x0,
                       noise0=None,
                       allowed_exceptions=ValueError)
    print(result)
    assert not result.success
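
The model errors f_value_error and f_delayed_value_error are not shown in this excerpt. A hypothetical sketch of what they could look like, only to illustrate the allowed_exceptions behaviour tested above (the delayed variant fails after a few evaluations, i.e. at a later vba iteration):

# Hypothetical sketches, not part of the original test module:
def f_value_error(theta):
    # always fails, so vba aborts at the very first iteration
    raise ValueError("model evaluation failed")

_n_calls = [0]

def f_delayed_value_error(theta):
    # fails only after a few evaluations, i.e. at a later iteration
    _n_calls[0] += 1
    if _n_calls[0] > 3:
        raise ValueError("model evaluation failed")
    return theta - 10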
Example #4
    def test_given_mu(self):
        """
        We infer the gamma distribution of the noise precision for a
        given, fixed parameter mu. This is done by setting a prior with
        a very high precision.

        For a super noninformative noise prior (shape=0, scale->inf), the
        analytic solution for the INVERSE gamma distribution for the noise
        VARIANCE reads
        """
        a = n / 2
        b = np.sum((data - prior_mean) ** 2) / 2

        # The parameters for the corresponding gamma distribution for the
        # PRECISION then read (a, 1/b)

        big_but_not_nan = 1e20
        prior = bayem.MVN(prior_mean, big_but_not_nan)
        gamma = bayem.Gamma(shape=1.0e-20, scale=big_but_not_nan)

        result = bayem.vba(model_error, prior, gamma, update_noise=True)
        self.assertTrue(result.success)

        gamma = result.noise
        self.assertAlmostEqual(gamma.shape, a)
        self.assertAlmostEqual(gamma.scale, 1 / b)
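
Restating the analytic result from the docstring above, for a fixed mean \mu and a noninformative noise prior the posterior of the noise VARIANCE is

    \sigma^2 \mid d_1, \dots, d_n \;\sim\; \mathrm{Inv\text{-}Gamma}(a, b),
    \qquad a = \frac{n}{2}, \qquad b = \frac{1}{2}\sum_{i=1}^{n} (d_i - \mu)^2,

so the precision \tau = 1/\sigma^2 follows a Gamma distribution with shape a and scale 1/b, which is exactly what the two assertions on gamma.shape and gamma.scale check.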
Example #5
    def given_noise(self, update_noise, check_method):
        prior = bayem.MVN(prior_mean, 1.0 / prior_sd ** 2)

        gamma = bayem.Gamma.FromMeanStd(1 / sigma ** 2, 42)

        result = bayem.vba(model_error, prior, gamma, update_noise=update_noise)
        self.assertTrue(result.success)
        check_method(result.param.mean[0], self.mean)
        check_method(result.param.std_diag[0], self.scale)
Example #6
    def test_inconsistent_noise(self):
        def dict_model_error(numbers):
            return {"A": np.ones(5), "B": np.zeros(5)}

        param0 = bayem.MVN()
        noise0 = {"A": bayem.Gamma(1, 1), "B": bayem.Gamma(2, 2)}

        # You may provide a single update_noise flag
        result = bayem.vba(dict_model_error, param0, noise0, update_noise=False)
        self.assert_gamma_equal(result.noise["A"], noise0["A"])
        self.assert_gamma_equal(result.noise["B"], noise0["B"])

        # Alternatively, you can provide a dict containing _all_ noise keys
        result = bayem.vba(
            dict_model_error, param0, noise0, update_noise={"A": True, "B": False}
        )
        self.assert_not_gamma_equal(result.noise["A"], noise0["A"])
        self.assert_gamma_equal(result.noise["B"], noise0["B"])

        # There will be an error if you forget one of the keys
        with self.assertRaises(KeyError):
            bayem.vba(dict_model_error, param0, noise0, update_noise={"A": True})
Example #7
def test_result_io():
    def dummy_me(prm):
        return prm**2

    result = bayem.vba(
        dummy_me,
        x0=bayem.MVN([1, 1], np.diag([1, 1]), parameter_names=["A", "B"]),
    )

    with TemporaryDirectory() as f:
        filename = str(Path(f) / "tmp.json")
        dumped = bayem.save_json(result, filename)
        loaded = bayem.load_json(filename)
        dumped_again = bayem.save_json(loaded)
        assert dumped == dumped_again
Example #8
    def run_test(self, noise_prior, delta, update_noise=True):
        """
        Infers two parameters of a linear model where the data is arranged
        in two noise groups.

        noise_prior:
            gamma distribution for the noise precision or
            None for a noninformative prior
        delta:
            absolute tolerance used when comparing the inferred noise
            standard deviations to the true values
        """
        np.random.seed(42)

        fw = ForwardModel()
        data = fw([PRM_A, PRM_B])

        data["group0"] += np.random.normal(0, NOISE0_SD, len(data["group0"]))
        data["group1"] += np.random.normal(0, NOISE1_SD, len(data["group1"]))

        param_prior = bayem.MVN([6.0, 2.0],
                                [[1 / 15.0**2, 0], [0, 1 / 7.0**2]])

        me = ModelError(fw, data)
        info = bayem.vba(me,
                         param_prior,
                         noise_prior,
                         update_noise=update_noise)

        param = info.param
        self.assertAlmostEqual(param.mean[0],
                               PRM_A,
                               delta=2 * param.std_diag[0])
        self.assertAlmostEqual(param.mean[1],
                               PRM_B,
                               delta=2 * param.std_diag[1])

        noise_sds = {
            n: 1.0 / gamma.mean**0.5
            for n, gamma in info.noise.items()
        }
        self.assertAlmostEqual(noise_sds["group0"], NOISE0_SD, delta=delta)
        self.assertAlmostEqual(noise_sds["group1"], NOISE1_SD, delta=delta)
        print("noise prior was", noise_prior)
        print(param)
        print(noise_sds)
        print()
        return info
Example #9
def test_vb(n_data, given_jac):
    np.random.seed(6174)

    fw = ForwardModel()
    param_true = [7.0, 10.0]
    noise_sd = 0.1

    data = []
    perfect_data = fw(param_true)
    for _ in range(n_data):
        data.append(perfect_data +
                    np.random.normal(0, noise_sd, len(perfect_data)))

    me = ModelError(fw, data, given_jac)

    param_prior = bayem.MVN([0, 11], [[1 / 7**2, 0], [0, 1 / 3**2]])
    noise_prior = bayem.Gamma.FromSDQuantiles(0.5 * noise_sd, 1.5 * noise_sd)

    info = bayem.vba(me, param_prior, noise_prior, jac=given_jac)

    param_post, noise_post = info.param, info.noise

    for i in range(2):
        posterior_mean = param_post.mean[i]
        posterior_std = param_post.std_diag[i]

        assert posterior_std < 0.3
        assert posterior_mean == pytest.approx(param_true[i],
                                               abs=2 * posterior_std)

    post_noise_precision = noise_post.mean
    post_noise_sd = 1.0 / post_noise_precision**0.5
    assert post_noise_sd == pytest.approx(noise_sd, rel=0.01)

    assert info.nit < 20
    print(info)
Example #10

def test_catch_exceptions():
    with pytest.raises(ValueError):
        bayem.vba(f_value_error, x0)
Example #11
def test_returned_noise_type():
    assert isinstance(bayem.vba(f, x0).noise, bayem.Gamma)
    assert isinstance(bayem.vba(f_list, x0).noise, list)
    assert isinstance(bayem.vba(f_dict, x0).noise, dict)
Example #12
def test_minimal():
    assert bayem.vba(f, x0).success
    assert bayem.vba(f_list, x0).success
    assert bayem.vba(f_dict, x0).success
Example #13
import bayem
import numpy as np

x = np.linspace(0, 1, 1000)
data = 5 * x + 42 + np.random.normal(size=1000)


def k(theta):
    return theta[0] * x + theta[1] - data


wide_prior = bayem.MVN.FromMeanStd([0, 0], [1000, 1000])
result = bayem.vba(k, x0=wide_prior)
result.summary()
Example #14
def test_provide_everything():
    assert bayem.vba(f, x0, n0, jac).success
    assert bayem.vba(f_list, x0, n0_list, jac_list).success
    assert bayem.vba(f_dict, x0, n0_dict, jac_dict).success
    assert bayem.vba(f_jac, x0, n0, jac=True).success
    assert bayem.vba(f_jac_dict, x0, n0_dict, jac=True).success
Example #15
    np.random.seed(6174)
    N = 100
    xs = np.linspace(1, 2, N)

    def g(theta):
        return theta[1]**2 + xs * theta[0]

    noise = 1.0
    data = g([5, 7]) + np.random.normal(0, noise, size=N)

    def f(theta):
        k = g(theta) - data

        d_dm = xs
        d_dc = 2 * theta[1] * np.ones_like(xs)
        return k, np.vstack([d_dm, d_dc]).T

    m0 = np.array([2, 19])
    L0 = np.array([[0.001, 0], [0, 0.001]])

    import bayem

    info = bayem.vba(f, x0=bayem.MVN(m0, L0), jac=True)
    print(1 / info.noise.mean**0.5)
    print(info)
    bayem.result_trace(info)
Example #16
me = ModelError(fw, data, with_jacobian=False)

# setting mean and precision
prec = np.identity(len(param_true))
prec[0, 0] = 1 / 3 ** 2
prec[1, 1] = 1 / 3 ** 2
for i in range(n_sensors):
    prec[1 + i, 1 + i] = 2
param_prior = bayem.MVN(mean=np.array([6] + [0] * n_sensors), precision=prec)
noise_prior = bayem.Gamma.FromSDQuantiles(0.1 * noise_std, 10 * noise_std)

info = bayem.vba(
    me,
    param_prior,
    noise_prior,
    jac=False,
    index_ARD=np.arange(1, n_sensors + 1),
    maxiter=500,
    maxtrials=50,
    update_noise=True,
)


@pytest.mark.skip(reason="There is something going wrong in this test design.")
def test_checks():
    means, sds = info.param.mean, info.param.std_diag
    for i, p in enumerate(param_true):
        mean, sd = means[i], sds[i]
        assert sd < 0.05
        assert mean == pytest.approx(p, abs=2 * sd)

    post_noise_precision = info.noise.mean
Example #17
def test_no_noise():
    assert bayem.vba(f, x0, jac=jac).success
    assert bayem.vba(f_list, x0, jac=jac_list).success
    assert bayem.vba(f_dict, x0, jac=jac_dict).success
    assert bayem.vba(f_jac, x0, jac=True).success
Example #18
def test_no_jacobian():
    assert bayem.vba(f, x0, n0).success
    assert bayem.vba(f_list, x0, n0_list).success
    assert bayem.vba(f_dict, x0, n0_dict).success
Example #19
        test_img = imread(test_img_name)
        ref_img = imread(ref_img_name)

        assert np.linalg.norm(test_img - ref_img) == pytest.approx(0)


np.random.seed(6174)
t = np.linspace(1, 2, 10)
noise = np.random.normal(0, 0.42, len(t))


def f(x):
    return t * x[0]**2 - t * 9 + noise


info = bayem.vba(f, x0=bayem.MVN([2], [0.5]), noise0=bayem.Gamma(1, 2))


def test_pair_plot(generate_ref_img=False):
    visu.pair_plot(info, show=False)
    compare_plt("ref_pair_plot.png", generate_ref_img=generate_ref_img)


def test_trace_plot(generate_ref_img=False):
    visu.result_trace(info, show=False)
    compare_plt("ref_trace_plot.png", generate_ref_img=generate_ref_img)


if __name__ == "__main__":
    generate = True
    test_pair_plot(generate_ref_img=generate)
import numpy as np
import pytest

import bayem

np.random.seed(6174)
x = np.linspace(0, 1, 1000)
A, B, sd = 7.0, 42.0, 0.1
data = A * x + B + np.random.normal(0, sd, len(x))


def model_error(parameters):
    return parameters[0] * x + parameters[1] - data


x0 = bayem.MVN([6, 11], [[1 / 3**2, 0], [0, 1 / 3**2]])
info = bayem.vba(model_error, x0, noise0=None)
print(info)


def test_results():
    for i, correct_value in enumerate([A, B]):
        posterior_mean = info.param.mean[i]
        posterior_std = info.param.std_diag[i]

        assert posterior_std < 0.3
        assert posterior_mean == pytest.approx(correct_value,
                                               abs=2 * posterior_std)

    post_noise_precision = info.noise.mean
    post_noise_std = 1.0 / post_noise_precision**0.5
    assert post_noise_std == pytest.approx(sd, rel=0.01)