Example #1
def test_lorenz() -> None:
    error = math.inf
    window = 0
    prior_mu: Union[Tuple[float, float, float],
                    Tuple[numpy_types, numpy_types,
                          numpy_types]] = (3., 3., 3.)

    model = LorenzModel(sigma, beta, rho, time_step)
    observed = list(model.run_model(window_size * max_windows))

    while error > converged_error and window < max_windows:
        # Start each window from Gaussian priors centred on the previous window's posterior
        xt0 = Gaussian(prior_mu[0], 1.0)
        yt0 = Gaussian(prior_mu[1], 1.0)
        zt0 = Gaussian(prior_mu[2], 1.0)
        # Unroll the Lorenz dynamics across the window and condition on the observed data
        graph_time_steps = list(build_graph((xt0, yt0, zt0)))
        xt0.set_and_cascade(prior_mu[0])
        yt0.set_and_cascade(prior_mu[1])
        zt0.set_and_cascade(prior_mu[2])
        apply_observations(graph_time_steps, window, observed)

        # MAP estimate over the window, then read the state at its final time slice
        optimizer = GradientOptimizer(xt0)
        optimizer.max_a_posteriori()
        posterior = get_time_slice_values(graph_time_steps, window_size - 1)

        post_t = (window + 1) * (window_size - 1)
        actual_at_post_t = observed[post_t]

        # Euclidean distance between the true state and the posterior at the window's end
        error = math.sqrt((actual_at_post_t.x - posterior[0])**2 +
                          (actual_at_post_t.y - posterior[1])**2 +
                          (actual_at_post_t.z - posterior[2])**2)
        # Feed the posterior forward as the next window's prior
        prior_mu = (posterior[0], posterior[1], posterior[2])
        window += 1

    assert error <= converged_error
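
Example #1 depends on module-level constants and helpers (sigma, beta, rho, time_step, window_size, max_windows, converged_error, LorenzModel, build_graph, apply_observations, get_time_slice_values) defined elsewhere in the test module. A minimal sketch of the constants only, using the canonical Lorenz attractor parameters and illustrative windowing values (assumptions, not the original test's values):

# Illustrative constants for Example #1; the helper functions and keanu imports
# come from the surrounding test module and are not reproduced here.
import math

sigma, beta, rho = 10.0, 8.0 / 3.0, 28.0  # canonical Lorenz attractor parameters
time_step = 0.01        # integration step (assumed)
window_size = 10        # time steps per assimilation window (assumed)
max_windows = 100       # upper bound on the number of windows (assumed)
converged_error = 0.01  # convergence threshold on the Euclidean error (assumed)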
Example #2
def thermometers_max_likelihood_gradient(model: Model, algorithm) -> None:
    net = BayesNet(model.temperature.iter_connected_graph())
    gradient_optimizer = GradientOptimizer(net, algorithm)
    result = gradient_optimizer.max_likelihood()
    assert result.fitness() < 0.

    temperature = result.value_for(model.temperature)
    assert 20.99 < temperature < 21.01
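
Example #2 leaves the optimisation algorithm as a parameter; Example #10 below shows one concrete choice. A short usage sketch, assuming the same ConjugateGradient configuration object and model fixture used in these tests:

# Sketch: drive Example #2 with an explicit conjugate-gradient algorithm,
# as configured in Example #10 (the max_evaluations value is illustrative).
thermometers_max_likelihood_gradient(model, ConjugateGradient(max_evaluations=1000))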
Example #3
def test_thermometers_max_likelihood_gradient(model: Model) -> None:
    net = BayesNet(model.temperature.get_connected_graph())
    gradient_optimizer = GradientOptimizer(net)
    logProb = gradient_optimizer.max_likelihood()
    assert logProb < 0.

    temperature = model.temperature.get_value()
    assert 20.995 < temperature < 21.005
Example #4
def gradient_optimizer_example():
    model = build_model()
    model.first_thermometer.observe(25)
    bayes_net = model.to_bayes_net()
    # %%SNIPPET_START%% PythonGradientOptimizer
    optimizer = GradientOptimizer(bayes_net,
                                  max_evaluations=5000,
                                  relative_threshold=1e-8,
                                  absolute_threshold=1e-8)
    optimizer.max_a_posteriori()
    calculated_temperature = model.temperature.get_value()
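
Example #4 calls a build_model() helper that is not shown. A possible sketch of it, assuming it mirrors the two-thermometer model constructed inline in Example #5 below:

# Hypothetical build_model() for Example #4, mirroring the model in Example #5.
def build_model() -> Model:
    with Model() as m:
        m.temperature = Uniform(20., 30.)
        m.first_thermometer = Gaussian(m.temperature, 2.5)
        m.second_thermometer = Gaussian(m.temperature, 5.)
    return m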
Example #5
def test_thermometers_example():
    # %%SNIPPET_START%% PythonTwoThermometers
    with Model() as m:
        m.temperature = Uniform(20., 30.)
        m.first_thermometer = Gaussian(m.temperature, 2.5)
        m.second_thermometer = Gaussian(m.temperature, 5.)

    m.first_thermometer.observe(25.)
    m.second_thermometer.observe(30.)

    bayes_net = m.to_bayes_net()
    optimizer = GradientOptimizer(bayes_net)
    optimizer.max_a_posteriori()

    calculated_temperature = m.temperature.get_value()
Example #6
def test_gradient_can_set_max_eval_builder_properties(model: Model) -> None:
    gradient_optimizer = GradientOptimizer(model.temperature, max_evaluations=5)

    with pytest.raises(Py4JJavaError):
        # This throws a Gradient Optimizer: "Reached Max Evaluations" error
        logProb = gradient_optimizer.max_a_posteriori()
Example #7
def test_gradient_op_throws_with_invalid_net_param() -> None:
    with pytest.raises(TypeError) as excinfo:
        GradientOptimizer(500)  # type: ignore # this is expected to fail mypy

    assert str(excinfo.value) == "net must be a Vertex or a BayesNet. Was given {}".format(int)
Example #8
def test_gradient_op_vertex(model: Model) -> None:
    gradient_optimizer = GradientOptimizer(model.temperature)
    assert len(list(gradient_optimizer.net.get_latent_vertices())) == 1
Example #9
def test_gradient_op_bayes_net(model: Model) -> None:
    net = BayesNet(model.temperature.get_connected_graph())
    gradient_optimizer = GradientOptimizer(net)
    assert gradient_optimizer.net is net
Example #10
def test_gradient_can_set_max_eval_builder_properties_for_conjugate_gradient(model: Model) -> None:
    gradient_optimizer = GradientOptimizer(model.temperature, ConjugateGradient(max_evaluations=5))

    with pytest.raises(Py4JJavaError, match=r"An error occurred while calling o[\d]*.maxAPosteriori."):
        # This throws a Gradient Optimizer: "Reached Max Evaluations" error
        logProb = gradient_optimizer.max_a_posteriori()
Example #11
error = math.inf
window = 0
prior_mu = (3., 3., 3.)

model = LorenzModel(sigma, beta, rho, time_step)
observed = list(model.run_model(window_size * max_windows))

while error > converged_error and window < max_windows:
    xt0 = Gaussian(prior_mu[0], 1.0)
    yt0 = Gaussian(prior_mu[1], 1.0)
    zt0 = Gaussian(prior_mu[2], 1.0)
    graph_time_steps = list(build_graph((xt0, yt0, zt0)))
    xt0.set_and_cascade(prior_mu[0])
    yt0.set_and_cascade(prior_mu[1])
    zt0.set_and_cascade(prior_mu[2])
    apply_observations(graph_time_steps, window, observed)

    optimizer = GradientOptimizer(xt0)
    optimizer.max_a_posteriori()
    posterior = get_time_slice_values(graph_time_steps, window_size - 1)

    post_t = (window + 1) * (window_size - 1)
    actual_at_post_t = observed[post_t]

    error = math.sqrt((actual_at_post_t.x - posterior[0])**2 +
                      (actual_at_post_t.y - posterior[1])**2 +
                      (actual_at_post_t.z - posterior[2])**2)
    prior_mu = (posterior[0], posterior[1], posterior[2])
    window += 1
# %%SNIPPET_END%% LorenzPythonFull