Example #1
import numpy as np
import tensorflow as tf

from trieste.acquisition import ExpectedImprovement
from trieste.acquisition.function import expected_improvement
from tests.util.model import QuadraticMeanAndRBFKernel  # trieste test utility; module path varies by version


def test_reducers_on_ei(reducer):
    m = 6
    zero = tf.convert_to_tensor([0.0], dtype=tf.float64)
    model = QuadraticMeanAndRBFKernel()
    # build m identical EI acquisitions, all reading the data tagged "foo"
    acqs = [ExpectedImprovement().using("foo") for _ in range(m)]
    acq = reducer.type_class(*acqs)
    acq_fn = acq.prepare_acquisition_function({"foo": reducer.dataset}, {"foo": model})
    # the reduced acquisition should equal the raw reduce op applied to m individual EI values
    individual_ei = [expected_improvement(model, zero, reducer.query_point) for _ in range(m)]
    expected = reducer.raw_reduce_op(individual_ei)
    desired = acq_fn(reducer.query_point)
    np.testing.assert_array_almost_equal(desired, expected)
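
# The `reducer` argument above is a pytest fixture that this page does not show.
# Below is a minimal sketch of the shape it would need, assuming trieste's `Sum`
# and `Product` reducers; `ReducerCase` and the data are purely illustrative, and
# a real fixture would also need data consistent with the eta used in the test.
from collections import namedtuple

from trieste.acquisition import Product, Sum
from trieste.data import Dataset

# hypothetical record exposing exactly the attributes the test reads
ReducerCase = namedtuple("ReducerCase", "type_class raw_reduce_op dataset query_point")

_dataset = Dataset(
    tf.constant([[0.0], [1.0]], dtype=tf.float64),
    tf.constant([[0.0], [1.0]], dtype=tf.float64),
)
_query_point = tf.constant([[[0.5]]], dtype=tf.float64)

reducer_cases = [
    ReducerCase(Sum, tf.add_n, _dataset, _query_point),
    ReducerCase(Product, lambda fs: tf.reduce_prod(tf.stack(fs), axis=0), _dataset, _query_point),
]
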
# %%
import tensorflow as tf

from trieste.acquisition import (ExpectedImprovement, Product,
                                 SingleModelAcquisitionBuilder)
from trieste.acquisition.rule import EfficientGlobalOptimization
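
# %% [markdown]
# The `OBJECTIVE` and `FAILURE` tags used below come from earlier notebook cells
# that are not part of this excerpt; a minimal sketch, assuming the notebook's
# convention of plain string tags:

# %%
# hypothetical tag constants standing in for the omitted cells
OBJECTIVE = "OBJECTIVE"
FAILURE = "FAILURE"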


class ProbabilityOfValidity(SingleModelAcquisitionBuilder):
    def prepare_acquisition_function(self, model, dataset=None):
        def acquisition(at):
            # the mean of the classification model's predictive distribution is
            # the probability that a point is valid; drop the batch axis first
            mean, _ = model.predict_y(tf.squeeze(at, -2))
            return mean

        return acquisition


ei = ExpectedImprovement()
pov = ProbabilityOfValidity()
acq_fn = Product(ei.using(OBJECTIVE), pov.using(FAILURE))
rule = EfficientGlobalOptimization(acq_fn)  # type: ignore

# %% [markdown]
# ## Run the optimizer
#
# Now, we run the Bayesian optimization loop for twenty steps, and print the location of the query point corresponding to the minimum observation.

# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)

result = bo.optimize(20, initial_data, models, rule).final_result.unwrap()

# locate the query point of the minimum objective observation
arg_min_idx = tf.squeeze(tf.argmin(result.datasets[OBJECTIVE].observations, axis=0))
print(f"query point: {result.datasets[OBJECTIVE].query_points[arg_min_idx, :]}")
Example #3
from trieste.acquisition import LocalPenalizationAcquisitionFunction
from trieste.acquisition.rule import EfficientGlobalOptimization

local_penalization_acq = LocalPenalizationAcquisitionFunction(search_space,
                                                              num_samples=1000)
local_penalization_acq_rule = EfficientGlobalOptimization(  # type: ignore
    num_query_points=10, builder=local_penalization_acq)
points_chosen_by_local_penalization, _ = local_penalization_acq_rule.acquire_single(
    search_space, initial_data, model)
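
# %% [markdown]
# The `points_chosen_by_batch_ei` plotted below come from an earlier `BatchMonteCarloExpectedImprovement` cell that this excerpt omits. A minimal sketch of that cell, assuming the same batch size as the rule above (the sample size is illustrative):

# %%
from trieste.acquisition import BatchMonteCarloExpectedImprovement

# hypothetical reconstruction of the omitted batch-EI cell
batch_ei_acq = BatchMonteCarloExpectedImprovement(sample_size=1000)
batch_ei_acq_rule = EfficientGlobalOptimization(  # type: ignore
    num_query_points=10, builder=batch_ei_acq)
points_chosen_by_batch_ei, _ = batch_ei_acq_rule.acquire_single(
    search_space, initial_data, model)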

# %% [markdown]
# We can now visualize the batch of 10 points chosen by each of these methods, overlaid on the standard `ExpectedImprovement` acquisition function. `BatchMonteCarloExpectedImprovement` chooses a more diverse set of points, whereas the `LocalPenalizationAcquisitionFunction` focuses evaluations in the most promising areas of the space.

# %%
import matplotlib.pyplot as plt

from trieste.acquisition import ExpectedImprovement
from util.plotting import plot_acq_function_2d  # plotting helper shipped alongside the trieste notebooks

# plot standard EI acquisition function
ei = ExpectedImprovement()
ei_acq_function = ei.prepare_acquisition_function(initial_data, model)
plot_acq_function_2d(ei_acq_function, [0, 0], [1, 1],
                     contour=True,
                     grid_density=100)

plt.scatter(
    points_chosen_by_batch_ei[:, 0],
    points_chosen_by_batch_ei[:, 1],
    color="red",
    lw=5,
    label="Batch-EI",
    marker="*",
    zorder=1,
)
plt.scatter(
    points_chosen_by_local_penalization[:, 0],
    points_chosen_by_local_penalization[:, 1],
    color="black",
    lw=10,
    label="Local Penalization",  # styling is illustrative; the source truncates here
    marker="+",
    zorder=2,
)
plt.legend()

# %%
from trieste.acquisition import GIBBON

gibbon_acq = GIBBON(search_space, grid_size=2000)
gibbon_acq_rule = EfficientGlobalOptimization(  # type: ignore
    num_query_points=10, builder=gibbon_acq)
points_chosen_by_gibbon = gibbon_acq_rule.acquire_single(
    search_space, model, dataset=initial_data)

# %% [markdown]
# We can now visualize the batch of 10 points chosen by each of these methods, overlaid on the standard `ExpectedImprovement` acquisition function. `BatchMonteCarloExpectedImprovement` chooses a more diverse set of points, whereas `LocalPenalizationAcquisitionFunction` and `GIBBON` focus evaluations in the most promising areas of the space.

# %%
from trieste.acquisition import ExpectedImprovement

# plot standard EI acquisition function
ei = ExpectedImprovement()
ei_acq_function = ei.prepare_acquisition_function(model, dataset=initial_data)
plot_acq_function_2d(ei_acq_function, [0, 0], [1, 1], contour=True, grid_density=100)

plt.scatter(
    points_chosen_by_batch_ei[:, 0],
    points_chosen_by_batch_ei[:, 1],
    color="red",
    lw=5,
    label="Batch-EI",
    marker="*",
    zorder=1,
)
plt.scatter(
    points_chosen_by_local_penalization[:, 0],
    points_chosen_by_local_penalization[:, 1],
    color="black",
    lw=10,
    label="Local Penalization",  # styling is illustrative; the source truncates here
    marker="+",
    zorder=2,
)
plt.scatter(
    points_chosen_by_gibbon[:, 0],
    points_chosen_by_gibbon[:, 1],
    color="blue",
    lw=10,
    label="GIBBON",
    marker="+",
    zorder=3,
)
plt.legend()