Example #1
def test_optimizer_finds_minima_of_the_branin_function(
    num_steps: int, acquisition_rule: AcquisitionRule
) -> None:
    search_space = Box([0, 0], [1, 1])

    def build_model(data: Dataset) -> GaussianProcessRegression:
        variance = tf.math.reduce_variance(data.observations)
        kernel = gpflow.kernels.Matern52(variance, tf.constant([0.2, 0.2], tf.float64))
        gpr = gpflow.models.GPR((data.query_points, data.observations), kernel, noise_variance=1e-5)
        gpflow.utilities.set_trainable(gpr.likelihood, False)
        return GaussianProcessRegression(gpr)

    initial_query_points = search_space.sample(5)
    observer = mk_observer(branin, OBJECTIVE)
    initial_data = observer(initial_query_points)
    model = build_model(initial_data[OBJECTIVE])

    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, {OBJECTIVE: model}, acquisition_rule)
        .try_get_final_datasets()[OBJECTIVE]
    )

    arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))

    best_y = dataset.observations[arg_min_idx]
    best_x = dataset.query_points[arg_min_idx]

    relative_minimizer_err = tf.abs((best_x - BRANIN_MINIMIZERS) / BRANIN_MINIMIZERS)
    # These accuracies are the current best for the given number of optimization
    # steps, which makes this a regression test.
    assert tf.reduce_any(tf.reduce_all(relative_minimizer_err < 0.03, axis=-1), axis=0)
    npt.assert_allclose(best_y, BRANIN_MINIMUM, rtol=0.03)
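
The test above is parametrized over num_steps and acquisition_rule. A
hypothetical parametrization (an illustrative assumption, not taken from the
trieste test suite) could pair a step budget with an EI-driven rule:

import pytest
from trieste.acquisition import ExpectedImprovement
from trieste.acquisition.rule import EfficientGlobalOptimization

# Hypothetical decorator that would sit directly above the test definition;
# 15 steps and an ExpectedImprovement-based rule are illustrative choices.
@pytest.mark.parametrize(
    "num_steps, acquisition_rule",
    [(15, EfficientGlobalOptimization(ExpectedImprovement().using(OBJECTIVE)))],
)
# ...followed by the test definition shown above.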
Example #2
def test_mk_observer_unlabelled() -> None:
    def foo(x: tf.Tensor) -> tf.Tensor:
        return x + 1

    x_ = tf.constant([[3.0]])
    ys = mk_observer(foo)(x_)

    npt.assert_array_equal(ys.query_points, x_)
    npt.assert_array_equal(ys.observations, x_ + 1)
Example #3
def test_mk_observer() -> None:
    def foo(x: tf.Tensor) -> tf.Tensor:
        return x + 1

    x_ = tf.constant([[3.0]])
    ys = mk_observer(foo, "bar")(x_)

    assert ys.keys() == {"bar"}
    npt.assert_array_equal(ys["bar"].query_points, x_)
    npt.assert_array_equal(ys["bar"].observations, x_ + 1)
Example #4
def test_multi_objective_optimizer_finds_pareto_front_of_the_VLMOP2_function(
    num_steps: int, acquisition_rule: AcquisitionRule
) -> None:
    search_space = Box([-2, -2], [2, 2])

    def build_stacked_independent_objectives_model(data: Dataset) -> ModelStack:
        gprs = []
        for idx in range(2):
            single_obj_data = Dataset(
                data.query_points, tf.gather(data.observations, [idx], axis=1)
            )
            variance = tf.math.reduce_variance(single_obj_data.observations)
            kernel = gpflow.kernels.Matern52(variance, tf.constant([0.2, 0.2], tf.float64))
            gpr = gpflow.models.GPR(single_obj_data.astuple(), kernel, noise_variance=1e-5)
            gpflow.utilities.set_trainable(gpr.likelihood, False)
            gprs.append((GaussianProcessRegression(gpr), 1))

        return ModelStack(*gprs)

    observer = mk_observer(VLMOP2().objective(), OBJECTIVE)

    initial_query_points = search_space.sample(10)
    initial_data = observer(initial_query_points)

    model = build_stacked_independent_objectives_model(initial_data[OBJECTIVE])

    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, {OBJECTIVE: model}, acquisition_rule)
        .try_get_final_datasets()[OBJECTIVE]
    )

    # A small log hypervolume difference corresponds to a successful optimization.
    ref_point = get_reference_point(dataset.observations)

    obs_hv = Pareto(dataset.observations).hypervolume_indicator(ref_point)
    ideal_pf = tf.cast(VLMOP2().gen_pareto_optimal_points(100), dtype=tf.float64)
    ideal_hv = Pareto(ideal_pf).hypervolume_indicator(ref_point)

    assert tf.math.log(ideal_hv - obs_hv) < -3.5
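
# Note: exp(-3.5) ≈ 0.030, so the assertion above requires the hypervolume gap
# between the observed front and the ideal Pareto front to be below ~0.03.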
Example #5
# %% [markdown]
# ## Define the problem and model
#
# You can use Thompson sampling for Bayesian optimization in much the same way as we used EGO and EI in the tutorial _Introduction_. Since the setup is much the same as in that tutorial, we'll skip over most of the detail.
#
# We'll use a continuous bounded search space, and evaluate the observer at ten random points.

# %%
lower_bound = tf.constant([0.0, 0.0], gpflow.default_float())
upper_bound = tf.constant([1.0, 1.0], gpflow.default_float())
search_space = trieste.space.Box(lower_bound, upper_bound)

num_initial_data_points = 10
initial_query_points = search_space.sample(num_initial_data_points)
observer = mk_observer(branin, OBJECTIVE)
initial_data = observer(initial_query_points)

# %% [markdown]
# We'll use Gaussian process regression to model the function.

# %%
observations = initial_data[OBJECTIVE].observations
kernel = gpflow.kernels.Matern52(tf.math.reduce_variance(observations), [0.2, 0.2])
gpr = gpflow.models.GPR(initial_data[OBJECTIVE].astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)

model_config = {OBJECTIVE: {
    "model": gpr,
    "optimizer": gpflow.optimizers.Scipy(),
    "optimizer_args": {"options": dict(maxiter=100)},
}}
Example #6
np.random.seed(42)
tf.random.set_seed(42)

# %% [markdown]
# ## Describe the problem
#
# In this example, we consider the same problem presented in our `expected_improvement` notebook, i.e. seeking the minimizer of the two-dimensional Branin function.
#
# We begin our optimization after collecting five function evaluations from random locations in the search space.

# %%
from trieste.utils.objectives import branin, mk_observer, BRANIN_MINIMUM
from trieste.space import Box

observer = mk_observer(branin)
search_space = Box([0, 0], [1, 1])

num_initial_points = 5
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)

# %% [markdown]
# ## Surrogate model
# Just like in purely sequential optimization, we fit a surrogate Gaussian process model to the initial data.

# %%
import gpflow
from trieste.models import create_model
from trieste.utils import map_values
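
# %% [markdown]
# The excerpt ends here. A hedged sketch of how the surrogate construction
# typically continues, mirroring the GPR setup from the examples above; the
# config keys and the `create_model` call are assumptions suggested by the
# imports, not the notebook's exact code.

# %%
import tensorflow as tf
from trieste.data import Dataset

def build_model(data: Dataset):
    # Matern 5/2 kernel with its amplitude set to the empirical variance of
    # the observations, and a small, fixed likelihood noise, as above.
    variance = tf.math.reduce_variance(data.observations)
    kernel = gpflow.kernels.Matern52(variance=variance)
    gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
    gpflow.set_trainable(gpr.likelihood, False)
    return {"model": gpr, "optimizer": gpflow.optimizers.Scipy()}

# The observer here is unlabelled, so initial_data is a single Dataset and
# create_model builds one trieste model from the config; with tagged data one
# would map it over a dict, e.g. map_values(create_model, configs).
model = create_model(build_model(initial_data))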