Example #1
def test_ego(search_space: SearchSpace, expected_minimum: tf.Tensor) -> None:
    ego = EfficientGlobalOptimization(
        NegativeLowerConfidenceBound(0).using(OBJECTIVE))
    dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    query_point, _ = ego.acquire(search_space, {OBJECTIVE: dataset},
                                 {OBJECTIVE: QuadraticWithUnitVariance()})
    npt.assert_array_almost_equal(query_point, expected_minimum, decimal=5)
Example #2
def test_efficient_global_optimization(optimizer: AcquisitionOptimizer[Box]) -> None:
    class NegQuadratic(SingleModelAcquisitionBuilder):
        def __init__(self) -> None:
            self._updated = False

        def prepare_acquisition_function(
            self,
            model: ProbabilisticModel,
            dataset: Optional[Dataset] = None,
        ) -> AcquisitionFunction:
            return lambda x: -quadratic(tf.squeeze(x, -2) - 1)

        def update_acquisition_function(
            self,
            function: AcquisitionFunction,
            model: ProbabilisticModel,
            dataset: Optional[Dataset] = None,
        ) -> AcquisitionFunction:
            self._updated = True
            return function

    function = NegQuadratic()
    search_space = Box([-10], [10])
    ego = EfficientGlobalOptimization(function, optimizer)
    data, model = empty_dataset([1], [1]), QuadraticMeanAndRBFKernel(x_shift=1)
    query_point = ego.acquire_single(search_space, model, dataset=data)
    npt.assert_allclose(query_point, [[1]], rtol=1e-4)
    assert not function._updated
    query_point = ego.acquire(search_space, {OBJECTIVE: model})
    npt.assert_allclose(query_point, [[1]], rtol=1e-4)
    assert function._updated
Example #3
def test_ego(search_space: SearchSpace, expected_minimum: tf.Tensor) -> None:
    ego = EfficientGlobalOptimization(
        NegativeLowerConfidenceBound(0).using(OBJECTIVE))
    dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    query_point, _ = ego.acquire(search_space, {OBJECTIVE: dataset},
                                 {OBJECTIVE: QuadraticMeanAndRBFKernel()})
    npt.assert_array_almost_equal(query_point, expected_minimum, decimal=5)
Example #4
def test_efficient_global_optimization(
        optimizer: AcquisitionOptimizer[Box]) -> None:
    class NegQuadratic(AcquisitionFunctionBuilder):
        def prepare_acquisition_function(
                self, datasets: Mapping[str, Dataset],
                models: Mapping[str,
                                ProbabilisticModel]) -> AcquisitionFunction:
            return lambda x: -quadratic(tf.squeeze(x, -2) - 1)

    search_space = Box([-10], [10])
    ego = EfficientGlobalOptimization(NegQuadratic(), optimizer)
    data, model = empty_dataset([1], [1]), QuadraticMeanAndRBFKernel(x_shift=1)
    query_point, _ = ego.acquire(search_space, {"": data}, {"": model})
    npt.assert_allclose(query_point, [[1]], rtol=1e-4)
Example #5
def test_joint_batch_acquisition_rule_acquire() -> None:
    search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
    num_query_points = 4
    acq = _JointBatchModelMinusMeanMaximumSingleBuilder()
    ego: EfficientGlobalOptimization[Box] = EfficientGlobalOptimization(
        acq, num_query_points=num_query_points
    )
    dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    query_point = ego.acquire_single(search_space, dataset, QuadraticMeanAndRBFKernel())

    npt.assert_allclose(query_point, [[0.0, 0.0]] * num_query_points, atol=1e-3)
Example #6
# %% [markdown]
# ## Batch acquisition functions.
# To perform batch BO, we must define a batch acquisition function. Two popular batch acquisition functions supported in Trieste are `BatchMonteCarloExpectedImprovement` and the `LocalPenalizationAcquisitionFunction`. Although both acquisition functions recommend batches of diverse query points, the batches are chosen in very different ways. `BatchMonteCarloExpectedImprovement` jointly allocates the batch of points as those with the largest expected improvement over our current best solution. In contrast, the `LocalPenalizationAcquisitionFunction` greedily builds the batch, sequentially adding the maximizers of the standard (non-batch) `ExpectedImprovement` function, penalized around the current pending batch points. In practice, `BatchMonteCarloExpectedImprovement` can be expected to perform better for small batches (`batch_size` < 10), but it scales poorly to larger batches.
#
# Note that both of these acquisition functions have controllable parameters. In particular, `BatchMonteCarloExpectedImprovement` is computed using a Monte-Carlo method (so it requires a `sample_size`), but uses a reparametrisation trick to make it deterministic. The `LocalPenalizationAcquisitionFunction` has parameters controlling the degree of penalization, which are estimated from a random sample of `num_samples` model predictions.

# %% [markdown]
# First, we collect the batch of ten points recommended by `BatchMonteCarloExpectedImprovement` ...

# %%
from trieste.acquisition import BatchMonteCarloExpectedImprovement
from trieste.acquisition.rule import EfficientGlobalOptimization

batch_ei_acq = BatchMonteCarloExpectedImprovement(sample_size=1000)
batch_ei_acq_rule = EfficientGlobalOptimization(  # type: ignore
    num_query_points=10, builder=batch_ei_acq)
points_chosen_by_batch_ei, _ = batch_ei_acq_rule.acquire_single(
    search_space, initial_data, model)

# %% [markdown]
# and then do the same with `LocalPenalizationAcquisitionFunction`.

# %%
from trieste.acquisition import LocalPenalizationAcquisitionFunction

local_penalization_acq = LocalPenalizationAcquisitionFunction(search_space,
                                                              num_samples=1000)
local_penalization_acq_rule = EfficientGlobalOptimization(  # type: ignore
    num_query_points=10, builder=local_penalization_acq)
points_chosen_by_local_penalization, _ = local_penalization_acq_rule.acquire_single(
    search_space, initial_data, model)
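
# %% [markdown]
# As a quick visual check (a sketch only: it assumes a two-dimensional search space and that `matplotlib` is installed), we can overlay the two recommended batches to see how differently the two strategies spread their points.

# %%
import matplotlib.pyplot as plt

plt.scatter(*points_chosen_by_batch_ei.numpy().T, marker="o", label="batch EI")
plt.scatter(
    *points_chosen_by_local_penalization.numpy().T,
    marker="x",
    label="local penalization",
)
plt.legend()
plt.show()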
Example #7
     Tuple[
         int,
         Union[
             AcquisitionRule[TensorType, Box],
             AcquisitionRule[
                 State[
                     TensorType,
                     Union[AsynchronousGreedy.State, TrustRegion.State],
                 ],
                 Box,
             ],
         ],
     ]
 ],
 [
     (20, EfficientGlobalOptimization()),
     (25, EfficientGlobalOptimization(AugmentedExpectedImprovement().using(OBJECTIVE))),
     (
         22,
         EfficientGlobalOptimization(
             MinValueEntropySearch(BRANIN_SEARCH_SPACE, num_fourier_features=1000).using(
                 OBJECTIVE
             )
         ),
     ),
     (
         12,
         EfficientGlobalOptimization(
             BatchMonteCarloExpectedImprovement(sample_size=500).using(OBJECTIVE),
             num_query_points=3,
         ),
Example #8

initial_models = trieste.utils.map_values(create_bo_model, initial_data)

# %% [markdown]
# ## Define the acquisition process
#
# We can construct the _expected constrained improvement_ acquisition function defined in <cite data-cite="gardner14">[Gardner et al.](http://proceedings.mlr.press/v32/gardner14.html)</cite>, where they use the probability of feasibility with respect to the constraint model.

# %%
from trieste.acquisition.rule import EfficientGlobalOptimization

pof = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim.threshold)
eci = trieste.acquisition.ExpectedConstrainedImprovement(
    OBJECTIVE, pof.using(CONSTRAINT))
rule: EfficientGlobalOptimization[Box] = EfficientGlobalOptimization(eci)

# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop

# %%
num_steps = 20
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)

data = bo.optimize(num_steps,
                   initial_data,
                   initial_models,
                   rule,
                   track_state=False).try_get_final_datasets()
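
# %% [markdown]
# A minimal sketch (assuming, as in this notebook, that a point is feasible when its constraint observation is at most `Sim.threshold`, and that `tf` is already imported) of extracting the best feasible query point from the returned datasets:

# %%
feasible = tf.squeeze(data[CONSTRAINT].observations <= Sim.threshold, axis=-1)
feasible_observations = tf.boolean_mask(data[OBJECTIVE].observations, feasible)
feasible_query_points = tf.boolean_mask(data[OBJECTIVE].query_points, feasible)

best_idx = tf.argmin(feasible_observations[:, 0])
print("best feasible query point:", feasible_query_points[best_idx].numpy())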
Example #9

class _JointBatchModelMinusMeanMaximumSingleBuilder(AcquisitionFunctionBuilder):
    def prepare_acquisition_function(
        self,
        models: Mapping[str, ProbabilisticModel],
        datasets: Optional[Mapping[str, Dataset]] = None,
    ) -> AcquisitionFunction:
        return lambda at: -tf.reduce_max(models[OBJECTIVE].predict(at)[0], axis=-2)


@random_seed
@pytest.mark.parametrize(
    "rule_fn",
    [
        lambda acq, batch_size: EfficientGlobalOptimization(acq, num_query_points=batch_size),
        lambda acq, batch_size: AsynchronousOptimization(acq, num_query_points=batch_size),
    ],
)
# As a side effect, this test ensures that EGO and AsynchronousOptimization
# behave similarly in sync mode
def test_joint_batch_acquisition_rule_acquire(
    rule_fn: Callable[
        # callable input type(s)
        [_JointBatchModelMinusMeanMaximumSingleBuilder, int],
        # callable output type
        AcquisitionRule[TensorType, Box]
        | AcquisitionRule[State[TensorType, AsynchronousRuleState], Box],
    ]
) -> None:
    search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
Example #10
from trieste.data import Dataset
from trieste.models import GaussianProcessRegression
from trieste.models.model_interfaces import ModelStack
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.utils.multi_objectives import VLMOP2
from trieste.utils.objectives import mk_observer
from trieste.utils.pareto import Pareto, get_reference_point


@random_seed
@pytest.mark.parametrize(
    "num_steps, acquisition_rule",
    [
        (20,
         EfficientGlobalOptimization(
             ExpectedHypervolumeImprovement().using(OBJECTIVE))),
    ],
)
def test_multi_objective_optimizer_finds_pareto_front_of_the_VLMOP2_function(
        num_steps: int, acquisition_rule: AcquisitionRule) -> None:
    search_space = Box([-2, -2], [2, 2])

    def build_stacked_independent_objectives_model(
            data: Dataset) -> ModelStack:
        gprs = []
        for idx in range(2):
            single_obj_data = Dataset(
                data.query_points, tf.gather(data.observations, [idx], axis=1))
            variance = tf.math.reduce_variance(single_obj_data.observations)
            kernel = gpflow.kernels.Matern52(
                variance, tf.constant([0.2, 0.2], tf.float64))
Example #11
from trieste.models.gpflow import GaussianProcessRegression
from trieste.models.interfaces import ModelStack
from trieste.objectives.multi_objectives import VLMOP2
from trieste.objectives.utils import mk_observer
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.types import TensorType


@random_seed
@pytest.mark.parametrize(
    "num_steps, acquisition_rule, convergence_threshold",
    [
        pytest.param(
            20,
            EfficientGlobalOptimization(
                ExpectedHypervolumeImprovement().using(OBJECTIVE)),
            -3.65,
            id="ehvi_vlmop2",
        ),
        pytest.param(
            15,
            EfficientGlobalOptimization(
                BatchMonteCarloExpectedHypervolumeImprovement(
                    sample_size=500).using(OBJECTIVE),
                num_query_points=2,
                optimizer=generate_continuous_optimizer(
                    num_initial_samples=500),
            ),
            -3.44,
            id="qehvi_vlmop2_q_2",
        ),
Example #12
from trieste.observer import OBJECTIVE
from trieste.space import Box, SearchSpace
from trieste.types import State, TensorType


@random_seed
@pytest.mark.parametrize(
    "num_steps, reload_state, acquisition_rule_fn",
    cast(
        List[Tuple[int, bool,
                   Union[Callable[[], AcquisitionRule[TensorType, Box]],
                         Callable[[], AcquisitionRule[
                             State[TensorType,
                                   Union[AsynchronousGreedy.State,
                                         TrustRegion.State]],
                             Box]]]]],
        [
            (20, False, lambda: EfficientGlobalOptimization()),
            (20, True, lambda: EfficientGlobalOptimization()),
            (15, False, lambda: TrustRegion()),
            (15, True, lambda: TrustRegion()),
            (
                10,
                False,
                lambda: EfficientGlobalOptimization(
                    LocalPenalizationAcquisitionFunction(
                        BRANIN_SEARCH_SPACE).using(OBJECTIVE),
                    num_query_points=3,
                ),
            ),
            (
                30,
                False,
Example #13
def test_efficient_global_optimization_raises_for_no_batch_fn_with_many_query_points(
) -> None:
    with pytest.raises(ValueError):
        EfficientGlobalOptimization(num_query_points=2)
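

# For contrast, a sketch (a hypothetical test, reusing the batch-capable
# BatchMonteCarloExpectedImprovement builder and the OBJECTIVE tag seen in the
# other examples) of a construction that accepts num_query_points > 1:
def test_efficient_global_optimization_accepts_batch_fn_with_many_query_points() -> None:
    batch_builder = BatchMonteCarloExpectedImprovement(sample_size=100)
    EfficientGlobalOptimization(batch_builder.using(OBJECTIVE), num_query_points=2)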
Example #14
    ThompsonSampling,
    TrustRegion,
)
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.data import Dataset
from trieste.models import GaussianProcessRegression
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.utils.objectives import BRANIN_MINIMIZERS, BRANIN_MINIMUM, branin, mk_observer


@random_seed
@pytest.mark.parametrize(
    "num_steps, acquisition_rule",
    [
        (20, EfficientGlobalOptimization()),
        (20,
         EfficientGlobalOptimization(
             AugmentedExpectedImprovement().using(OBJECTIVE))),
        (
            15,
            EfficientGlobalOptimization(
                MinValueEntropySearch(Box([0, 0], [1, 1]),
                                      grid_size=1000,
                                      num_samples=10).using(OBJECTIVE)),
        ),
        (
            15,
            EfficientGlobalOptimization(
                BatchMonteCarloExpectedImprovement(
                    sample_size=500).using(OBJECTIVE),
Example #15
            }
        }), 1))

    return ModelStack(*gprs)


# %%
model = build_stacked_independent_objectives_model(initial_data, num_objective)

# %% [markdown]
# ## Define the acquisition function
# Here we use the [EHVI](https://link.springer.com/article/10.1007/s10898-019-00798-7)-based `ExpectedHypervolumeImprovement` acquisition function:

# %%
ehvi = ExpectedHypervolumeImprovement()
rule = EfficientGlobalOptimization(builder=ehvi)  # type: ignore

# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop

# %%
num_steps = 30
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(num_steps, initial_data, model, acquisition_rule=rule)

# %% [markdown]
# To conclude, we visualize the queried data across the design space.
# We represent the initial points as crosses and the points obtained by our optimization loop as dots.
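
# %%
# A sketch of that plot (assumptions: matplotlib is installed, the search space
# is two-dimensional, and the number of initial points is available as
# `num_initial_points`):
import matplotlib.pyplot as plt

points = result.try_get_final_dataset().query_points.numpy()
plt.scatter(*points[:num_initial_points].T, marker="x", label="initial")
plt.scatter(*points[num_initial_points:].T, marker=".", label="acquired")
plt.legend()
plt.show()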
Example #16
def test_optimizer_finds_minima_of_Gardners_Simulation_1(
    num_steps: int, acquisition_function_builder
) -> None:
    """
    Test the convergence of constrained BO algorithms on the
    synthetic "simulation 1" experiment of :cite:`gardner14`.
    """
    search_space = Box([0, 0], [6, 6])

    def objective(input_data):
        x, y = input_data[..., -2], input_data[..., -1]
        z = tf.cos(2.0 * x) * tf.cos(y) + tf.sin(x)
        return z[:, None]

    def constraint(input_data):
        x, y = input_data[:, -2], input_data[:, -1]
        z = tf.cos(x) * tf.cos(y) - tf.sin(x) * tf.sin(y)
        return z[:, None]

    MINIMUM = -2.0
    MINIMIZER = [math.pi * 1.5, 0.0]

    OBJECTIVE = "OBJECTIVE"
    CONSTRAINT = "CONSTRAINT"

    def observer(query_points):  # observe both objective and constraint data
        return {
            OBJECTIVE: Dataset(query_points, objective(query_points)),
            CONSTRAINT: Dataset(query_points, constraint(query_points)),
        }

    num_initial_points = 5
    initial_data = observer(search_space.sample(num_initial_points))

    def build_model(data):
        variance = tf.math.reduce_variance(data.observations)
        kernel = gpflow.kernels.Matern52(variance, tf.constant([0.2, 0.2], tf.float64))
        gpr = gpflow.models.GPR((data.query_points, data.observations), kernel, noise_variance=1e-5)
        gpflow.utilities.set_trainable(gpr.likelihood, False)
        return GaussianProcessRegression(gpr)

    models = map_values(build_model, initial_data)

    pof = ProbabilityOfFeasibility(threshold=0.5)
    acq = acquisition_function_builder(OBJECTIVE, pof.using(CONSTRAINT))
    rule: EfficientGlobalOptimization[Box] = EfficientGlobalOptimization(acq)

    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, models, rule)
        .try_get_final_datasets()[OBJECTIVE]
    )

    arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))

    best_y = dataset.observations[arg_min_idx]
    best_x = dataset.query_points[arg_min_idx]

    relative_minimizer_err = tf.abs(best_x - MINIMIZER)
    # these accuracies are the current best for the given number of optimization
    # steps, which makes this a regression test
    assert tf.reduce_all(relative_minimizer_err < 0.03, axis=-1)
    npt.assert_allclose(best_y, MINIMUM, rtol=0.03)
Example #17
# %% [markdown]
# ## Active learning using predictive variance
#
# For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance`, which chooses points for which we are highly uncertain (i.e. the predictive posterior covariance matrix at these points has a large determinant), as discussed in <cite data-cite="MacKay1992"/>. Note that this also implies that our model needs a `predict_joint` method to return the full covariance, which is likely to be expensive to compute.
#
# We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving on to batch active learning. For both cases, we can use trieste's `BayesianOptimizer` to do the active learning steps.
#

# %%
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.acquisition.function import PredictiveVariance

acq = PredictiveVariance()
rule = EfficientGlobalOptimization(
    builder=acq, optimizer=generate_continuous_optimizer(sigmoid=False)
)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)

# %% [markdown]
# To plot the contour of the variance of our model at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`; this makes trieste record our model at each iteration.

# %%
bo_iter = 5
result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)

# %% [markdown]
# Then we can retrieve our final dataset from the active learning steps.

# %%
dataset = result.try_get_final_dataset()
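
# %% [markdown]
# A sketch of inspecting the recorded history (assumptions: with `track_state=True`, `result.history` holds one `Record` per step, each exposing the fitted model under the default `OBJECTIVE` tag, and `tf` is already imported):

# %%
from trieste.observer import OBJECTIVE

probe_points = search_space.sample(1000)
for step, record in enumerate(result.history):
    _, variance = record.models[OBJECTIVE].predict(probe_points)
    print(f"step {step}: mean predictive variance = {tf.reduce_mean(variance):.5f}")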
Example #18
initial_models = trieste.utils.map_values(create_bo_model, initial_data)

# %% [markdown]
# ## Define the acquisition process
#
# We can construct the _expected constrained improvement_ acquisition function defined in <cite data-cite="gardner14">[Gardner et al.](http://proceedings.mlr.press/v32/gardner14.html)</cite>, where they use the probability of feasibility with respect to the constraint model.

# %%
from trieste.acquisition.rule import EfficientGlobalOptimization

pof = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim.threshold)
eci = trieste.acquisition.ExpectedConstrainedImprovement(
    OBJECTIVE, pof.using(CONSTRAINT)
)
rule: EfficientGlobalOptimization[Box] = EfficientGlobalOptimization(eci)

# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop

# %%
num_steps = 20
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)

data = bo.optimize(
    num_steps, initial_data, initial_models, rule, track_state=False
).try_get_final_datasets()

# %% [markdown]
Example #19
from trieste.space import Box
from trieste.utils.objectives import (
    branin,
    BRANIN_GLOBAL_MINIMUM,
    BRANIN_GLOBAL_ARGMIN,
    mk_observer,
)

from tests.util.misc import random_seed


@random_seed
@pytest.mark.parametrize(
    "num_steps, acquisition_rule",
    [
        (30, EfficientGlobalOptimization()),
        (22, TrustRegion()),
        (17, ThompsonSampling(500, 3)),
    ],
)
def test_optimizer_finds_minima_of_the_branin_function(
        num_steps: int, acquisition_rule: AcquisitionRule) -> None:
    search_space = Box(tf.constant([0.0, 0.0], tf.float64),
                       tf.constant([1.0, 1.0], tf.float64))

    def build_model(data: Dataset) -> GaussianProcessRegression:
        variance = tf.math.reduce_variance(data.observations)
        kernel = gpflow.kernels.Matern52(variance,
                                         tf.constant([0.2, 0.2], tf.float64))
        gpr = gpflow.models.GPR((data.query_points, data.observations),
                                kernel,
Example #20
        self,
        function: Optional[AcquisitionFunction],
        dataset: Dataset,
        model: ProbabilisticModel,
        pending_points: Optional[TensorType] = None,
        new_optimization_step: bool = True,
    ) -> AcquisitionFunction:
        self._update_count += 1
        return self.prepare_acquisition_function(dataset, model, pending_points)


@random_seed
@pytest.mark.parametrize(
    "rule_fn, num_query_points",
    [
        (lambda acq: EfficientGlobalOptimization(acq, num_query_points=4), 4),
        (lambda acq: AsynchronousGreedy(acq), 1),
    ],
)
# As a side effect, this test ensures that EGO and AsynchronousGreedy
# behave similarly in sync mode
def test_greedy_batch_acquisition_rule_acquire(
    rule_fn: Callable[
        # callable input type(s)
        [_GreedyBatchModelMinusMeanMaximumSingleBuilder],
        # callable output type
        AcquisitionRule[TensorType, Box]
        | AcquisitionRule[State[TensorType, AsynchronousGreedy.State], Box],
    ],
    num_query_points: int,
) -> None:
Example #21
                                 ExpectedImprovement, Product)


class ProbabilityOfValidity(SingleModelAcquisitionBuilder):
    def prepare_acquisition_function(self, model, dataset=None):
        def acquisition(at):
            mean, _ = model.predict_y(tf.squeeze(at, -2))
            return mean

        return acquisition


ei = ExpectedImprovement()
pov = ProbabilityOfValidity()
acq_fn = Product(ei.using(OBJECTIVE), pov.using(FAILURE))
rule = EfficientGlobalOptimization(acq_fn)  # type: ignore

# %% [markdown]
# ## Run the optimizer
#
# Now, we run the Bayesian optimization loop for twenty steps, and print the location of the query point corresponding to the minimum observation.

# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)

result = bo.optimize(20, initial_data, models, rule).final_result.unwrap()

arg_min_idx = tf.squeeze(
    tf.argmin(result.datasets[OBJECTIVE].observations, axis=0))
print(
    f"query point: {result.datasets[OBJECTIVE].query_points[arg_min_idx, :]}")
Example #22
initial_models = trieste.utils.map_values(create_bo_model, initial_data)

# %% [markdown]
# ## Define the acquisition process
#
# We can construct the _expected constrained improvement_ acquisition function defined in <cite data-cite="gardner14">[Gardner et al.](http://proceedings.mlr.press/v32/gardner14.html)</cite>, where they use the probability of feasibility with respect to the constraint model.

# %%
from trieste.acquisition.rule import EfficientGlobalOptimization

pof = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim.threshold)
eci = trieste.acquisition.ExpectedConstrainedImprovement(
    OBJECTIVE, pof.using(CONSTRAINT)
)
rule = EfficientGlobalOptimization(eci)  # type: ignore

# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop. We obtain the final objective and constraint data using `.try_get_final_datasets()`.

# %%
num_steps = 20
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)

data = bo.optimize(
    num_steps, initial_data, initial_models, rule, track_state=False
).try_get_final_datasets()

# %% [markdown]
Example #23
    EfficientGlobalOptimization,
    ThompsonSampling,
    TrustRegion,
)
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.data import Dataset
from trieste.models import GaussianProcessRegression
from trieste.space import Box
from trieste.utils.objectives import BRANIN_MINIMIZERS, BRANIN_MINIMUM, branin, mk_observer


@random_seed
@pytest.mark.parametrize(
    "num_steps, acquisition_rule",
    [
        (20, EfficientGlobalOptimization()),
        (
            15,
            EfficientGlobalOptimization(
                BatchMonteCarloExpectedImprovement(sample_size=500).using(OBJECTIVE),
                num_query_points=2,
            ),
        ),
        (15, TrustRegion()),
        (17, ThompsonSampling(500, 3)),
    ],
)
def test_optimizer_finds_minima_of_the_branin_function(
    num_steps: int, acquisition_rule: AcquisitionRule
) -> None:
    search_space = Box([0, 0], [1, 1])
Example #24
                    }
                })), 1))

    return ModelStack(*gprs)


# %%
model = build_stacked_independent_objectives_model(initial_data, num_objective)

# %% [markdown]
# ## Define the acquisition function
# Here we use the [EHVI](https://link.springer.com/article/10.1007/s10898-019-00798-7)-based `ExpectedHypervolumeImprovement` acquisition function:

# %%
ehvi = ExpectedHypervolumeImprovement()
rule: EfficientGlobalOptimization = EfficientGlobalOptimization(builder=ehvi)

# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop

# %%
num_steps = 30
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(num_steps, initial_data, model, acquisition_rule=rule)

# %% [markdown]
# To conclude, we visualize the queried data across the design space.
# We represent the initial points as crosses and the points obtained by our optimization loop as dots.
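
# %% [markdown]
# Beyond the scatter plot, a short sketch (assuming the Pareto utilities are importable as below; newer trieste versions expose them under `trieste.acquisition.multi_objective`) of extracting the non-dominated observations and their hypervolume:

# %%
from trieste.utils.pareto import Pareto, get_reference_point

observations = result.try_get_final_dataset().observations
pareto = Pareto(observations)
ref_point = get_reference_point(pareto.front)
print("Pareto-optimal observations:", pareto.front.numpy())
print("hypervolume indicator:", pareto.hypervolume_indicator(ref_point).numpy())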
Example #25
    ax=ax[0, 0],
    num_init=len(dataset.query_points),
    idx_best=arg_min_idx,
)

# %% [markdown]
# ## Batch-sequential strategy
#
# Sometimes it is practically convenient to query several points at a time. We can do this in `trieste` using a `BatchAcquisitionRule` and a `BatchAcquisitionFunctionBuilder`, which together recommend a number of query points given by `num_query_points` (instead of one, as before). The optimizer then queries the observer at all these points simultaneously.
# Here we use the `BatchMonteCarloExpectedImprovement` function. Note that this acquisition function is computed using a Monte-Carlo method (so it requires a `sample_size`), but uses a reparametrisation trick that makes it deterministic.

# %%
from trieste.acquisition.rule import EfficientGlobalOptimization

qei = trieste.acquisition.BatchMonteCarloExpectedImprovement(sample_size=1000)
batch_rule: EfficientGlobalOptimization[Box] = EfficientGlobalOptimization(
    num_query_points=3, builder=qei.using(OBJECTIVE))

model = build_model(initial_data[OBJECTIVE])
batch_result = bo.optimize(5, initial_data, model, acquisition_rule=batch_rule)

# %% [markdown]
# We can again visualise the GP model and query points.

# %%
batch_dataset = batch_result.try_get_final_datasets()[OBJECTIVE]
batch_query_points = batch_dataset.query_points.numpy()
batch_observations = batch_dataset.observations.numpy()
fig = plot_gp_plotly(
    batch_result.try_get_final_models()[OBJECTIVE].model,  # type: ignore
    search_space.lower,
    search_space.upper,
Example #26
# We'll need a custom acquisition function for this problem. This function is the product of the expected improvement for the objective data and the predictive mean for the failure data. We can specify which data and model to use in each acquisition function builder with the `OBJECTIVE` and `FAILURE` labels. We'll optimize the function using `EfficientGlobalOptimization`.

# %%
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.acquisition import (
    SingleModelAcquisitionBuilder, ExpectedImprovement, Product, lower_confidence_bound
)

class ProbabilityOfValidity(SingleModelAcquisitionBuilder):
    def prepare_acquisition_function(self, dataset, model):
        return lower_confidence_bound(model, 0.0)

ei = ExpectedImprovement()
pov = ProbabilityOfValidity()
acq_fn = Product(ei.using(OBJECTIVE), pov.using(FAILURE))
rule: EfficientGlobalOptimization[Box] = EfficientGlobalOptimization(acq_fn)

# %% [markdown]
# ## Run the optimizer
#
# Now, we run the Bayesian optimization loop for twenty steps, and print the location of the query point corresponding to the minimum observation.

# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)

result = bo.optimize(20, initial_data, models, rule).final_result.unwrap()

arg_min_idx = tf.squeeze(tf.argmin(result.datasets[OBJECTIVE].observations, axis=0))
print(f"query point: {result.datasets[OBJECTIVE].query_points[arg_min_idx, :]}")

# %% [markdown]