def test_efficient_global_optimization(optimizer: AcquisitionOptimizer[Box]) -> None:
    """EGO should maximize the builder's acquisition function and, on reuse,
    refresh it via ``update_acquisition_function`` rather than rebuilding it."""

    class _TrackingNegQuadratic(SingleModelAcquisitionBuilder):
        # Acquisition peaking at x = 1; records whether update was called.
        def __init__(self) -> None:
            self._updated = False

        def prepare_acquisition_function(
            self,
            model: ProbabilisticModel,
            dataset: Optional[Dataset] = None,
        ) -> AcquisitionFunction:
            return lambda x: -quadratic(tf.squeeze(x, -2) - 1)

        def update_acquisition_function(
            self,
            function: AcquisitionFunction,
            model: ProbabilisticModel,
            dataset: Optional[Dataset] = None,
        ) -> AcquisitionFunction:
            self._updated = True
            return function

    function = _TrackingNegQuadratic()
    search_space = Box([-10], [10])
    ego = EfficientGlobalOptimization(function, optimizer)
    data, model = empty_dataset([1], [1]), QuadraticMeanAndRBFKernel(x_shift=1)

    # First acquisition builds the function from scratch...
    query_point = ego.acquire_single(search_space, model, dataset=data)
    npt.assert_allclose(query_point, [[1]], rtol=1e-4)
    assert not function._updated

    # ...subsequent acquisitions update the existing one instead.
    query_point = ego.acquire(search_space, {OBJECTIVE: model})
    npt.assert_allclose(query_point, [[1]], rtol=1e-4)
    assert function._updated
def test_efficient_global_optimization(optimizer: AcquisitionOptimizer[Box]) -> None:
    """EGO with a negated-quadratic acquisition (peak at x = 1) should return
    the maximizer, both via ``acquire_single`` and the tagged ``acquire``."""

    class _NegQuadratic(SingleModelAcquisitionBuilder):
        def prepare_acquisition_function(
            self, dataset: Dataset, model: ProbabilisticModel
        ) -> AcquisitionFunction:
            return lambda x: -quadratic(tf.squeeze(x, -2) - 1)

    search_space = Box([-10], [10])
    rule = EfficientGlobalOptimization(_NegQuadratic(), optimizer)
    dataset = empty_dataset([1], [1])
    model = QuadraticMeanAndRBFKernel(x_shift=1)

    query_point, _ = rule.acquire_single(search_space, dataset, model)
    npt.assert_allclose(query_point, [[1]], rtol=1e-4)

    query_point, _ = rule.acquire(search_space, {OBJECTIVE: dataset}, {OBJECTIVE: model})
    npt.assert_allclose(query_point, [[1]], rtol=1e-4)
# ## Batch acquisition functions. # To perform batch BO, we must define a batch acquisition function. Two popular batch acquisition functions supported in Trieste are `BatchMonteCarloExpectedImprovement` and the `LocalPenalizationAcquisitionFunction`. Although both of these acquisition functions recommend batches of diverse query points, the batches are chosen in very different ways. `BatchMonteCarloExpectedImprovement` jointly allocates the batch of points as those with the largest expected improvement over our current best solution. In contrast, the `LocalPenalizationAcquisitionFunction` greedily builds the batch, sequentially adding the maximizers of the standard (non-batch) `ExpectedImprovement` function penalized around the current pending batch points. In practice, `BatchMonteCarloExpectedImprovement` can be expected to have superior performance for small batches (`batch_size`<10) but scales poorly for larger batches. # # Note that both of these acquisition functions have controllable parameters. In particular, `BatchMonteCarloExpectedImprovement` is computed using a Monte-Carlo method (so it requires a `sample_size`), but uses a reparametrisation trick to make it deterministic. The `LocalPenalizationAcquisitionFunction` has parameters controlling the degree of penalization that must be estimated from a random sample of `num_samples` model predictions. # %% [markdown] # First, we collect the batch of ten points recommended by `BatchMonteCarloExpectedImprovement` ... # %% from trieste.acquisition import BatchMonteCarloExpectedImprovement from trieste.acquisition.rule import EfficientGlobalOptimization batch_ei_acq = BatchMonteCarloExpectedImprovement(sample_size=1000) batch_ei_acq_rule = EfficientGlobalOptimization( # type: ignore num_query_points=10, builder=batch_ei_acq) points_chosen_by_batch_ei, _ = batch_ei_acq_rule.acquire_single( search_space, initial_data, model) # %% [markdown] # and then do the same with `LocalPenalizationAcquisitionFunction`. 
# %% from trieste.acquisition import LocalPenalizationAcquisitionFunction local_penalization_acq = LocalPenalizationAcquisitionFunction(search_space, num_samples=1000) local_penalization_acq_rule = EfficientGlobalOptimization( # type: ignore num_query_points=10, builder=local_penalization_acq) points_chosen_by_local_penalization, _ = local_penalization_acq_rule.acquire_single( search_space, initial_data, model) # %% [markdown]
# In practice, `BatchMonteCarloExpectedImprovement` can be expected to have superior performance for small batches (`batch_size`<5) but scales poorly for larger batches. # # Note that all these acquisition functions have controllable parameters. In particular, `BatchMonteCarloExpectedImprovement` is computed using a Monte-Carlo method (so it requires a `sample_size`), but uses a reparametrisation trick to make it deterministic. The `LocalPenalizationAcquisitionFunction` has parameters controlling the degree of penalization that must be estimated from a random sample of `num_samples` model predictions (we recommend at least 1_000 for each search space dimension). Similarly, `GIBBON` requires a `grid_size` parameter that controls its approximation accuracy (which should also be larger than 1_000 for each search space dimension). # # %% [markdown] # First, we collect the batch of ten points recommended by `BatchMonteCarloExpectedImprovement` ... # %% from trieste.acquisition import BatchMonteCarloExpectedImprovement from trieste.acquisition.rule import EfficientGlobalOptimization batch_ei_acq = BatchMonteCarloExpectedImprovement(sample_size=1000, jitter=1e-5) batch_ei_acq_rule = EfficientGlobalOptimization( # type: ignore num_query_points=10, builder=batch_ei_acq) points_chosen_by_batch_ei = batch_ei_acq_rule.acquire_single(search_space, model, dataset=initial_data) # %% [markdown] # then we do the same with `LocalPenalizationAcquisitionFunction` ... # %% from trieste.acquisition import LocalPenalizationAcquisitionFunction local_penalization_acq = LocalPenalizationAcquisitionFunction(search_space, num_samples=2000) local_penalization_acq_rule = EfficientGlobalOptimization( # type: ignore num_query_points=10, builder=local_penalization_acq) points_chosen_by_local_penalization = local_penalization_acq_rule.acquire_single( search_space, model, dataset=initial_data) # %% [markdown] # and finally we use `GIBBON`.