Example #1
def test_optimize_continuous_recovery_runs(num_failed_runs: int,
                                           num_recovery_runs: int) -> None:

    scipy_minimize = gpflow.optimizers.Scipy.minimize
    failed_runs = 0

    def mock_minimize(self: Scipy, *args: Any,
                      **kwargs: Any) -> OptimizeResult:
        nonlocal failed_runs
        result = scipy_minimize(self, *args, **kwargs)
        if failed_runs < num_failed_runs:
            failed_runs += 1
            result.success = False
        else:
            result.success = True
        return result

    with unittest.mock.patch("gpflow.optimizers.Scipy.minimize",
                             mock_minimize):
        optimizer = generate_continuous_optimizer(
            num_optimization_runs=1, num_recovery_runs=num_recovery_runs)
        if num_failed_runs > num_recovery_runs:
            with pytest.raises(FailedOptimizationError):
                optimizer(Box([-1], [1]), _quadratic_sum([0.5]))
        else:
            optimizer(Box([-1], [1]), _quadratic_sum([0.5]))
Example #2
def test_optimize_batch(search_space: Box, acquisition: AcquisitionFunction,
                        maximizer: TensorType, batch_size: int) -> None:
    batch_size_one_optimizer = generate_continuous_optimizer()
    batch_optimizer = batchify(batch_size_one_optimizer, batch_size)
    points = batch_optimizer(search_space, acquisition)
    assert points.shape == [batch_size] + search_space.lower.shape
    for point in points:
        npt.assert_allclose(tf.expand_dims(point, 0), maximizer, rtol=2e-4)
Example #3
def test_generate_continuous_optimizer_raises_with_invalid_init_params(
) -> None:
    with pytest.raises(ValueError):
        generate_continuous_optimizer(num_initial_samples=-5)
    with pytest.raises(ValueError):
        generate_continuous_optimizer(num_optimization_runs=-5)
    with pytest.raises(ValueError):
        generate_continuous_optimizer(num_optimization_runs=5,
                                      num_initial_samples=4)
    with pytest.raises(ValueError):
        generate_continuous_optimizer(num_recovery_runs=-5)
Example #4
def test_optimize_continuous_raises_for_impossible_optimization(
        num_optimization_runs: int, num_recovery_runs: int) -> None:
    search_space = Box([-1, -1], [1, 2])
    optimizer = generate_continuous_optimizer(
        num_optimization_runs=num_optimization_runs,
        num_recovery_runs=num_recovery_runs)
    with pytest.raises(FailedOptimizationError) as e:
        optimizer(search_space, _delta_function(10))
    assert (str(e.value) == f"""
                    Acquisition function optimization failed,
                    even after {num_recovery_runs + num_optimization_runs} restarts.
                    """)
Example #5
 "num_steps, acquisition_rule, convergence_threshold",
 [
     pytest.param(
         20,
         EfficientGlobalOptimization(
             ExpectedHypervolumeImprovement().using(OBJECTIVE)),
         -3.65,
         id="ehvi_vlmop2",
     ),
     pytest.param(
         15,
         EfficientGlobalOptimization(
             BatchMonteCarloExpectedHypervolumeImprovement(
                 sample_size=500).using(OBJECTIVE),
             num_query_points=2,
             optimizer=generate_continuous_optimizer(
                 num_initial_samples=500),
         ),
         -3.44,
         id="qehvi_vlmop2_q_2",
     ),
     pytest.param(
         10,
         EfficientGlobalOptimization(
             BatchMonteCarloExpectedHypervolumeImprovement(
                 sample_size=250).using(OBJECTIVE),
             num_query_points=4,
             optimizer=generate_continuous_optimizer(
                 num_initial_samples=500),
         ),
         -3.2095,
         id="qehvi_vlmop2_q_4",
# %% [markdown]
# ## Active learning using predictive variance
#
# For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance`, which chooses points where we are most uncertain, i.e. points at which the predictive posterior covariance matrix has a large determinant, as discussed in <cite data-cite="MacKay1992"/>. Note that this implies our model must have a `predict_joint` method in order to return the full covariance, which is likely to be expensive to compute.
#
# We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving on to batch active learning. In both cases, we can use trieste's `BayesianOptimizer` to run the active learning steps.
#

# %%
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.acquisition.function import PredictiveVariance

acq = PredictiveVariance()
rule = EfficientGlobalOptimization(
    builder=acq, optimizer=generate_continuous_optimizer(sigmoid=False)
)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
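
# %% [markdown]
# As a quick illustration of the `predict_joint` requirement mentioned above (a minimal
# sketch; the exact covariance shape depends on the model), we can ask the model for a
# joint prediction over a handful of points and get back a full covariance matrix:

# %%
sample_points = search_space.sample(4)  # four random points from the search space
_, joint_cov = model.predict_joint(sample_points)  # covariance across all four points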

# %% [markdown]
# To plot the contour of our model's variance at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`; this makes trieste record the model at each iteration.

# %%
bo_iter = 5
result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)
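
# %% [markdown]
# With `track_state=True`, the state of every step is kept in `result.history`. As a
# minimal sketch (assuming the default single-model setup), each record exposes the model
# and dataset as they were at that iteration, which is what we would loop over to plot the
# variance contour per step:

# %%
for step, record in enumerate(result.history):
    step_model = record.model  # the model as recorded at this iteration
    step_data = record.dataset  # the data observed up to this iteration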

# %% [markdown]
# Then we can retrieve our final dataset from the active learning steps.

# %%
dataset = result.try_get_final_dataset()
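
# %% [markdown]
# The resulting `Dataset` holds the query points and observations as tensors, so we can
# check that active learning added one point per iteration on top of the initial data
# (shapes shown are indicative):

# %%
print(dataset.query_points.shape)  # [num_initial_points + bo_iter, input_dim]
print(dataset.observations.shape)  # [num_initial_points + bo_iter, 1]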
Example #7
def test_optimize_batch_raises_with_invalid_batch_size() -> None:
    batch_size_one_optimizer = generate_continuous_optimizer()
    with pytest.raises(ValueError):
        batchify(batch_size_one_optimizer, -5)
Example #8
@pytest.mark.parametrize(
    "search_space, shift, expected_maximizer",
    [
        (
            Box([-1, -2], [1.5, 2.5]),
            [1.0, 4],
            [[1.0, 2.5]],
        ),  # 2D with maximum outside search space
        (
            Box([-1, -2, 1], [1.5, 2.5, 1.5]),
            [0.3, -0.4, 0.5],
            [[0.3, -0.4, 1.0]],
        ),  # 3D
    ],
)
@pytest.mark.parametrize(
    "optimizer",
    [
        generate_continuous_optimizer(),
        generate_continuous_optimizer(num_optimization_runs=3),
        generate_continuous_optimizer(num_optimization_runs=3,
                                      num_recovery_runs=0),
        generate_continuous_optimizer(sigmoid=True),
        generate_continuous_optimizer(sigmoid=True, num_optimization_runs=3),
        generate_continuous_optimizer(
            sigmoid=True, num_optimization_runs=3, num_recovery_runs=0),
        generate_continuous_optimizer(
            sigmoid=True, num_optimization_runs=1, num_initial_samples=1),
    ],
)
def test_continuous_optimizer(
    search_space: Box,
    shift: list[float],
    expected_maximizer: list[list[float]],