Example #1
def test_ask_tell_optimizer_returns_optimization_result(
    search_space: Box,
    init_dataset: Dataset,
    model: TrainableProbabilisticModel,
    acquisition_rule: AcquisitionRule[TensorType, Box],
) -> None:
    ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)

    result: OptimizationResult[None] = ask_tell.to_result()

    assert_datasets_allclose(result.try_get_final_dataset(), init_dataset)
    assert isinstance(result.try_get_final_model(), type(model))
    # tell the optimizer about the new observation
    # (`new_data` is assumed to be a Dataset built from a finished worker's result;
    # the lines constructing it are truncated from this snippet)
    async_bo.tell(new_data)

    # get a new batch of points
    # and launch workers for each point in the batch
    points = async_bo.ask().numpy()
    np.apply_along_axis(launch_worker, axis=1, arr=points)
    finished_workers = []

# %% [markdown]
# Let's plot the objective function and the points the optimization procedure explored.

# %%
from util.plotting import plot_function_2d, plot_bo_points

dataset = async_bo.to_result().try_get_final_dataset()
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
_, ax = plot_function_2d(scaled_branin,
                         search_space.lower,
                         search_space.upper,
                         grid_density=30,
                         contour=True)

plot_bo_points(query_points,
               ax[0, 0],
               num_initial_points,
               arg_min_idx,
               c_pass="******")
Example #3
# the start of this snippet is truncated: the signature and first line below are
# assumed from the call `plot_ask_tell_regret(ask_tell.to_result())` further down
def plot_ask_tell_regret(ask_tell_result):
    observations = ask_tell_result.try_get_final_dataset().observations.numpy()
    arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))

    suboptimality = observations - SCALED_BRANIN_MINIMUM.numpy()
    ax = plt.gca()
    plot_regret(suboptimality,
                ax,
                num_init=num_initial_points,
                idx_best=arg_min_idx)

    ax.set_yscale("log")
    ax.set_ylabel("Regret")
    ax.set_ylim(0.001, 100)
    ax.set_xlabel("# evaluations")


plot_ask_tell_regret(ask_tell.to_result())

# %% [markdown]
# ## Model selection: using only Ask part
#
# We now turn to a slightly more complex use case. Let's suppose we want to switch between two models dynamically during the optimization loop, depending on some criterion, e.g. because we want to be able to train a model outside of Trieste. In this case we can only use the Ask part of the Ask-Tell interface.

# %%
model1 = build_model(initial_data,
                     kernel_func=lambda v: gpflow.kernels.RBF(variance=v))
model2 = build_model(initial_data,
                     kernel_func=lambda v: gpflow.kernels.Matern32(variance=v))

dataset = initial_data
for step in range(n_steps):
    # this criterion is meaningless
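    # (what follows is a hedged sketch of how this loop might continue, using only
    # the Ask part of the interface; `observer` is assumed to be defined in an
    # earlier cell, and AskTellOptimizer is built with its default acquisition rule)
    if step % 2 == 0:
        model = model1
    else:
        model = model2

    # build a fresh Ask-Tell optimizer from the current dataset and the chosen model,
    # then use only its Ask part to get a new point to observe
    ask_tell = AskTellOptimizer(search_space, dataset, model)
    new_point = ask_tell.ask()

    # observe the point and grow the dataset ourselves
    new_data_point = observer(new_point)
    dataset = dataset + new_data_point

    # train both models outside of Trieste so they stay up to date
    model1.update(dataset)
    model1.optimize(dataset)
    model2.update(dataset)
    model2.optimize(dataset)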
Example #4
def test_ask_tell_optimization_finds_minima_of_the_scaled_branin_function(
    num_steps: int,
    reload_state: bool,
    acquisition_rule_fn: Callable[[], AcquisitionRule[TensorType, SearchSpace]]
    | Callable[
        [],
        AcquisitionRule[
            State[TensorType, AsynchronousGreedy.State | TrustRegion.State], Box
        ],
    ],
) -> None:
    # For the case when optimization state is saved and reloaded on each iteration,
    # we need to use a new acquisition rule object to imitate real-life usage;
    # hence an acquisition rule factory is passed in instead of a rule object itself,
    # and it is then called to create a new rule whenever needed in the test

    search_space = BRANIN_SEARCH_SPACE

    def build_model(data: Dataset) -> GaussianProcessRegression:
        variance = tf.math.reduce_variance(data.observations)
        kernel = gpflow.kernels.Matern52(variance,
                                         tf.constant([0.2, 0.2], tf.float64))
        scale = tf.constant(1.0, dtype=tf.float64)
        kernel.variance.prior = tfp.distributions.LogNormal(
            tf.constant(-2.0, dtype=tf.float64), scale)
        kernel.lengthscales.prior = tfp.distributions.LogNormal(
            tf.math.log(kernel.lengthscales), scale)
        gpr = gpflow.models.GPR((data.query_points, data.observations),
                                kernel,
                                noise_variance=1e-5)
        gpflow.utilities.set_trainable(gpr.likelihood, False)
        return GaussianProcessRegression(gpr)

    initial_query_points = search_space.sample(5)
    observer = mk_observer(scaled_branin)
    initial_data = observer(initial_query_points)
    model = build_model(initial_data)

    ask_tell = AskTellOptimizer(search_space, initial_data, model,
                                acquisition_rule_fn())

    for _ in range(num_steps):
        # two scenarios are tested here, depending on the `reload_state` parameter:
        # in the first, the same optimizer object is used throughout;
        # in the second, a new optimizer is created at each step from the saved state
        new_point = ask_tell.ask()

        if reload_state:
            state: Record[
                None
                | State[TensorType, AsynchronousGreedy.State | TrustRegion.State]
            ] = ask_tell.to_record()
            written_state = pickle.dumps(state)

        new_data_point = observer(new_point)

        if reload_state:
            state = pickle.loads(written_state)
            ask_tell = AskTellOptimizer.from_record(state, search_space,
                                                    acquisition_rule_fn())

        ask_tell.tell(new_data_point)

    result: OptimizationResult[
        None | State[TensorType, AsynchronousGreedy.State | TrustRegion.State]
    ] = ask_tell.to_result()
    dataset = result.try_get_final_dataset()

    arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))

    best_y = dataset.observations[arg_min_idx]
    best_x = dataset.query_points[arg_min_idx]

    relative_minimizer_err = tf.abs(
        (best_x - BRANIN_MINIMIZERS) / BRANIN_MINIMIZERS)
    # these accuracies are the current best for the given number of optimization steps,
    # which makes this a regression test
    assert tf.reduce_any(tf.reduce_all(relative_minimizer_err < 0.05, axis=-1),
                         axis=0)
    npt.assert_allclose(best_y, SCALED_BRANIN_MINIMUM, rtol=0.005)
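
The `acquisition_rule_fn` factories in the test above would typically be supplied via pytest parametrization. Below is a minimal sketch of how that might look, assuming trieste's EfficientGlobalOptimization and TrustRegion rules are importable from trieste.acquisition.rule; the step counts shown are illustrative rather than taken from the original test.

import pytest

from trieste.acquisition.rule import EfficientGlobalOptimization, TrustRegion


# factories (not rule instances) are parametrized so that each test invocation
# can build a fresh acquisition rule, as the comments in the test above explain
@pytest.mark.parametrize("reload_state", [False, True])
@pytest.mark.parametrize(
    "num_steps, acquisition_rule_fn",
    [
        (20, lambda: EfficientGlobalOptimization()),
        (15, lambda: TrustRegion()),
    ],
)
def test_ask_tell_optimization_finds_minima_of_the_scaled_branin_function(
    num_steps, reload_state, acquisition_rule_fn
):
    ...  # test body as shown above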