Example #1
def test_ask_tell_optimizer_updates_state_with_new_data(
    search_space: Box,
    init_dataset: Dataset,
    model: TrainableProbabilisticModel,
    acquisition_rule: AcquisitionRule[TensorType, Box],
) -> None:
    new_data = mk_dataset([[1.0]], [[1.0]])
    ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)

    ask_tell.tell(new_data)
    state_record: Record[None] = ask_tell.to_record()

    assert_datasets_allclose(state_record.dataset, init_dataset + new_data)
Example #2
def test_ask_tell_optimizer_tell_validates_keys(
    search_space: Box,
    init_dataset: Dataset,
    model: TrainableProbabilisticModel,
    acquisition_rule: AcquisitionRule[TensorType, Box],
) -> None:
    dataset_with_key_1 = {"1": init_dataset}
    model_with_key_1 = {"1": model}
    new_data_with_key_2 = {"2": mk_dataset([[1.0]], [[1.0]])}

    ask_tell = AskTellOptimizer(search_space, dataset_with_key_1,
                                model_with_key_1, acquisition_rule)
    with pytest.raises(ValueError):
        ask_tell.tell(new_data_with_key_2)
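
For contrast, a minimal sketch of the passing case (reusing the fixtures above; the data values are arbitrary): `tell` accepts new data whose tag matches the one supplied at construction.

ask_tell = AskTellOptimizer(search_space, {"1": init_dataset},
                            {"1": model}, acquisition_rule)
ask_tell.tell({"1": mk_dataset([[2.0]], [[2.0]])})  # same tag "1": accepted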
Example #3
def test_ask_tell_optimizer_trains_model(
    search_space: Box,
    init_dataset: Dataset,
    model: TrainableProbabilisticModel,
    acquisition_rule: AcquisitionRule[TensorType, Box],
) -> None:
    new_data = mk_dataset([[1.0]], [[1.0]])
    ask_tell = AskTellOptimizer(
        search_space, init_dataset, model, acquisition_rule, fit_model=False
    )

    ask_tell.tell(new_data)
    state_record: Record[None] = ask_tell.to_record()

    assert state_record.model.optimize_count == 1  # type: ignore
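
The `optimize_count` attribute is not part of `TrainableProbabilisticModel`; it comes from the test's model fixture. A hypothetical sketch of what such a fixture could look like (the class name and delegation strategy are illustrative, not Trieste API): wrap any trainable model and count how often `optimize` is invoked.

class OptimizeCountingWrapper:  # hypothetical test double
    def __init__(self, wrapped: TrainableProbabilisticModel) -> None:
        self._wrapped = wrapped
        self.optimize_count = 0

    def optimize(self, dataset: Dataset) -> None:
        self.optimize_count += 1  # record each training call
        self._wrapped.optimize(dataset)

    def __getattr__(self, name: str):
        # delegate everything else (predict, sample, update, ...) to the model
        return getattr(self._wrapped, name)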
Example #4
    # (this excerpt starts mid-loop: `finished_workers` is produced by earlier,
    # elided code that polls the Ray workers)
    new_observations = [
        observation for worker in finished_workers
        for observation in ray.get(worker)
    ]

    # `new_observations` is a list of (point, observation value) tuples;
    # here we turn it into a Dataset and pass it to Trieste via `tell`
    points_observed += len(new_observations)
    new_data = Dataset(
        query_points=tf.constant([x[0] for x in new_observations],
                                 dtype=tf.float64),
        observations=tf.constant([x[1] for x in new_observations],
                                 dtype=tf.float64),
    )
    async_bo.tell(new_data)

    # get a new batch of points
    # and launch workers for each point in the batch
    points = async_bo.ask().numpy()
    np.apply_along_axis(launch_worker, axis=1, arr=points)
    finished_workers = []

# %% [markdown]
# Let's plot the objective function and the points the optimization procedure explored.

# %%
from util.plotting import plot_function_2d, plot_bo_points

dataset = async_bo.to_result().try_get_final_dataset()
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
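
# %% [markdown]
# One plausible completion of this cell, using the plotting helpers imported
# above (the exact helper signatures, the objective name `scaled_branin`, and
# `num_initial_points` are assumptions based on the surrounding notebook):

# %%
query_points = dataset.query_points.numpy()

_, ax = plot_function_2d(scaled_branin,
                         search_space.lower,
                         search_space.upper,
                         contour=True)
plot_bo_points(query_points, ax[0, 0], num_initial_points, arg_min_idx)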
Example #5
# %%
import timeit

model = build_model(initial_data)
ask_tell = AskTellOptimizer(search_space, initial_data, model)

for step in range(n_steps):
    start = timeit.default_timer()
    new_point = ask_tell.ask()
    stop = timeit.default_timer()

    print(f"Time at step {step + 1}: {stop - start}")

    new_data = observer(new_point)
    ask_tell.tell(new_data)

# %% [markdown]
# Once ask-tell optimization is over, you can extract an optimization result object and perform whatever analysis you need, just like with the regular Trieste optimization interface. For instance, here we will plot the regret for each optimization step.


# %%
def plot_ask_tell_regret(ask_tell_result):
    observations = ask_tell_result.try_get_final_dataset().observations.numpy()
    arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))

    suboptimality = observations - SCALED_BRANIN_MINIMUM.numpy()
    ax = plt.gca()
    plot_regret(suboptimality,
                ax,
                num_init=num_initial_points,
                idx_best=arg_min_idx)
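
# %% [markdown]
# The helper can then be applied to the result extracted from the optimizer, for example:

# %%
plot_ask_tell_regret(ask_tell.to_result())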
Example #6
def test_ask_tell_optimization_finds_minima_of_the_scaled_branin_function(
    num_steps: int,
    reload_state: bool,
    acquisition_rule_fn: Callable[[], AcquisitionRule[TensorType, SearchSpace]]
    | Callable[
        [],
        AcquisitionRule[
            State[TensorType, AsynchronousGreedy.State | TrustRegion.State],
            Box,
        ],
    ],
) -> None:
    # For the case where the optimization state is saved and reloaded on each
    # iteration, we need a fresh acquisition rule object to imitate real-life
    # usage. Hence an acquisition rule factory is passed in instead of a rule
    # object itself; it is called to create a new rule whenever the test needs one.

    search_space = BRANIN_SEARCH_SPACE

    def build_model(data: Dataset) -> GaussianProcessRegression:
        variance = tf.math.reduce_variance(data.observations)
        kernel = gpflow.kernels.Matern52(variance,
                                         tf.constant([0.2, 0.2], tf.float64))
        scale = tf.constant(1.0, dtype=tf.float64)
        kernel.variance.prior = tfp.distributions.LogNormal(
            tf.constant(-2.0, dtype=tf.float64), scale)
        kernel.lengthscales.prior = tfp.distributions.LogNormal(
            tf.math.log(kernel.lengthscales), scale)
        gpr = gpflow.models.GPR((data.query_points, data.observations),
                                kernel,
                                noise_variance=1e-5)
        gpflow.utilities.set_trainable(gpr.likelihood, False)
        return GaussianProcessRegression(gpr)

    initial_query_points = search_space.sample(5)
    observer = mk_observer(scaled_branin)
    initial_data = observer(initial_query_points)
    model = build_model(initial_data)

    ask_tell = AskTellOptimizer(search_space, initial_data, model,
                                acquisition_rule_fn())

    for _ in range(num_steps):
        # two scenarios are tested here, depending on the `reload_state` parameter:
        # in the first, the same optimizer object is used throughout;
        # in the second, a new optimizer is created at each step from the saved state
        new_point = ask_tell.ask()

        if reload_state:
            state: Record[
                None
                | State[TensorType, AsynchronousGreedy.State | TrustRegion.State]
            ] = ask_tell.to_record()
            written_state = pickle.dumps(state)

        new_data_point = observer(new_point)

        if reload_state:
            state = pickle.loads(written_state)
            ask_tell = AskTellOptimizer.from_record(state, search_space,
                                                    acquisition_rule_fn())

        ask_tell.tell(new_data_point)

    result: OptimizationResult[
        None
        | State[TensorType, AsynchronousGreedy.State | TrustRegion.State]
    ] = ask_tell.to_result()
    dataset = result.try_get_final_dataset()

    arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))

    best_y = dataset.observations[arg_min_idx]
    best_x = dataset.query_points[arg_min_idx]

    relative_minimizer_err = tf.abs(
        (best_x - BRANIN_MINIMIZERS) / BRANIN_MINIMIZERS)
    # these accuracies are the current best for the given number of optimization
    # steps, which makes this a regression test
    assert tf.reduce_any(tf.reduce_all(relative_minimizer_err < 0.05, axis=-1),
                         axis=0)
    npt.assert_allclose(best_y, SCALED_BRANIN_MINIMUM, rtol=0.005)