Example #1
from util.plotting import plot_bo_points, plot_function_2d


def plot_active_learning_query(result, bo_iter, num_initial_points, query_points, num_query=1):
    """Plot the predictive-variance contour and the queried points after each BO iteration.

    Uses the global `search_space` for the plotting bounds.
    """

    for i in range(bo_iter):

        def pred_var(x):
            _, var = result.history[i].models["OBJECTIVE"].model.predict_f(x)
            return var

        _, ax = plot_function_2d(
            pred_var,
            search_space.lower - 0.01,
            search_space.upper + 0.01,
            grid_density=100,
            contour=True,
            colorbar=True,
            figsize=(10, 6),
            title=["Variance contour with queried points at iteration " + str(i + 1)],
            xlabel="$X_1$",
            ylabel="$X_2$",
        )

        plot_bo_points(
            query_points[: num_initial_points + (i * num_query)], ax[0, 0], num_initial_points
        )
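
# A usage sketch (assumed names): `result`, `num_steps`, `num_initial_points` and `dataset`
# are taken to come from an earlier active-learning run; this plots the variance contours
# and queried points for every iteration of that run.
plot_active_learning_query(result, num_steps, num_initial_points, dataset.query_points.numpy())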
print(f"query point: {query_points[arg_min_idx, :]}")
print(f"observation: {observations[arg_min_idx, :]}")

# %% [markdown]
# We can visualise how the optimizer performed by plotting all the acquired observations, along with the true function values and optima, either in a two-dimensional contour plot ...

# %%
from util.plotting import plot_bo_points, plot_function_2d

_, ax = plot_function_2d(scaled_branin,
                         search_space.lower,
                         search_space.upper,
                         grid_density=30,
                         contour=True)
plot_bo_points(query_points, ax[0, 0], num_initial_points, arg_min_idx)
ax[0, 0].set_xlabel(r'$x_1$')
ax[0, 0].set_ylabel(r'$x_2$')

# %% [markdown]
# ... or as a three-dimensional plot

# %%
from util.plotting_plotly import add_bo_points_plotly, plot_function_plotly

fig = plot_function_plotly(scaled_branin,
                           search_space.lower,
                           search_space.upper,
                           grid_density=20)
fig.update_layout(height=500, width=500)
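
# ... and overlay the acquired observations on the surface (a sketch, assuming the same
# `query_points`, `observations`, `num_initial_points` and `arg_min_idx` as in the contour plot above).
fig = add_bo_points_plotly(
    x=query_points[:, 0],
    y=query_points[:, 1],
    z=observations[:, 0],
    num_init=num_initial_points,
    idx_best=arg_min_idx,
    fig=fig,
    figrow=1,
    figcol=1,
)
fig.show()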
# Guard assumed here: only attempt recovery if the previous optimization step failed.
if not result.is_ok:
    observer.manual_fix()

    result, new_history = bo.optimize(
        15 - len(history),
        history[-1].datasets,
        history[-1].models,
        acquisition_rule,
        history[-1].acquisition_state
    ).astuple()

    history.extend(new_history)

# %% [markdown]
# We can repeat this until we've spent our optimization budget, using a loop if appropriate (a sketch of such a loop follows the plot below). But here, we'll just plot the data if it exists, checking `result`'s `is_ok` attribute first.

# %%
from util.plotting import plot_bo_points, plot_function_2d

if result.is_ok:
    data = result.unwrap().datasets[OBJECTIVE]
    arg_min_idx = tf.squeeze(tf.argmin(data.observations, axis=0))
    _, ax = plot_function_2d(
        branin, search_space.lower, search_space.upper, 30, contour=True
    )
    plot_bo_points(data.query_points.numpy(), ax[0, 0], 5, arg_min_idx)
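
# %% [markdown]
# If we did want to keep retrying automatically until the 15-step budget is spent, a loop along
# these lines would do it (a sketch reusing the `bo`, `observer`, `acquisition_rule` and `history`
# objects from above).

# %%
while len(history) < 15 and not result.is_ok:
    observer.manual_fix()
    result, new_history = bo.optimize(
        15 - len(history),
        history[-1].datasets,
        history[-1].models,
        acquisition_rule,
        history[-1].acquisition_state,
    ).astuple()
    history.extend(new_history)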

# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
Example #4
# %% [markdown]
# We can visualise where the optimizer queried on a contour plot of the Branin with the failure region. The minimum observation can be seen along the bottom axis towards the right, outside of the failure region.

# %%
import matplotlib.pyplot as plt
from util.plotting import plot_gp_2d, plot_function_2d, plot_bo_points

mask_fail = result.datasets[FAILURE].observations.numpy().flatten().astype(
    int) == 0
fig, ax = plot_function_2d(masked_branin,
                           search_space.lower,
                           search_space.upper,
                           grid_density=50,
                           contour=True)
plot_bo_points(
    result.datasets[FAILURE].query_points.numpy(),
    ax=ax[0, 0],
    num_init=num_init_points,
    mask_fail=mask_fail,
)
plt.show()

# %% [markdown]
# We can also plot the mean and variance of the predictive distribution over the search space, first for the objective data and model ...

# %%
from util.plotting_plotly import plot_gp_plotly, add_bo_points_plotly

arg_min_idx = tf.squeeze(
    tf.argmin(result.datasets[OBJECTIVE].observations, axis=0))

fig = plot_gp_plotly(result.models[OBJECTIVE].model,
                     search_space.lower,
Example #5
if result.error is not None:
    raise result.error

final_data = result.datasets

arg_min_idx = tf.squeeze(tf.argmin(final_data[OBJECTIVE].observations, axis=0))
print(f"query point: {final_data[OBJECTIVE].query_points[arg_min_idx, :]}")

# %% [markdown]
# We can visualise where the optimizer queried on a contour plot of the Branin with the failure region. The minimum observation can be seen along the bottom axis towards the right, outside of the failure region.

# %%
mask_fail = final_data[FAILURE].observations.numpy().flatten().astype(int) == 0
fig, ax = plot_function_2d(masked_branin, mins, maxs, grid_density=50, contour=True)
plot_bo_points(
    final_data[FAILURE].query_points.numpy(),
    ax=ax[0, 0],
    num_init=num_init_points,
    mask_fail=mask_fail,
)
plt.show()

# %% [markdown]
# We can also plot the mean and variance of the predictive distribution over the search space, first for the objective data and model ...

# %%
arg_min_idx = tf.squeeze(tf.argmin(final_data[OBJECTIVE].observations, axis=0))

fig = plot_gp_plotly(regression_model, mins, maxs, grid_density=50)
fig = add_bo_points_plotly(
    x=final_data[OBJECTIVE].query_points[:, 0].numpy(),
    y=final_data[OBJECTIVE].query_points[:, 1].numpy(),
    z=final_data[OBJECTIVE].observations.numpy().flatten(),
Example #6
dataset = result.datasets[OBJECTIVE]

# %% [markdown]
# ## Visualising the result
#
# We can take a look at where we queried the observer, both the original query points (crosses) and new query points (dots), and where they lie with respect to the contours of the Branin.

# %%
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
_, ax = plot_function_2d(
    branin, lower_bound.numpy(), upper_bound.numpy(), grid_density=30, contour=True
)

plot_bo_points(query_points, ax[0, 0], num_initial_data_points, arg_min_idx)

# %% [markdown]
# We can also visualise the observations on a three-dimensional plot of the Branin. We'll add the contours of the mean and variance of the model's predictive distribution as translucent surfaces.

# %%
fig = plot_gp_plotly(gpr, lower_bound.numpy(), upper_bound.numpy(), grid_density=30)
fig = add_bo_points_plotly(
    x=query_points[:, 0],
    y=query_points[:, 1],
    z=observations[:, 0],
    num_init=num_initial_data_points,
    idx_best=arg_min_idx,
    fig=fig,
    figrow=1,
    figcol=1,
)
    np.apply_along_axis(launch_worker, axis=1, arr=points)
    finished_workers = []

# %% [markdown]
# Let's plot the objective function and the points the optimization procedure explored.

# %%
from util.plotting import plot_function_2d, plot_bo_points

dataset = async_bo.to_result().try_get_final_dataset()
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
_, ax = plot_function_2d(scaled_branin,
                         search_space.lower,
                         search_space.upper,
                         grid_density=30,
                         contour=True)

plot_bo_points(query_points,
               ax[0, 0],
               num_initial_points,
               arg_min_idx,
               c_pass="******")

# %%
ray.shutdown()  # "Undo ray.init()": terminate all the processes started in this notebook.

# %% [markdown]
# ... and visualise the data across the design space: each figure contains the contour lines of each objective function.

# %%
_, ax = plot_function_2d(
    vlmop2,
    mins,
    maxs,
    grid_density=100,
    contour=True,
    title=["Obj 1", "Obj 2"],
    figsize=(12, 6),
    colorbar=True,
    xlabel="$X_1$",
    ylabel="$X_2$",
)
plot_bo_points(initial_query_points, ax=ax[0, 0], num_init=num_initial_points)
plot_bo_points(initial_query_points, ax=ax[0, 1], num_init=num_initial_points)
plt.show()

# %% [markdown]
# ... and in the objective space. The `plot_mobo_points_in_obj_space` function automatically finds the non-dominated points and colours them in purple.

# %%
plot_mobo_points_in_obj_space(initial_data.observations)
plt.show()
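
# %% [markdown]
# A point is non-dominated if no other observed point is at least as good in both objectives and
# strictly better in at least one. A minimal NumPy sketch of that check (for illustration only,
# not the library's implementation; `non_dominated_mask` is a hypothetical helper):

# %%
import numpy as np


def non_dominated_mask(observations):
    """Boolean mask that is True for points not dominated by any other point (minimisation)."""
    obs = np.asarray(observations)
    mask = np.ones(len(obs), dtype=bool)
    for i in range(len(obs)):
        # point j dominates point i if it is no worse in every objective and better in at least one
        dominated = np.any(np.all(obs <= obs[i], axis=1) & np.any(obs < obs[i], axis=1))
        mask[i] = not dominated
    return mask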

# %% [markdown]
# ## Modelling the two functions
#
# In this example we model the two objective functions individually, each with its own Gaussian process model. For problems where the objective functions are similar, it may make sense to build a joint model instead.
#
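
# %% [markdown]
# A minimal sketch of that per-objective modelling, using plain GPflow and leaving out the
# trieste-specific model wrappers (`build_independent_gprs` and the Matern kernel choice are
# assumptions for illustration):

# %%
import gpflow


def build_independent_gprs(data):
    """Build one GPR per objective column of the observations, all sharing the same query points."""
    models = []
    for i in range(data.observations.shape[-1]):
        single_objective_data = (data.query_points, data.observations[:, i : i + 1])
        models.append(gpflow.models.GPR(single_objective_data, kernel=gpflow.kernels.Matern52()))
    return models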
Example #9
    y[mask_nan] = np.nan
    return tf.convert_to_tensor(y.reshape(-1, 1), x.dtype)


mask_fail1 = data[CONSTRAINT].observations.numpy().flatten().astype(
    int) > Sim2.threshold
mask_fail2 = data[CONSTRAINT].observations.numpy().flatten().astype(
    int) > Sim2.threshold2
mask_fail = np.logical_or(mask_fail1, mask_fail2)

import matplotlib.pyplot as plt
from util.plotting import plot_function_2d, plot_bo_points

fig, ax = plot_function_2d(masked_objective,
                           search_space.lower,
                           search_space.upper,
                           grid_density=50,
                           contour=True)
plot_bo_points(
    data[OBJECTIVE].query_points.numpy(),
    ax=ax[0, 0],
    num_init=num_initial_points,
    mask_fail=mask_fail,
)
plt.show()

# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
Example #10
# %% [markdown]
#
# We can take a look at where we queried the observer, both the original query points (crosses) and
# new query points (dots), and where they lie with respect to the contours of the Branin.

# %%
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
_, ax = plot_function_2d(branin,
                         lower_bound.numpy(),
                         upper_bound.numpy(),
                         grid_density=30,
                         contour=True)

plot_bo_points(query_points,
               ax=ax[0, 0],
               num_init=num_initial_data_points,
               idx_best=arg_min_idx)

# %% [markdown]
# We can also visualise the observations on a three-dimensional plot of the Branin. We'll add
# the contours of the mean and variance of the model's predictive distribution as translucent
# surfaces.

# %%
fig = plot_gp_plotly(gpr,
                     lower_bound.numpy(),
                     upper_bound.numpy(),
                     grid_density=30)
fig = add_bo_points_plotly(
    x=query_points[:, 0],
    y=query_points[:, 1],