Example #1
import numpy as np
import matplotlib.pyplot as plt
from skopt.acquisition import gaussian_ei, gaussian_lcb

def plot_optimizer(opt, x, acq_name='EI'):
    model = opt.models[-1]
    x_model = opt.space.transform(x.tolist())

    # Plot Model(x) + contours
    y_pred, sigma = model.predict(x_model, return_std=True)
    plt.plot(x, y_pred, "g--", label=r"$\mu(x)$")
    # Shade the +/- 1 sigma credible band around the posterior mean.
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate([y_pred - sigma,
                             (y_pred + sigma)[::-1]]),
             alpha=.2, fc="g", ec="None")

    # Plot sampled points
    plt.plot(opt.Xi, opt.yi,
             "ro", label="Observations")

    if acq_name == 'EI':
        acq = gaussian_ei(x_model, model, y_opt=np.min(opt.yi),
                          **opt.acq_func_kwargs)
        acq /= acq.max()
    elif acq_name == 'LCB':
        acq = gaussian_lcb(x_model, model, **opt.acq_func_kwargs)
        acq /= acq.min()
    else:
        raise ValueError("acq_name must be 'EI' or 'LCB'")

    # shift down to make a better plot
    acq = acq - 2
    plt.plot(x, acq, "b", label="%s(x)" % acq_name)
    plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')

    # Adjust plot layout
    plt.grid()
    plt.legend(loc='best')
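A minimal way to drive this helper (a sketch, assuming a 1-D search space and the skopt Optimizer API; the objective f below is a stand-in, not from the source):

import numpy as np
import matplotlib.pyplot as plt
from skopt import Optimizer

def f(x):
    return np.sin(3 * x) + 0.1 * x  # illustrative objective (assumption)

opt = Optimizer([(-2.0, 2.0)], base_estimator="GP", acq_func="EI",
                acq_func_kwargs={"xi": 0.01}, n_initial_points=3,
                random_state=0)
for _ in range(8):
    next_x = opt.ask()
    opt.tell(next_x, f(next_x[0]))

x_grid = np.linspace(-2, 2, 400).reshape(-1, 1)
plot_optimizer(opt, x_grid, acq_name='EI')
plt.show()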
Example #2
import numpy as np
import matplotlib.pyplot as plt
from skopt.acquisition import gaussian_lcb

# black_box is assumed to be the 1-D objective defined elsewhere.
def plot_space(prev_x, prev_y, model, x_cand):
    all_x = np.reshape(np.linspace(0, 6, 100), (-1, 1))
    all_f = [black_box(xi) for xi in all_x]
    plt.plot(all_x, all_f)

    plt.plot(np.ravel(prev_x), prev_y, "ro", label="Prev points")
    lcb_vals = gaussian_lcb(all_x, model)
    plt.plot(all_x, lcb_vals, "black", label="LCB")

    y_cand = black_box(x_cand)
    plt.plot([x_cand], [y_cand], "go", markersize=10, label="Next cand")
    plt.legend(numpoints=1)
    return plt
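A sketch of how plot_space might be called, assuming black_box is the 1-D objective used above and a plain scikit-learn GP as the surrogate (the sampled points and candidate are illustrative):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

prev_x = np.reshape([1.0, 3.5, 5.0], (-1, 1))
prev_y = [black_box(xi) for xi in prev_x]
model = GaussianProcessRegressor().fit(prev_x, prev_y)

x_cand = 2.0  # candidate proposed by the acquisition step (illustrative)
plot_space(prev_x, prev_y, model, x_cand).show()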
Example #3
        # Fragment: grid (the plotting grid), x_gp (its transformed version),
        # the fitted gp, curr_func_vals, kappa and acq_func_kwargs come from
        # the enclosing loop.
        acq1 = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
        plt.plot(grid, acq1, "b", label="EI(x)")
        plt.fill_between(grid.ravel(),
                         -2.0,
                         acq1.ravel(),
                         alpha=0.3,
                         color='blue')
        acq2 = gaussian_pi(x_gp, gp, y_opt=np.min(curr_func_vals))
        plt.plot(grid, acq2, "r", label="PI(x)")
        plt.fill_between(grid.ravel(),
                         -2.0,
                         acq2.ravel(),
                         alpha=0.3,
                         color='red')
        acq3 = -gaussian_lcb(x_gp, gp, kappa=kappa)
        plt.plot(grid, acq3, "g", label="LCB(x)")
        plt.fill_between(grid.ravel(),
                         -2.0,
                         acq3.ravel(),
                         alpha=0.3,
                         color='green')

        # Weighted combination of LCB, EI and PI.
        n_candidates = 3
        lcb_weight = acq_func_kwargs.get("lcb_w", 1. / n_candidates)
        ei_weight = acq_func_kwargs.get("ei_w", 1. / n_candidates)
        pi_weight = acq_func_kwargs.get("pi_w", 1. / n_candidates)

        weights_sum = lcb_weight + ei_weight + pi_weight
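The fragment ends right after computing the normalising constant. A plausible continuation (an assumption, not from the source) blends the three curves into one weighted acquisition on a common scale; acq_mix is a hypothetical name:

        # Blend the normalised criteria; with the sign conventions above
        # (LCB already negated), larger blended values are more promising.
        acq_mix = (ei_weight * acq1 / acq1.max()
                   + pi_weight * acq2 / acq2.max()
                   + lcb_weight * acq3 / acq3.max()) / weights_sum
        plt.plot(grid, acq_mix, "k--", label="weighted(x)")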
Example #4
import numpy as np
import matplotlib.pyplot as plt
from skopt import gp_minimize
from skopt.acquisition import gaussian_lcb
from skopt.benchmarks import branin

dimensions = [(-5.0, 10.0), (0.0, 15.0)]  # Branin domain
min_x = [-np.pi, np.pi, 9.42478]          # x1 of Branin's three global minima
min_y = [12.275, 2.275, 2.475]            # x2 of Branin's three global minima

x1_values = np.linspace(-5, 10, 100)
x2_values = np.linspace(0, 15, 100)
x_ax, y_ax = np.meshgrid(x1_values, x2_values)
vals = np.c_[x_ax.ravel(), y_ax.ravel()]
subplot_no = 221

res = gp_minimize(
    branin, dimensions, n_calls=10, random_state=0, acq_func="LCB",
    acq_optimizer="lbfgs", n_initial_points=1, n_restarts_optimizer=2)
gp_model = res.models[-1]
opt_points = np.asarray(res.x_iters)  # list of lists -> array for slicing

# The GP is fitted in skopt's transformed (normalised) space, so map the
# raw grid through the space before querying the model.
vals_t = res.space.transform(vals.tolist())
posterior_mean, posterior_std = gp_model.predict(vals_t, return_std=True)
acquis_values = gaussian_lcb(vals_t, gp_model)
acquis_values = acquis_values.reshape(100, 100)
posterior_mean = posterior_mean.reshape(100, 100)
posterior_std = posterior_std.reshape(100, 100)
best_min = vals[np.argmin(acquis_values)]

plt.subplot(subplot_no)
plt.pcolormesh(x_ax, y_ax, posterior_mean)
plt.plot(opt_points[:, 0], opt_points[:, 1], 'wo', markersize=5, label="sampled points")
plt.plot(best_min[0], best_min[1], 'ro', markersize=5, label="GP min")
plt.plot(min_x, min_y, 'go', markersize=5, label="true minima")
plt.colorbar()
plt.xlabel('X1')
plt.xlim([-5, 10])
plt.ylabel('X2')
plt.ylim([0, 15])
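subplot_no = 221 implies a 2x2 grid, but only the first panel is drawn above. A sketch of what the remaining panels could show, reusing the arrays already computed (the panel layout is an assumption):

plt.subplot(222)
plt.pcolormesh(x_ax, y_ax, posterior_std)
plt.title("Posterior std")
plt.colorbar()

plt.subplot(223)
plt.pcolormesh(x_ax, y_ax, acquis_values)
plt.title("LCB")
plt.colorbar()
plt.show()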
Example #5
import numpy as np
from numpy.testing import assert_array_almost_equal
from skopt.acquisition import gaussian_lcb

def test_acquisition_lcb_correctness():
    # Check that it works with a batch of points: with the constant
    # surrogate (mu=0, sigma=1), LCB = mu - kappa*sigma = -kappa per row.
    X = 10 * np.ones((4, 2))
    lcb = gaussian_lcb(X, ConstSurrogate(), kappa=0.3)
    assert_array_almost_equal(lcb, [-0.3] * 4)
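This test and the next rely on a ConstSurrogate helper that is not shown. The asserted values pin down its behaviour: a posterior with mean 0 and standard deviation 1 everywhere. A minimal sketch consistent with that:

import numpy as np

class ConstSurrogate:
    # Stub surrogate: constant posterior with mean 0 and std 1.
    def predict(self, X, return_std=True):
        X = np.asarray(X)
        return np.zeros(X.shape[0]), np.ones(X.shape[0])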
Example #6
def test_acquisition_variance_correctness():
    # Check that it works with a batch of points: kappa='inf' makes
    # gaussian_lcb return -sigma (pure exploration), hence -1.0 here.
    X = 10 * np.ones((4, 2))
    var = gaussian_lcb(X, ConstSurrogate(), kappa='inf')
    assert_array_almost_equal(var, [-1.0] * 4)
Example #7
    def plot_optimizer(self, x, it=0):
        # Method fragment: self.hp_opt is assumed to hold an skopt Optimizer
        # (needs numpy as np, matplotlib.pyplot as plt, matplotlib.ticker as
        # ticker and skopt.acquisition.gaussian_lcb in scope).
        opt = self.hp_opt
        model = opt.models[-1]
        x_model = opt.space.transform(x.tolist())

        plt.figure(figsize=(6.4 * 2, 4.8))
        plt.subplot(1, 2, 1)
        # Plot Model(x) + contours
        y_pred, sigma = model.predict(x_model, return_std=True)
        y_pred *= -1  # the optimizer saw the negated objective; flip back
        plt.plot(x, y_pred, "g--", label=r"$\mu(x)$")
        plt.fill(
            np.concatenate([x, x[::-1]]),
            np.concatenate(
                [y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]),
            alpha=0.2,
            fc="g",
            ec="None",
        )

        # Plot only the last W sampled points, sign-flipped back to the
        # original maximisation scale.
        W = 10
        yi = np.array(opt.yi)[-W:] * -1
        Xi = opt.Xi[-W:]
        plt.plot(Xi, yi, "r.", markersize=8, label="Observations")

        plt.grid()
        plt.legend(loc="best")
        plt.xlim(0.001, 0.1)
        plt.ylim(0, 1)
        plt.xlabel("Learning Rate")
        plt.ylabel("Objective")
        plt.xscale("log")

        ax = plt.gca()
        ax.xaxis.set_major_locator(ticker.FixedLocator([0.001, 0.01, 0.1]))
        ax.xaxis.set_major_formatter(
            ticker.FixedFormatter(["0.001", "0.01", "0.1"]))
        ax.yaxis.set_major_locator(ticker.MultipleLocator(0.2))

        # LCB
        plt.subplot(1, 2, 2)
        acq = gaussian_lcb(x_model, model) * -1  # -LCB of -f is the UCB of f
        plt.plot(x, acq, "b", label="UCB(x)")
        plt.fill_between(x.ravel(), 0.0, acq.ravel(), alpha=0.3, color="blue")

        plt.xlabel("Learning Rate")

        # Adjust plot layout
        plt.grid()
        plt.legend(loc="best")
        plt.xlim(0.001, 0.1)
        plt.ylim(0, 1)
        plt.xscale("log")

        ax = plt.gca()
        ax.xaxis.set_major_locator(ticker.FixedLocator([0.001, 0.01, 0.1]))
        ax.xaxis.set_major_formatter(
            ticker.FixedFormatter(["0.001", "0.01", "0.1"]))
        ax.yaxis.set_major_locator(ticker.MultipleLocator(0.2))

        # Save Figure
        plt.savefig(f"opt-{it:05}.png", dpi=100)
        plt.close()
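Note the paired sign flips in this example: the underlying Optimizer minimises, so the objective was evidently fed to it negated. Multiplying y_pred and opt.yi by -1 restores the original maximisation view, and negating gaussian_lcb turns the lower confidence bound of the negated objective into the upper confidence bound of the original one, which is why the second panel is labelled UCB(x).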
Example #8
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from skopt.acquisition import gaussian_ei, gaussian_lcb

# black_box is assumed to be the 1-D objective defined elsewhere.
all_x = np.reshape(np.linspace(0, 6, 100), (-1, 1))
all_f = [black_box(xi) for xi in all_x]

# Plot all points.
plt.plot(all_x, all_f, "green", label="Ground truth")

# Fit on points drawn from only one third of the input range ([4, 6]).
X = np.reshape(np.linspace(4, 6, 10), (-1, 1))
y = [black_box(xi) for xi in X]

# Use RBF kernel.
rbf = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=rbf, alpha=1e-12)
gpr.fit(X, y)
y_pred, y_std = gpr.predict(all_x, return_std=True)

# Negate EI so it shares LCB's "lower is better" orientation on one axes.
ei_vals = -gaussian_ei(all_x, gpr, y_opt=np.min(y))
lcb_vals = gaussian_lcb(all_x, gpr)
all_x_plot = np.ravel(all_x)
upper_bound = y_pred + 1.96 * y_std
lower_bound = y_pred - 1.96 * y_std

plt.title("Acquisition values.")
plt.plot(np.ravel(X), y, "ro")
plt.plot(all_x_plot, y_pred, "r", label="Predictions")
plt.plot(all_x_plot, ei_vals, "b", label="-EI")
plt.plot(all_x_plot, lcb_vals, "black", label="LCB")
plt.legend()
plt.show()