Example #1
def plot_optimizer(opt, x, acq_name='EI'):
    model = opt.models[-1]
    x_model = opt.space.transform(x.tolist())

    # Plot Model(x) + contours
    y_pred, sigma = model.predict(x_model, return_std=True)
    plt.plot(x, y_pred, "g--", label=r"$\mu(x)$")
    # shade a +/- one-standard-deviation band around the posterior mean
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate([y_pred - sigma,
                             (y_pred + sigma)[::-1]]),
             alpha=.2, fc="g", ec="None")

    # Plot sampled points
    plt.plot(opt.Xi, opt.yi,
             "ro", label="Observations")

    if acq_name == 'EI':
        acq = gaussian_ei(x_model, model, y_opt=np.min(opt.yi),
                          **opt.acq_func_kwargs)
        acq /= acq.max()
    elif acq_name == 'LCB':
        acq = gaussian_lcb(x_model, model, **opt.acq_func_kwargs)
        acq /= acq.min()
    else:
        raise ValueError("acq_name must be 'EI' or 'LCB'")

    # shift down to make a better plot
    acq = acq - 2
    plt.plot(x, acq, "b", label="%s(x)" % acq_name)
    plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')

    # Adjust plot layout
    plt.grid()
    plt.legend(loc='best')
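A minimal usage sketch for the helper above, assuming skopt's ask-and-tell Optimizer; the 1-D objective f and the search interval are stand-ins, not taken from the original example:

import numpy as np
import matplotlib.pyplot as plt
from skopt import Optimizer
from skopt.acquisition import gaussian_ei, gaussian_lcb

# stand-in objective (assumption, not from the source project)
f = lambda x: np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))

opt = Optimizer([(-2.0, 2.0)], base_estimator="GP", acq_func="EI",
                n_initial_points=5, acq_func_kwargs={"xi": 0.01})
for _ in range(10):
    x_next = opt.ask()
    opt.tell(x_next, f(x_next))

x = np.linspace(-2, 2, 400).reshape(-1, 1)
plot_optimizer(opt, x, acq_name='EI')
plt.show()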
Example #2
def plot_optimizer(opt, x, fx):
    model = opt.models[-1]
    x_model = opt.space.transform(x.tolist())

    # Plot true function.
    plt.plot(x, fx, "r--", label="True (unknown)")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate(
                 [fx - 1.9600 * noise_level, fx[::-1] + 1.9600 * noise_level]),
             alpha=.2,
             fc="r",
             ec="None")

    # Plot Model(x) + contours
    y_pred, sigma = model.predict(x_model, return_std=True)
    plt.plot(x, y_pred, "g--", label=r"$\mu(x)$")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate(
                 [y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]),
             alpha=.2,
             fc="g",
             ec="None")

    # Plot sampled points
    plt.plot(opt.Xi, opt.yi, "r.", markersize=8, label="Observations")

    acq = gaussian_ei(x_model, model, y_opt=np.min(opt.yi))
    # shift down to make a better plot
    acq = 4 * acq - 2
    plt.plot(x, acq, "b", label="EI(x)")
    plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')

    # Adjust plot layout
    plt.grid()
    plt.legend(loc='best')
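In both helpers the 1.9600 factor is the two-sided 95% quantile of the standard normal, so the shaded band is the usual Gaussian 95% confidence interval:

\[ \Pr\bigl[\mu(x) - 1.96\,\sigma(x) \le f(x) \le \mu(x) + 1.96\,\sigma(x)\bigr] \approx 0.95 \]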
Example #3
def neg_ei(x):
    if len(known_values) > 0:
        a, grad = gaussian_ei(x.reshape(1, -1),
                              model=model,
                              y_opt=y_opt,
                              return_grad=True)
        return -a, -grad
    else:
        return -1.0, np.zeros(x.shape[0])
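Because neg_ei returns a (value, gradient) pair, it plugs directly into scipy's gradient-based minimizers. A minimal sketch, with the starting point and box bounds as assumptions:

import numpy as np
from scipy.optimize import minimize

def neg_ei_flat(x):
    # squeeze the model outputs to the scalar / 1-D shapes scipy expects
    val, grad = neg_ei(x)
    return np.squeeze(val), np.ravel(grad)

x0 = np.array([0.5])  # assumed start inside an assumed unit box
result = minimize(neg_ei_flat, x0, jac=True, method="L-BFGS-B",
                  bounds=[(0.0, 1.0)])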
Example #4
        def acq_f(x):
            if len(known_values) > 0:
                acq = gaussian_ei(x, model=model, y_opt=y_opt)
            else:
                acq = np.zeros(x.shape[0])

            prev_mean, prev_std = prev_model.predict(x, return_std=True)

            phi = ndtr((threshold - prev_mean) / prev_std)

            return -acq * phi
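This weights expected improvement by the probability that the previous model's prediction stays below the threshold, i.e. the acquisition being maximized (returned negated for a minimizer) is

\[ \alpha(x) = \mathrm{EI}(x)\,\Phi\!\left(\frac{t - \mu_{\mathrm{prev}}(x)}{\sigma_{\mathrm{prev}}(x)}\right), \]

where \(\Phi\) is the standard normal CDF (scipy's ndtr) and \(t\) is the threshold.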
Example #5
        def acq_f(x):
            if len(known_values) > 0:
                acq, acq_grad = gaussian_ei(x.reshape(1, -1),
                                            model=model,
                                            y_opt=y_opt,
                                            return_grad=True)
            else:
                acq, acq_grad = 1.0, np.zeros(x.shape[0])

            prev_mean, prev_std, prev_mean_grad, prev_std_grad = \
                prev_model.predict(x.reshape(1, -1),
                                   return_std=True,
                                   return_mean_grad=True,
                                   return_std_grad=True)

            phi = ndtr((threshold - prev_mean) / prev_std)

            # chain rule: dPhi/dx = phi(z) * dz/dx, with z = (t - mu) / sigma
            dphi_dz = np.exp(-(threshold - prev_mean)**2 /
                             (2.0 * prev_std**2)) / np.sqrt(2 * np.pi)
            dz_dx = (-prev_mean_grad / prev_std -
                     (threshold - prev_mean) / prev_std**2 * prev_std_grad)

            dphi_dx = dphi_dz * dz_dx

            full_grad = acq_grad * phi + acq * dphi_dx

            return -acq * phi, -full_grad
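The gradient follows from the chain and product rules. With \( z(x) = (t - \mu(x))/\sigma(x) \) and \( \phi \) the standard normal PDF,

\[ \nabla_x \Phi(z) = \phi(z)\,\nabla_x z, \qquad \nabla_x z = -\frac{\nabla_x \mu}{\sigma} - \frac{(t - \mu)\,\nabla_x \sigma}{\sigma^2}, \]

and the product rule gives \( \nabla_x\bigl[\mathrm{EI}\cdot\Phi(z)\bigr] = \Phi(z)\,\nabla_x \mathrm{EI} + \mathrm{EI}\,\nabla_x \Phi(z) \), which is what full_grad assembles.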
Example #6
def get_action_using_bo(_env):
    all_acq = gaussian_ei(_env.possible_locations,
                          _env.gp,
                          np.min(_env.train_targets),
                          xi=1.0)
    best_loc = _env.possible_locations[np.argmax(all_acq)]  # first point of maximal EI
    vect_dist = np.subtract(best_loc, _env.position)
    ang = (np.arctan2(vect_dist[0], -vect_dist[1]) + np.pi) / (2 * np.pi)
    # determine the distance, then take its ratio to the max distance (normalize)
    dist_ = np.exp(_env.gp.kernel_.theta[0]) * 0.375 / _env.max_step_distance()
    if dist_ > 1.0:
        dist_ = 1.0
    acq_state = np.zeros(_env.map_size)
    acq_state[_env.possible_locations[:, 0],
              _env.possible_locations[:, 1]] = all_acq
    # plt.figure()
    # plt.imshow(acq_state)
    # plt.plot(_env.train_inputs[:, 1], _env.train_inputs[:, 0], 'xr')
    # plt.plot(best_loc[1], best_loc[0], '^y')
    # plt.plot(_env.position[1], _env.position[0], 'xb')
    # action = _env.action2vector([dist_, ang]) + _env.position
    # print("best: ", best_loc, "pos : ", _env.position, "dist: ", vect_dist, "next: ", action)
    # plt.plot(action[1], action[0], '^b')
    return [dist_, ang]
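For reference, the returned action encodes the heading by mapping arctan2's \([-\pi, \pi]\) output onto \([0, 1]\):

\[ \mathrm{ang} = \frac{\operatorname{atan2}(v_0, -v_1) + \pi}{2\pi}, \qquad v = \mathrm{best\_loc} - \mathrm{position}. \]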
Example #7
    # Adjust plot layout
    plt.grid()

    if n_iter == 0:
        plt.legend(loc="best", prop={'size': 6}, numpoints=1)

    if n_iter != 4:
        plt.tick_params(axis='x',
                        which='both',
                        bottom='off',
                        top='off',
                        labelbottom='off')

    # Plot EI(x)
    plt.subplot(5, 2, 2 * n_iter + 2)
    acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
    plt.plot(x, acq, "b", label="EI(x)")
    plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')

    next_x = res.x_iters[5 + n_iter]
    next_acq = gaussian_ei(res.space.transform([next_x]),
                           gp,
                           y_opt=np.min(curr_func_vals))
    plt.plot(next_x, next_acq, "bo", markersize=6, label="Next query point")

    # Adjust plot layout
    plt.ylim(0, 0.1)
    plt.grid()

    if n_iter == 0:
        plt.legend(loc="best", prop={'size': 6}, numpoints=1)
Example #8
    model.fit(x_data, y_data)

    # use bayesian optimization
    for i in range(initial_points, iterations):
        minimum = min(y_data)
        # minimum = 99999
        max_expected_improvement = 0
        max_points = []
        max_points_unnormalized = []

        for pool_size in range(thread_pool_min, thread_pool_max + 1):
            x = [pool_size]
            x_normalized = [_normalize(x[0], thread_pool_min, thread_pool_max)]

            ei = gaussian_ei(np.array(x_normalized).reshape(1, -1), model, minimum)

            if ei > max_expected_improvement:
                max_expected_improvement = ei
                max_points = [x_normalized]
                max_points_unnormalized = [x]

            elif ei == max_expected_improvement:
                max_points.append(x_normalized)
                max_points_unnormalized.append(x)

        if max_expected_improvement == 0:
            print("WARN: Maximum expected improvement was 0. Most likely to pick a random point next")

        # select the point with maximum expected improvement
        # if there are multiple points with the same EI, choose one at random
Example #9
def test_acquisition_ei_correctness():
    # check that it works with a vector as well
    X = 10 * np.ones((4, 2))
    ei = gaussian_ei(X, ConstSurrogate(), -0.5, xi=0.)
    assert_array_almost_equal(ei, [0.1977966] * 4)
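The expected value follows from the closed form of EI. Assuming ConstSurrogate predicts a constant mean \(\mu = 0\) with unit standard deviation, and with \(y_{\mathrm{opt}} = -0.5\), \(\xi = 0\):

\[ z = \frac{y_{\mathrm{opt}} - \mu}{\sigma} = -0.5, \qquad \mathrm{EI} = (y_{\mathrm{opt}} - \mu)\,\Phi(z) + \sigma\,\phi(z) = -0.5\,\Phi(-0.5) + \phi(-0.5) \approx 0.1977966. \]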
Example #10
import numpy as np
import matplotlib.pyplot as plt
from skopt import gbrt_minimize
from skopt.acquisition import gaussian_ei
from skopt.benchmarks import branin

dimensions = [(-5.0, 10.0), (0.0, 15.0)]

x1_values = np.linspace(-5, 10, 100)
x2_values = np.linspace(0, 15, 100)
x_ax, y_ax = np.meshgrid(x1_values, x2_values)
vals = np.c_[x_ax.ravel(), y_ax.ravel()]

res = gbrt_minimize(
    branin, dimensions, maxiter=200, random_state=1)

model = res.models[-1]
opt_points = np.asarray(res.x_iters)  # as an array, to allow column indexing below
y_opt = res.fun
x_opt = res.x

acquis_values = gaussian_ei(vals, model, y_opt)
acquis_values = acquis_values.reshape(100, 100)

branin_vals = np.reshape([branin(val) for val in vals], (100, 100))

plt.subplot(211)
plt.pcolormesh(x_ax, y_ax, acquis_values)
plt.plot(opt_points[:, 0], opt_points[:, 1], 'ro',
         markersize=4, lw=0, label='samples')
plt.plot(x_opt[0], x_opt[1], 'ws', markersize=8, label='best')
plt.colorbar()
plt.legend(loc='best', numpoints=1)
plt.xlabel('X1')
plt.xlim([-5, 10])
plt.ylabel('X2')
plt.ylim([0, 15])
Example #11
def plot_optimizer(res, next_x, x, fx, n_iter, max_iters=5):
    x_gp = res.space.transform(x.tolist())
    gp = res.models[-1]
    curr_x_iters = res.x_iters
    curr_func_vals = res.func_vals

    # Plot true function.
    ax = plt.subplot(max_iters, 2, 2 * n_iter + 1)
    plt.plot(x, fx, "r--", label="True (unknown)")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate(
                 [fx - 1.9600 * noise_level, fx[::-1] + 1.9600 * noise_level]),
             alpha=.2,
             fc="r",
             ec="None")
    if n_iter < max_iters - 1:
        ax.get_xaxis().set_ticklabels([])
    # Plot GP(x) + contours
    y_pred, sigma = gp.predict(x_gp, return_std=True)
    plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate(
                 [y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]),
             alpha=.2,
             fc="g",
             ec="None")

    # Plot sampled points
    plt.plot(curr_x_iters,
             curr_func_vals,
             "r.",
             markersize=8,
             label="Observations")
    plt.title(r"x* = %.4f, f(x*) = %.4f" % (res.x[0], res.fun))
    # Adjust plot layout
    plt.grid()

    if n_iter == 0:
        plt.legend(loc="best", prop={'size': 6}, numpoints=1)

    if n_iter != max_iters - 1:
        plt.tick_params(axis='x',
                        which='both',
                        bottom='off',
                        top='off',
                        labelbottom='off')

    # Plot EI(x)
    ax = plt.subplot(max_iters, 2, 2 * n_iter + 2)
    acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
    plt.plot(x, acq, "b", label="EI(x)")
    plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')

    if n_iter < max_iters - 1:
        ax.get_xaxis().set_ticklabels([])

    next_acq = gaussian_ei(res.space.transform([next_x]),
                           gp,
                           y_opt=np.min(curr_func_vals))
    plt.plot(next_x, next_acq, "bo", markersize=6, label="Next query point")

    # Adjust plot layout
    plt.ylim(0, 0.07)
    plt.grid()
    if n_iter == 0:
        plt.legend(loc="best", prop={'size': 6}, numpoints=1)

    if n_iter != max_iters - 1:
        plt.tick_params(axis='x',
                        which='both',
                        bottom='off',
                        top='off',
                        labelbottom='off')
Example #12
    # Plot sampled points from the optimisation
    fig.add_trace(
        go.Scatter(x=curr_x_iters,
                   y=-curr_func_vals,
                   name="Samples from optimisation",
                   visible=False,
                   mode='markers',
                   marker=dict(color='red', size=10)),
        row=1,
        col=1,
    )

    # Plot acquisition function
    if n_iter > 0:
        acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
        # fig.add_trace(go.Scatter(name = 'Acquisition function',
        #                          x = x_data,
        #                          y = acq,
        #                          visible=False,
        #                          line = {'shape': 'spline', 'color': 'pink'},
        #                         ),
        #                     row=2,
        #                     col=1
        #              )

        fig.add_trace(go.Scatter(name='Acquisition function',
                                 x=x_data,
                                 y=acq,
                                 visible=False,
                                 line={
Example #13
    plt.plot(curr_x_iters, curr_func_vals,
             "r.", markersize=8, label="Observations")
    
    # Adjust plot layout
    plt.grid()

    if n_iter == 0:
        plt.legend(loc="best", prop={'size': 6}, numpoints=1)
        
    if n_iter != 4:
        plt.tick_params(axis='x', which='both', bottom='off', 
                        top='off', labelbottom='off') 

    # Plot EI(x)
    plt.subplot(5, 2, 2*n_iter+2)
    acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
    plt.plot(x, acq, "b", label="EI(x)")
    plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')
    
    next_x = res.x_iters[5+n_iter]
    next_acq = gaussian_ei(res.space.transform([next_x]), gp, y_opt=np.min(curr_func_vals))
    plt.plot(next_x, next_acq, "bo", markersize=6, label="Next query point")
    
    # Adjust plot layout
    plt.ylim(0, 0.1)
    plt.grid()
    
    if n_iter == 0:
        plt.legend(loc="best", prop={'size': 6}, numpoints=1)
        
    if n_iter != 4:
Example #14
def neg_ei(x):
    if len(known_values) > 0:
        a = gaussian_ei(x, model=model, y_opt=y_opt)
        return -a
    else:
        return -1.0
Example #15
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from skopt.acquisition import gaussian_ei, gaussian_lcb

# black_box is the (elided) objective defined elsewhere in the source project
all_x = np.reshape(np.linspace(0, 6, 100), (-1, 1))
all_f = [black_box(xi) for xi in all_x]

# Plot all points.
plt.plot(all_x, all_f, "green", label="Ground truth")

# Train on points from only the last third of the domain.
X = np.reshape(np.linspace(4, 6, 10), (-1, 1))
y = [black_box(xi) for xi in X]

# Use RBF kernel.
rbf = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=rbf, alpha=1e-12)
gpr.fit(X, y)
y_pred, y_std = gpr.predict(all_x, return_std=True)

ei_vals = -gaussian_ei(all_x, gpr, y_opt=np.min(y))
lcb_vals = gaussian_lcb(all_x, gpr)
all_x_plot = np.ravel(all_x)
upper_bound = y_pred + 1.96 * y_std
lower_bound = y_pred - 1.96 * y_std

plt.title("Acquisition values.")
plt.plot(np.ravel(X), y, "ro")
plt.plot(all_x_plot, y_pred, "r", label="Predictions")
plt.plot(all_x_plot, ei_vals, "b", label="-EI")
plt.plot(all_x_plot, lcb_vals, "black", label="LCB")
plt.legend()
plt.show()
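For comparison, skopt's gaussian_lcb plots the lower confidence bound, which with its default \(\kappa = 1.96\) is

\[ \mathrm{LCB}(x) = \mu(x) - \kappa\,\sigma(x), \]

so low values flag points that are either predicted to be good or still highly uncertain.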
Example #16
def plot_bo(f, n, res, noise_level):
    plt.rcParams["figure.figsize"] = (8, 14)

    x = np.linspace(0.5, 1.5, 400).reshape(-1, 1)
    x_gp = res.space.transform(x.tolist())
    fx = np.array([f(x_i) for x_i in x])

    # Plot the first n iterations
    print("n = ", n)
    for n_iter in range(n):
        print("n_iter = ", n_iter)
        gp = res.models[n_iter]
        curr_x_iters = res.x_iters[:n_iter + 1]
        curr_func_vals = res.func_vals[:n_iter + 1]
        print("res.func_vals = ", res.func_vals)
        print("curr_func_vals = ", curr_func_vals)

        # Plot true function.
        plt.subplot(n, 2, 2 * n_iter + 1)
        plt.plot(x, fx, "r--", label="True (unknown)")
        plt.fill(np.concatenate([x, x[::-1]]),
                 np.concatenate([
                     fx - 1.9600 * noise_level, fx[::-1] + 1.9600 * noise_level
                 ]),
                 alpha=.2,
                 fc="r",
                 ec="None")

        # Plot GP(x) + contours
        y_pred, sigma = gp.predict(x_gp, return_std=True)
        plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
        plt.fill(np.concatenate([x, x[::-1]]),
                 np.concatenate([
                     y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]
                 ]),
                 alpha=.2,
                 fc="g",
                 ec="None")

        # Plot sampled points
        plt.plot(curr_x_iters,
                 curr_func_vals,
                 "r.",
                 markersize=8,
                 label="Observations")

        # Adjust plot layout
        plt.grid()

        if n_iter == 0:
            plt.legend(loc="best", prop={'size': 6}, numpoints=1)

        if n_iter != n - 1:
            plt.tick_params(axis='x',
                            which='both',
                            bottom='off',
                            top='off',
                            labelbottom='off')

        # Plot EI(x)
        plt.subplot(n, 2, 2 * n_iter + 2)
        # print("x_gp = ", x_gp)
        # print("curr_func_vals = ", curr_func_vals)
        # print("min = ", np.min(curr_func_vals))
        acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
        plt.plot(x, acq, "b", label="EI(x)")
        plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')

        next_x = res.x_iters[n_iter]
        next_acq = gaussian_ei(res.space.transform([next_x]),
                               gp,
                               y_opt=np.min(curr_func_vals))
        plt.plot(next_x,
                 next_acq,
                 "bo",
                 markersize=6,
                 label="Next query point")

        # Adjust plot layout
        plt.ylim(0, 0.1)
        plt.grid()

        if n_iter == 0:
            plt.legend(loc="best", prop={'size': 6}, numpoints=1)

        if n_iter != n - 1:
            plt.tick_params(axis='x',
                            which='both',
                            bottom='off',
                            top='off',
                            labelbottom='off')

    plt.show()
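A minimal call sketch for plot_bo, assuming skopt's gp_minimize and a noisy stand-in objective on the hard-coded [0.5, 1.5] interval (f and the call parameters are assumptions):

import numpy as np
from skopt import gp_minimize

noise_level = 0.1

def f(x):
    # stand-in noisy 1-D objective (assumption, not from the source project)
    return np.sin(10 * x[0]) * x[0] + noise_level * np.random.randn()

res = gp_minimize(f, [(0.5, 1.5)], acq_func="EI", n_calls=8,
                  n_random_starts=3, random_state=0)
plot_bo(f, n=5, res=res, noise_level=noise_level)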
Example #18
import codecs
import time
from random import uniform

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from skopt import gp_minimize
from skopt.acquisition import gaussian_ei


def main():
    # import some data to play with
    X = []
    y = []
    with codecs.open("../data/machine.data", 'r', 'utf-8') as infile:
        for line in infile:
            tokens = line.split(',')
            X.append([float(x) for x in tokens[:5]])
            y.append(float(tokens[6]))
    split = int(round(len(X) * 0.8))
    X_train = X[:split]
    X_test = X[split:]
    y_train = y[:split]
    y_test = y[split:]
    regr = linear_model.Lasso()
    regr.fit(X_train, y_train)
    y_predict = [i for i in regr.predict(X_test)]
    print("loss of the model:{}".format(mean_squared_error(y_test, y_predict)))

    # apply grid search
    worst_case = float("inf")
    mse_gs_scores = []
    t0 = time.time()
    for g in [(i+1)*0.001 for i in range(8000)]:
        regr = linear_model.Lasso(alpha=g)
        regr.fit(X_train, y_train)
        y_pred = [i for i in regr.predict(X_test)]
        mse = mean_squared_error(y_test, y_pred)
        mse_gs_scores.append([g,mse])
        # save if best
        if mse < worst_case:
            worst_case = mse
            best_grid = g
    t1 = time.time()
    print("time taken by gridserach: {}".format(t1 - t0))
    print((worst_case,best_grid))

    # applying random search
    worst_case = float("inf")
    mse_rs_scores = []
    t0 = time.time()
    for _ in range(1000):
        g = uniform(0, 8)
        regr = linear_model.Lasso(alpha=g)
        regr.fit(X_train, y_train)
        y_pred = [i for i in regr.predict(X_test)]
        mse = mean_squared_error(y_test, y_pred)
        mse_rs_scores.append([g, mse])
        # save if best
        if mse < worst_case:
            worst_case = mse
            best_random = g
    t1 = time.time()
    print("time taken by randomserach: {}".format(t1 - t0))
    print((worst_case,best_random))

    # apply bayesian optimization
    noise_level = 0.1
    def f(alphavalue):
        regr = linear_model.Lasso(alpha=alphavalue)
        regr.fit(X_train, y_train)
        y_pred = [i for i in regr.predict(X_test)]
        return mean_squared_error(y_test, y_pred)
    x = np.array([(i+1)*0.001 for i in range(8000)])
    fx = [f(x_i) for x_i in x]
    plt.plot(x, fx, "r--", label="True (unknown)")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
                             [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
             alpha=.2, fc="r", ec="None")
    t4 = time.time()
    res = gp_minimize(f,  # the function to minimize
                      [(0.001, 8.0)],  # the bounds on each dimension of x
                      acq_func="EI",  # the acquisition function
                      n_calls=15,  # the number of evaluations of f
                      n_random_starts=5,  # the number of random initialization points
                      random_state=123)
    t5 = time.time()
    print("time taken by BO_search: {}".format(t5 - t4))
    print(res['fun'])
    print(res['x'])

    plt.plot(res.x_iters, res.func_vals, "b--", label="BO")
    plt.plot([i[0] for i in mse_rs_scores][:10], [i[1] for i in mse_rs_scores][:10], "g--", label="Random Search")
    plt.legend()
    plt.grid()
    plt.show()

    plt.rcParams["figure.figsize"] = (8, 14)

    x = np.linspace(0.001, 8.0, 8000).reshape(-1, 1)
    x_gp = res.space.transform(x.tolist())
    fx = np.array([f(x_i) for x_i in x])

    # Plot the 5 iterations following the 5 random points
    for n_iter in range(5):
        gp = res.models[n_iter]
        curr_x_iters = res.x_iters[:5 + n_iter]
        curr_func_vals = res.func_vals[:5 + n_iter]

        # Plot true function.
        plt.subplot(5, 2, 2 * n_iter + 1)
        plt.plot(x, fx, "r--", label="True (unknown)")
        plt.fill(np.concatenate([x, x[::-1]]),
                 np.concatenate([fx - 1.9600 * noise_level,
                                 fx[::-1] + 1.9600 * noise_level]),
                 alpha=.2, fc="r", ec="None")

        # Plot GP(x) + contours
        y_pred, sigma = gp.predict(x_gp, return_std=True)
        plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
        plt.fill(np.concatenate([x, x[::-1]]),
                 np.concatenate([y_pred - 1.9600 * sigma,
                                 (y_pred + 1.9600 * sigma)[::-1]]),
                 alpha=.2, fc="g", ec="None")

        # Plot sampled points
        plt.plot(curr_x_iters, curr_func_vals,
                 "r.", markersize=8, label="Observations")

        # Adjust plot layout
        plt.grid()

        if n_iter == 0:
            plt.legend(loc="best", prop={'size': 6}, numpoints=1)

        if n_iter != 4:
            plt.tick_params(axis='x', which='both', bottom='off',
                            top='off', labelbottom='off')

        # Plot EI(x)
        plt.subplot(5, 2, 2 * n_iter + 2)
        acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
        plt.plot(x, acq, "b", label="EI(x)")
        plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')

        next_x = res.x_iters[5 + n_iter]
        next_acq = gaussian_ei(res.space.transform([next_x]), gp, y_opt=np.min(curr_func_vals))
        plt.plot(next_x, next_acq, "bo", markersize=6, label="Next query point")

        # Adjust plot layout
        plt.ylim(0, 0.1)
        plt.grid()

        if n_iter == 0:
            plt.legend(loc="best", prop={'size': 6}, numpoints=1)

        if n_iter != 4:
            plt.tick_params(axis='x', which='both', bottom='off',
                            top='off', labelbottom='off')

    plt.show()
Example #19
row_no = 6

res = gbrt_minimize(
    bench3, dimensions, maxiter=6, n_start=1, random_state=1)
best_xs = res.x_iters.ravel()
best_ys = res.func_vals.ravel()
models = res.models

for n_iter in range(5):
    model = models[n_iter]
    best_x = best_xs[:n_iter+1]
    best_y = best_ys[:n_iter+1]

    low, mu, high = model.predict(vals).T
    std = (high - low) / 2
    acquis_values = -gaussian_ei(vals, model, best_y[-1])
    acquis_values = acquis_values.ravel()
    posterior_mean = mu.ravel()
    posterior_std = std.ravel()
    upper_bound = posterior_mean + posterior_std
    lower_bound = posterior_mean - posterior_std

    plt.subplot(2, 5, row_no + n_iter)  # fill the bottom row of the 2x5 grid
    plt.plot(x, func_values, color='red', linestyle="--", label="true func")
    plt.plot(x, posterior_mean, color='blue', label="GBRT mean")
    plt.fill_between(
        x, lower_bound, upper_bound, alpha=0.3, color='blue', label="GBRT std")

    sampled_y = [bench3(x) for x in best_x]
    plt.plot(best_x, sampled_y, 'ro', label="observations", markersize=5)
    plt.title("n_iter = %d" % (n_iter + 1))
Example #20
    def plot_bayes_social(f, res, num_plots, lower_bound, upper_bound):
        print("Plot Bayes")
        plot_convergence(res)
        plt.show()
        initial_num_true_func_samples = 4000
        x = np.linspace(lower_bound, upper_bound,
                        initial_num_true_func_samples).reshape(-1, 1)
        x_gp = res.space.transform(x.tolist())
        for n_iter in range(num_plots):
            # Print some debug info.
            gp = res.models[n_iter]
            curr_x_iters = res.x_iters[:num_plots + n_iter]
            curr_func_vals = res.func_vals[:num_plots + n_iter]
            #print('Iteration', n_iter, ', curr_x_iters = ', curr_x_iters, ', curr_func_vals = ', curr_func_vals)

            # Plot true function.
            plt.subplot(num_plots, 2, 2 * n_iter + 1)
            # fx = np.array([f(x_i) for x_i in range(int(lower_bound), int(upper_bound) + 1)])
            true_x = np.linspace(lower_bound, upper_bound, num=100)
            plt.step(true_x, [f((i, )) for i in true_x],
                     "r--",
                     label="True (unknown)",
                     where='post')

            # Plot GP(x) + contours
            y_pred, sigma = gp.predict(x_gp, return_std=True)
            plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
            plt.fill(np.concatenate([x, x[::-1]]),
                     np.concatenate([
                         y_pred - 1.9600 * sigma,
                         (y_pred + 1.9600 * sigma)[::-1]
                     ]),
                     alpha=.2,
                     fc="g",
                     ec="None")

            # Plot sampled points
            plt.plot(curr_x_iters,
                     curr_func_vals,
                     "r.",
                     markersize=8,
                     label="Observations")

            # Adjust plot layout
            plt.grid()

            if n_iter == 0:
                plt.legend(loc="best", prop={'size': 6}, numpoints=1)

            if n_iter != num_plots - 1:
                plt.tick_params(axis='x',
                                which='both',
                                bottom='off',
                                top='off',
                                labelbottom='off')

            # Plot EI(x)
            plt.subplot(num_plots, 2, 2 * n_iter + 2)
            acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
            plt.plot(x, acq, "b", label="EI(x)")
            plt.fill_between(x.ravel(),
                             -2.0,
                             acq.ravel(),
                             alpha=0.3,
                             color='blue')

            next_x = res.x_iters[num_plots + n_iter]
            next_acq = gaussian_ei(res.space.transform([next_x]),
                                   gp,
                                   y_opt=np.min(curr_func_vals))
            plt.plot(next_x,
                     next_acq,
                     "bo",
                     markersize=6,
                     label="Next query point")

            # Adjust plot layout
            plt.ylim(lower_bound, upper_bound)
            plt.grid()

            if n_iter == 0:
                plt.legend(loc="best", prop={'size': 6}, numpoints=1)

            if n_iter != num_plots - 1:
                plt.tick_params(axis='x',
                                which='both',
                                bottom='off',
                                top='off',
                                labelbottom='off')

        plt.show()