Code example #1
def test_branin_bayes_sampling():
    res = gp_minimize(branin, [[-5, 10], [0, 15]],
                      random_state=0,
                      search='sampling',
                      maxiter=200,
                      acq='UCB')
    assert_less(res.fun, 0.41)
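The branin objective used throughout these tests is not shown in the excerpts; skopt ships an implementation in skopt.benchmarks, and a minimal stand-in following the standard Branin(-Hoo) definition (global minimum roughly 0.397887, hence the 0.41 threshold above) would look like this sketch:

import numpy as np


def branin(x):
    # Standard Branin(-Hoo) benchmark on x1 in [-5, 10], x2 in [0, 15];
    # its global minimum is ~0.397887, which the 0.41 threshold above tests.
    x1, x2 = x
    a = 1.0
    b = 5.1 / (4 * np.pi ** 2)
    c = 5.0 / np.pi
    r = 6.0
    s = 10.0
    t = 1.0 / (8 * np.pi)
    return (a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2
            + s * (1 - t) * np.cos(x1) + s)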
Code example #2
def check_minimize(func, y_opt, bounds, search, acq, margin, maxiter):
    r = gp_minimize(func,
                    bounds,
                    search=search,
                    acq=acq,
                    maxiter=maxiter,
                    random_state=1)
    assert_less(r.fun, y_opt + margin)
Code example #3
def check_minimize(func, y_opt, bounds, search, acq, margin, n_calls):
    r = gp_minimize(func,
                    bounds,
                    search=search,
                    acq=acq,
                    n_calls=n_calls,
                    random_state=1)
    assert_less(r.fun, y_opt + margin)
Code example #4
def test_api():
    res = gp_minimize(branin, [[-5, 10], [0, 15]], random_state=0, maxiter=20)
    assert_array_equal(res.x.shape, (2, ))
    assert_array_equal(res.x_iters.shape, (20, 2))
    assert_array_equal(res.func_vals.shape, (20, ))
    assert_array_less(res.x_iters, np.tile([10, 15], (20, 1)))
    assert_array_less(np.tile([-5, 0], (20, 1)), res.x_iters)

    assert_raises(ValueError, gp_minimize, lambda x: x, [[-5, 10]])
Code example #5
def test_branin_hartmann_sampling():
    bounds = np.tile((0, 1), (6, 1))
    res = gp_minimize(hartmann_6,
                      bounds,
                      random_state=0,
                      search='sampling',
                      maxiter=200,
                      acq='UCB')
    assert_less(res.fun, -2.5)
Code example #6
def test_api():
    res = gp_minimize(branin, [(-5.0, 10.0), (0.0, 15.0)], random_state=0, maxiter=20)
    assert_array_equal(res.x.shape, (2,))
    assert_array_equal(res.x_iters.shape, (20, 2))
    assert_array_equal(res.func_vals.shape, (20,))
    assert_array_less(res.x_iters, np.tile([10, 15], (20, 1)))
    assert_array_less(np.tile([-5, 0], (20, 1)), res.x_iters)

    assert_raises(ValueError, gp_minimize, lambda x: x, [[-5, 10]])
Code example #7
def plot_interactive_gp(func, bounds, random_state, max_iter=1000):
    # Run the (old-API) gp_minimize and keep the per-iteration GP models so
    # the slider below can replay the surrogate fit at each iteration.
    res = gp_minimize(
        func, (bounds,), search='lbfgs', maxiter=max_iter,
        random_state=random_state, acq='UCB')
    gp_models = res.models
    best_x_l = res.x_iters.ravel()

    fig, ax = plt.subplots()
    plt.subplots_adjust(left=0.25, bottom=0.25)
    plt.title("Gaussian Process Approximation")
    t = np.linspace(bounds[0], bounds[1], 10000)
    t_gp = scale_to_uniform(t, bounds[0], bounds[1])
    t_gp = t_gp.reshape(-1, 1)

    y = [func([ele]) for ele in t]
    l, = plt.plot(t, y, lw=2, color='green')
    l1, = plt.plot(t, y, 'r--', lw=2)
    point = plt.plot([0], [0], 'ro')

    plt.axis([bounds[0], bounds[1], np.min(y), np.max(y)])

    axcolor = 'lightgoldenrodyellow'
    # 'axisbg' is the keyword of the matplotlib version this snippet targets;
    # newer matplotlib releases use 'facecolor' instead.
    axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)

    gp_iter = Slider(axfreq, 'Iterations', 0, max_iter, valinit=1, valfmt="%d")

    def update(val):
        # Slider callback: redraw the GP mean prediction and the point that
        # was queried at the selected iteration.
        i = int(gp_iter.val)
        l1.set_ydata(gp_models[i - 1].predict(t_gp))
        point[-1].set_xdata(best_x_l[i - 1])
        point[0].set_ydata(func([best_x_l[i - 1]]))
        fig.canvas.draw_idle()

    gp_iter.on_changed(update)

    plt.show()
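The helper above calls a scale_to_uniform function that is not shown in the excerpt. A minimal sketch of what it presumably does (linear rescaling of the search interval onto [0, 1], the scale on which the stored GP models appear to have been fitted) follows; this is an assumption, not the project's actual helper:

import numpy as np


def scale_to_uniform(x, lower, upper):
    # Assumed behaviour: map values from [lower, upper] linearly onto [0, 1].
    return (np.asarray(x, dtype=float) - lower) / (upper - lower)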
Code example #8
File: test_gp_opt.py  Project: nel215/scikit-optimize
def check_minimize(func, y_opt, bounds, search, acq, margin, n_calls):
    r = gp_minimize(func, bounds, search=search, acq=acq,
                    n_calls=n_calls, random_state=1)
    assert_less(r.fun, y_opt + margin)
Code example #9
for random_state in range(5):
    print("Doing a random search for the minimum.")
    t = time()
    dummy_model = dummy_minimize(branin,
                                 bounds,
                                 maxiter=200,
                                 random_state=random_state)
    print(time() - t)
    print("Best score obtained, %0.4f" % dummy_model.fun)

    print("Doing a gp-based search for the minimum")
    t = time()
    gp_model = gp_minimize(branin,
                           bounds,
                           maxiter=200,
                           random_state=random_state,
                           n_start=1)
    print(time() - t)
    print("Best score obtained, %0.4f" % gp_model.fun)

    for j in range(1, 201):
        best_dummy_scores[random_state,
                          j - 1] = np.min(dummy_model.func_vals[:j])
        best_gp_scores[random_state, j - 1] = np.min(gp_model.func_vals[:j])

mean_dummy_scores = np.mean(best_dummy_scores, axis=0)
mean_gp_scores = np.mean(best_gp_scores, axis=0)
err_dummy_scores = np.std(best_dummy_scores, axis=0) / sqrt(10)
err_gp_scores = np.std(best_gp_scores, axis=0) / sqrt(10)
Code example #10
import numpy as np

from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# Older scikit-learn exposed cross_val_score from sklearn.cross_validation.
from sklearn.model_selection import cross_val_score

from skopt.gp_opt import gp_minimize


digits = load_digits()
X, y = digits.data, digits.target
rfc = RandomForestClassifier(random_state=10)

def compute_mean_validation_score(forest_params):
    # Note: this scikit-optimize snapshot samples the parameters as floats in
    # the given ranges; the variant in the next example casts them to int.
    max_depth, max_features, mss, msl = forest_params

    rfc.set_params(
        max_depth=max_depth, max_features=max_features,
        min_samples_split=mss, min_samples_leaf=msl)

    return -np.mean(cross_val_score(rfc, X, y, cv=3, n_jobs=-1))

# Bounds inspired by
# http://scikit-learn.org/dev/auto_examples/model_selection/randomized_search.html#example-model-selection-randomized-search-py
dimensions = [(3, 50), (1, 12), (1, 12), (1, 12)]
best_dummy_scores = np.zeros((5, 100))
best_gp_scores = np.zeros((5, 100))

gp_model = gp_minimize(compute_mean_validation_score,
                       dimensions,
                       maxiter=100,
                       random_state=0,
                       n_start=1)

print("Best score obtained = %0.4f for parameters %s" % (-gp_model.fun,
                                                         gp_model.x))
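The call above uses the older gp_minimize signature (maxiter, n_start). With a recent scikit-optimize release the same search would look roughly as follows; this is a sketch under that assumption, reusing compute_mean_validation_score from above rather than the project's original code:

from skopt import gp_minimize

# Sketch only: same search with the newer signature. Integer (low, high)
# tuples are treated as integer dimensions, so the sampled values arrive
# as ints rather than floats.
gp_model = gp_minimize(compute_mean_validation_score,
                       dimensions=[(3, 50), (1, 12), (1, 12), (1, 12)],
                       n_calls=100,
                       random_state=0)
print("Best score obtained = %0.4f for parameters %s"
      % (-gp_model.fun, gp_model.x))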
Code example #11

def compute_mean_validation_score(forest_params):
    # Hack to allow integer parameters, since the parameters
    # sampled internally are uniform in a given range.
    forest_params = [int(param) for param in forest_params]
    max_depth, max_features, mss, msl = forest_params

    rfc.set_params(max_depth=max_depth,
                   max_features=max_features,
                   min_samples_split=mss,
                   min_samples_leaf=msl)
    return -np.mean(cross_val_score(rfc, X, y, cv=3, n_jobs=-1))


# Bounds inspired by
# http://scikit-learn.org/dev/auto_examples/model_selection/randomized_search.html#example-model-selection-randomized-search-py
bounds = [(3, 50), (1, 12), (1, 12), (1, 12)]
best_dummy_scores = np.zeros((5, 100))
best_gp_scores = np.zeros((5, 100))

print("Doing a gp-based search for the best random forest hyperparameter.")
t = time()
gp_model = gp_minimize(compute_mean_validation_score,
                       bounds,
                       maxiter=100,
                       random_state=0,
                       n_start=1)
print(time() - t)
print("Best score obtained, %0.4f" % -gp_model.fun)
Code example #12
def test_branin_hartmann_sampling():
    bounds = np.tile((0, 1), (6, 1))
    res = gp_minimize(
        hartmann_6, bounds, random_state=0,
        search='sampling', maxiter=200, acq='UCB')
    assert_less(res.fun, -2.5)
Code example #13
def test_branin_bayes_sampling():
    res = gp_minimize(
        branin, [[-5, 10], [0, 15]], random_state=0,
        search='sampling', maxiter=200, acq='UCB')
    assert_less(res.fun, 0.41)
Code example #14
dimensions = [(-5.0, 10.0), (0.0, 15.0)]
best_dummy_scores = np.zeros((5, 200))
best_gp_scores = np.zeros((5, 200))
n_iterations = range(1, 201)

for random_state in range(5):
    print("Doing a random search for the minimum.")
    t = time()
    dummy_model = dummy_minimize(
        branin, dimensions, maxiter=200, random_state=random_state)
    print(time() - t)
    print("Best score obtained, %0.4f" % dummy_model.fun)

    print("Doing a gp-based search for the minimum")
    t = time()
    gp_model = gp_minimize(
        branin, dimensions, maxiter=200, random_state=random_state, n_start=1)
    print(time() - t)
    print("Best score obtained, %0.4f" % gp_model.fun)

    for j in range(1, 201):
        best_dummy_scores[random_state, j-1] = np.min(
            dummy_model.func_vals[:j])
        best_gp_scores[random_state, j-1] = np.min(
            gp_model.func_vals[:j])

mean_dummy_scores = np.mean(best_dummy_scores, axis=0)
mean_gp_scores = np.mean(best_gp_scores, axis=0)
err_dummy_scores = np.std(best_dummy_scores, axis=0) / sqrt(10)
err_gp_scores = np.std(best_gp_scores, axis=0) / sqrt(10)

print("Mean minimum value obtained after 200 iterations by dummy search "
Code example #15
def check_minimize(func, y_opt, bounds, search, acq, margin, maxiter):
    r = gp_minimize(func, bounds, search=search, acq=acq,
                    maxiter=maxiter, random_state=1)
    assert_less(r.fun, y_opt + margin)
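For reference, a hypothetical invocation of this helper; the margin and iteration budget below are illustrative rather than taken from the project's test suite, with the known Branin minimum of roughly 0.397887 as the target:

from skopt.benchmarks import branin

# Illustrative call only: require sampling-based search with the UCB
# acquisition to get within 0.05 of the Branin minimum in 200 iterations.
check_minimize(branin, 0.397887, [(-5.0, 10.0), (0.0, 15.0)],
               'sampling', 'UCB', 0.05, 200)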