def test_dump_and_load():
    """Round-trip an optimization result through dump/load, both with and
    without storing the objective function."""
    res = gp_minimize(bench3, [(-2.0, 2.0)],
                      x0=[0.],
                      acq_func="LCB",
                      n_calls=2,
                      n_random_starts=0,
                      random_state=1)

    # Normal dumping and loading: the objective is kept in the specs.
    with tempfile.TemporaryFile() as f:
        dump(res, f)
        f.seek(0)
        res_loaded = load(f)
        check_optimization_results_equality(res, res_loaded)
        assert_true("func" in res_loaded.specs["args"])

    # Dumping with store_objective=False drops the objective function.
    with tempfile.TemporaryFile() as f:
        dump(res, f, store_objective=False)
        f.seek(0)
        res_loaded = load(f)
        check_optimization_results_equality(res, res_loaded)
        assert_true("func" not in res_loaded.specs["args"])

    # Deleting the objective first and dumping the modified object
    # must work as well.
    del res.specs["args"]["func"]
    with tempfile.TemporaryFile() as f:
        dump(res, f, store_objective=False)
        f.seek(0)
        res_loaded = load(f)
        check_optimization_results_equality(res, res_loaded)
        assert_true("func" not in res_loaded.specs["args"])
def test_deadline_stopper():
    """DeadlineStopper halts the optimization once its time budget runs out."""
    # With an (almost) zero budget only a single iteration fits.
    deadline = DeadlineStopper(0.0001)
    gp_minimize(bench3, [(-1.0, 1.0)],
                callback=deadline, n_calls=10, random_state=1)
    assert len(deadline.iter_time) == 1
    assert np.sum(deadline.iter_time) > deadline.total_time

    # A generous budget lets all ten calls run to completion.
    deadline = DeadlineStopper(60)
    gp_minimize(bench3, [(-1.0, 1.0)],
                callback=deadline, n_calls=10, random_state=1)
    assert len(deadline.iter_time) == 10
    assert np.sum(deadline.iter_time) < deadline.total_time
def test_n_jobs():
    """Running with n_jobs=2 must visit exactly the same points as a
    single-job run when the random seed is fixed."""
    shared = dict(acq_optimizer="lbfgs",
                  acq_func="EI",
                  n_calls=2,
                  n_random_starts=1,
                  random_state=1,
                  noise=1e-10)
    r_single = gp_minimize(bench3, [(-2.0, 2.0)], **shared)
    r_double = gp_minimize(bench3, [(-2.0, 2.0)], n_jobs=2, **shared)
    assert_array_equal(r_single.x_iters, r_double.x_iters)
def test_expected_minimum():
    """expected_minimum is deterministic for a fixed seed and, with
    negligible noise, not worse than the best observed value."""
    res = gp_minimize(bench3, [(-2.0, 2.0)],
                      x0=[0.],
                      noise=1e-8,
                      n_calls=8,
                      n_random_starts=3,
                      random_state=1)

    x_min, f_min = expected_minimum(res, random_state=1)
    x_min2, f_min2 = expected_minimum(res, random_state=1)

    # true since noise ~= 0.0
    assert f_min <= res.fun
    # Same seed must give the same answer.
    assert x_min == x_min2
    assert f_min == f_min2
def test_use_given_estimator():
    """
    Test that gp_minimize does not use default estimator if one is passed
    in explicitly.
    """
    domain = [(1.0, 2.0), (3.0, 4.0)]
    noise_correct = 1e+5
    noise_fake = 1e-10

    estimator = cook_estimator("GP", domain, noise=noise_correct)
    res = gp_minimize(branin, domain,
                      n_calls=1,
                      n_random_starts=1,
                      base_estimator=estimator,
                      noise=noise_fake)

    # The fitted model must carry the noise of the estimator we passed in,
    # not the noise handed to gp_minimize.
    assert res['models'][-1].noise == noise_correct
def check_minimize(func, y_opt, bounds, acq_optimizer, acq_func,
                   margin, n_calls, n_random_starts=10):
    """Run gp_minimize on `func` and assert the optimum found lies within
    `margin` of the known optimum `y_opt`."""
    result = gp_minimize(func, bounds,
                         acq_optimizer=acq_optimizer,
                         acq_func=acq_func,
                         n_random_starts=n_random_starts,
                         n_calls=n_calls,
                         random_state=1,
                         noise=1e-10)
    assert_less(result.fun, y_opt + margin)
def test_defaults_are_equivalent():
    """Check that the defaults of Optimizer reproduce the defaults of
    gp_minimize when both see the same sequence of evaluations."""
    space = [(-5., 10.), (0., 15.)]
    opt = Optimizer(space, random_state=1)
    for _ in range(12):
        x = opt.ask()
        res_opt = opt.tell(x, branin(x))

    res_min = gp_minimize(branin, space, n_calls=12, random_state=1)

    assert res_min.space == res_opt.space
    # tolerate small differences in the points sampled
    assert np.allclose(res_min.x_iters, res_opt.x_iters)
    assert np.allclose(res_min.x, res_opt.x)
def test_gpr_default():
    """Smoke test that gp_minimize does not fail for default values."""
    gp_minimize(branin, ((-5.0, 10.0), (0.0, 15.0)),
                n_random_starts=1,
                n_calls=2)
from ProcessOptimizer.plots import plot_objective
from ProcessOptimizer import bokeh_plot

# For reproducibility
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
plt.set_cmap("viridis")

SPACE = [
    Integer(1, 20, name='max_depth'),
    Integer(2, 100, name='min_samples_split'),
    Integer(5, 30, name='min_samples_leaf'),
    Integer(1, 30, name='max_features'),
    Categorical(list('abc'), name='dummy'),
    Categorical(['gini', 'entropy'], name='criterion'),
    # NOTE(review): a second dimension also named 'dummy'. Both are
    # filtered out of the classifier kwargs below, but helpers that look
    # dimensions up by name may only see one of them — confirm intended.
    Categorical(list('def'), name='dummy'),
]


def objective(params):
    """Negative mean cross-validated score of a decision tree whose
    hyper-parameters come from `params` ('dummy' dimensions ignored)."""
    clf = DecisionTreeClassifier(**{
        dim.name: val
        for dim, val in zip(SPACE, params)
        if dim.name != 'dummy'
    })
    # BUG FIX: `return_X_y` must be passed by keyword — the positional
    # form `load_breast_cancer(True)` fails on modern scikit-learn.
    return -np.mean(cross_val_score(
        clf, *load_breast_cancer(return_X_y=True)))


result = gp_minimize(objective, SPACE, n_calls=20)
bokeh_plot.start(result)
from ProcessOptimizer import bokeh_plot

# For reproducibility
import numpy as np
np.random.seed(123)

# BUG FIX: `plt` was used below without ever being imported, so this
# script crashed with a NameError before the optimization started.
import matplotlib.pyplot as plt
plt.set_cmap("viridis")


# Here we define a function that we evaluate.
def funny_func(x):
    """Sum of squared coordinates of `x` (global minimum at the origin)."""
    return sum(xi ** 2 for xi in x)


# We run gp_minimize on the function
bounds = [(-1, 1.)] * 7
n_calls = 30

result = gp_minimize(funny_func, bounds,
                     n_calls=n_calls,
                     n_random_starts=20,
                     acq_optimizer="auto",
                     acq_func="gp_hedge",
                     random_state=4)

bokeh_plot.start(result)