def test_acq_optimizer_with_time_api(base_estimator, acq_func):
    opt = Optimizer(
        [(-2.0, 2.0)],
        base_estimator=base_estimator,
        acq_func=acq_func,
        acq_optimizer="sampling",
        n_initial_points=2,
    )
    x1 = opt.ask()
    opt.tell(x1, (bench1(x1), 1.0))
    x2 = opt.ask()
    res = opt.tell(x2, (bench1(x2), 2.0))

    # x1 and x2 are random.
    assert x1 != x2

    assert len(res.models) == 1
    assert_array_equal(res.func_vals.shape, (2,))
    assert_array_equal(res.log_time.shape, (2,))

    # x3 = opt.ask()

    # telling a bare float instead of a (value, time) tuple must fail
    with pytest.raises(TypeError):
        opt.tell(x2, bench1(x2))
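
# A minimal usage sketch of the (value, time) tell API exercised above,
# assuming the per-second acquisition functions from scikit-optimize
# ("EIps", "PIps") are available in this fork. The _demo_* helper below is
# illustrative only; the leading underscore keeps pytest from collecting it.
def _demo_tell_with_time():
    import time

    opt = Optimizer(
        [(-2.0, 2.0)],
        base_estimator="ET",
        acq_func="EIps",
        acq_optimizer="sampling",
        n_initial_points=2,
    )
    for _ in range(3):
        x = opt.ask()
        start = time.time()
        y = bench1(x)
        # report both the objective value and the evaluation time
        opt.tell(x, (y, time.time() - start))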
def test_model_queue_size():
    # Check that model_queue_size limits the model queue size
    base_estimator = ExtraTreesRegressor(random_state=2)
    opt = Optimizer(
        [(-2.0, 2.0)],
        base_estimator,
        n_initial_points=1,
        acq_optimizer="sampling",
        model_queue_size=2,
    )
    opt.run(bench1, n_iter=3)
    # run() fits a model at each of the three tell() calls, but
    # model_queue_size=2 keeps only the two most recent models
    assert_equal(len(opt.models), 2)
    assert_equal(len(opt.Xi), 3)
    opt.ask()
    assert_equal(len(opt.models), 2)
    assert_equal(len(opt.Xi), 3)
    assert_equal(opt.ask(), opt.ask())
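
# Sketch of why model_queue_size matters in long runs, under the same
# assumptions as the test above: only the most recent fitted surrogates are
# kept, so memory stays bounded no matter how many iterations are run.
def _demo_model_queue_size():
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
        model_queue_size=1,
    )
    opt.run(bench1, n_iter=5)
    assert len(opt.models) == 1  # older surrogates were discarded
    assert len(opt.Xi) == 5  # all observations are still kept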
def test_categorical_only2():
    from numpy import linalg
    from deephyper.skopt.space import Categorical
    from deephyper.skopt.learning import GaussianProcessRegressor

    space = [Categorical([1, 2, 3]), Categorical([4, 5, 6])]

    opt = Optimizer(
        space,
        base_estimator=GaussianProcessRegressor(alpha=1e-7),
        acq_optimizer="lbfgs",
        n_initial_points=10,
        n_jobs=2,
    )

    next_x = opt.ask(n_points=4)
    assert len(next_x) == 4
    opt.tell(next_x, [linalg.norm(x) for x in next_x])
    next_x = opt.ask(n_points=4)
    assert len(next_x) == 4
    opt.tell(next_x, [linalg.norm(x) for x in next_x])
    next_x = opt.ask(n_points=4)
    assert len(next_x) == 4
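
# Sketch of the batch interface used above, assuming the default surrogate
# settings: ask(n_points=k) returns k candidate points (useful for parallel
# evaluation) and tell() accepts the matching lists in one call.
def _demo_batch_ask_tell():
    from numpy import linalg
    from deephyper.skopt.space import Categorical

    opt = Optimizer(
        [Categorical([1, 2, 3]), Categorical([4, 5, 6])],
        n_initial_points=4,
    )
    xs = opt.ask(n_points=4)  # a batch of 4 suggestions
    opt.tell(xs, [linalg.norm(x) for x in xs])  # report the whole batch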
def test_exhaust_initial_calls(base_estimator):
    # check a model is fitted and used to make suggestions after we added
    # at least n_initial_points via tell()
    opt = Optimizer(
        [(-2.0, 2.0)],
        base_estimator,
        n_initial_points=2,
        acq_optimizer="sampling",
        random_state=1,
    )

    x0 = opt.ask()  # random point
    x1 = opt.ask()  # random point
    assert x0 != x1
    # first call to tell()
    r1 = opt.tell(x1, 3.0)
    assert len(r1.models) == 0
    x2 = opt.ask()  # random point
    assert x1 != x2
    # second call to tell()
    r2 = opt.tell(x2, 4.0)
    if base_estimator.lower() == "dummy":
        assert len(r2.models) == 0
    else:
        assert len(r2.models) == 1
    # this is the first non-random point
    x3 = opt.ask()
    assert x2 != x3
    x4 = opt.ask()
    r3 = opt.tell(x3, 1.0)
    # no new information was added so the point should be the same, unless we
    # are using the dummy estimator, which will forever return random points
    # and never fit any models
    if base_estimator.lower() == "dummy":
        assert x3 != x4
        assert len(r3.models) == 0
    else:
        assert x3 == x4
        assert len(r3.models) == 2
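
# Sketch of the canonical ask/tell loop the test above verifies: suggestions
# are random until n_initial_points observations have been told, after which
# a surrogate is fitted and ask() becomes model-based. Assumes the string
# shorthand "ET" for extra-trees is supported, as in scikit-optimize.
def _demo_ask_tell_loop():
    opt = Optimizer(
        [(-2.0, 2.0)],
        "ET",
        n_initial_points=2,
        acq_optimizer="sampling",
        random_state=1,
    )
    for _ in range(2):
        x = opt.ask()  # random: still in the initial phase
        opt.tell(x, bench1(x))
    assert len(opt.models) == 1  # surrogate fitted after the initial phase
    x = opt.ask()  # first model-based suggestion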
def test_categorical_only():
    from deephyper.skopt.space import Categorical

    cat1 = Categorical([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    cat2 = Categorical([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])

    opt = Optimizer([cat1, cat2])
    for n in range(15):
        x = opt.ask()
        res = opt.tell(x, 12 * n)
    assert len(res.x_iters) == 15
    next_x = opt.ask(n_points=4)
    assert len(next_x) == 4

    cat3 = Categorical(["2", "3", "4", "5", "6", "7", "8", "9", "10", "11"])
    cat4 = Categorical(["2", "3", "4", "5", "6", "7", "8", "9", "10", "11"])

    opt = Optimizer([cat3, cat4])
    for n in range(15):
        x = opt.ask()
        res = opt.tell(x, 12 * n)
    assert len(res.x_iters) == 15
    next_x = opt.ask(n_points=4)
    assert len(next_x) == 4
def test_defaults_are_equivalent():
    # check that the defaults of Optimizer reproduce the defaults of
    # gp_minimize
    space = [(-5.0, 10.0), (0.0, 15.0)]
    # opt = Optimizer(space, 'ET', acq_func="EI", random_state=1)
    opt = Optimizer(space, random_state=1)

    for n in range(12):
        x = opt.ask()
        res_opt = opt.tell(x, branin(x))

    # res_min = forest_minimize(branin, space, n_calls=12, random_state=1)
    res_min = gp_minimize(branin, space, n_calls=12, random_state=1)

    assert res_min.space == res_opt.space
    # tolerate small differences in the points sampled
    assert np.allclose(res_min.x_iters, res_opt.x_iters)  # , atol=1e-5)
    assert np.allclose(res_min.x, res_opt.x)  # , atol=1e-5)

    res_opt2 = opt.get_result()
    assert np.allclose(res_min.x_iters, res_opt2.x_iters)  # , atol=1e-5)
    assert np.allclose(res_min.x, res_opt2.x)  # , atol=1e-5)
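
# Sketch: get_result() returns the same OptimizeResult as the last tell(),
# so intermediate results can be inspected without adding an observation.
def _demo_get_result():
    opt = Optimizer([(-5.0, 10.0), (0.0, 15.0)], random_state=1)
    x = opt.ask()
    res = opt.tell(x, branin(x))
    assert opt.get_result().x == res.x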
def test_multiple_asks():
    # calling ask() multiple times without a tell() in between should
    # be a "no op"
    base_estimator = ExtraTreesRegressor(random_state=2)
    opt = Optimizer(
        [(-2.0, 2.0)],
        base_estimator,
        n_initial_points=1,
        acq_optimizer="sampling",
    )

    opt.run(bench1, n_iter=3)
    # tell() computes the next point ready for the next call to ask()
    # hence there are three after three iterations
    assert_equal(len(opt.models), 3)
    assert_equal(len(opt.Xi), 3)
    opt.ask()
    assert_equal(len(opt.models), 3)
    assert_equal(len(opt.Xi), 3)
    assert_equal(opt.ask(), opt.ask())
    opt.update_next()
    assert_equal(opt.ask(), opt.ask())
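
# Sketch of the caching behaviour tested above: once a model-based next point
# has been computed, ask() keeps returning it until new information arrives
# via tell() or the point is explicitly recomputed with update_next().
def _demo_cached_ask():
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )
    x = opt.ask()
    opt.tell(x, bench1(x))  # fits a model and caches the next point
    assert opt.ask() == opt.ask()  # repeated ask() is a no-op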