def test_optimizer_copy(acq_func):
    """Optimizer.copy() must clone the estimator and the observed data."""
    regressor = ExtraTreesRegressor(random_state=2)
    opt = Optimizer(
        [(-2.0, 2.0)],
        regressor,
        acq_func=acq_func,
        n_initial_points=1,
        acq_optimizer="sampling",
    )

    # Run a few iterations so the optimizer holds points and objective
    # values worth copying; "ps" acquisition functions also track time.
    objective = bench1_with_time if "ps" in acq_func else bench1
    opt.run(objective, n_iter=3)

    clone = opt.copy()
    cloned_estimator = clone.base_estimator_

    if "ps" in acq_func:
        assert isinstance(cloned_estimator, MultiOutputRegressor)
        # The base estimator must be wrapped exactly once, not re-wrapped
        # on every copy.
        nested_wrap = isinstance(cloned_estimator.estimator, MultiOutputRegressor)
        assert not nested_wrap
    else:
        assert not isinstance(cloned_estimator, MultiOutputRegressor)

    assert_array_equal(clone.Xi, opt.Xi)
    assert_array_equal(clone.yi, opt.yi)
def test_invalid_tell_arguments_list():
    """tell() must reject an objective list containing a non-numeric entry."""
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )
    # Second objective value is None -> ValueError expected.
    assert_raises(ValueError, opt.tell, [[1.0], [2.0]], [1.0, None])
def test_invalid_tell_arguments():
    """tell() must reject mismatched x/y shapes."""
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )
    # A single point cannot come with multiple objective values.
    assert_raises(ValueError, opt.tell, [1.0], [1.0, 1.0])
def test_returns_result_object():
    """tell() returns a scipy OptimizeResult with internally consistent fields."""
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )
    res = opt.tell([1.5], 2.0)

    assert isinstance(res, OptimizeResult)
    # One objective value per evaluated point, and `fun` is the minimum.
    assert_equal(len(res.x_iters), len(res.func_vals))
    assert_equal(np.min(res.func_vals), res.fun)
def test_dump_and_load_optimizer():
    """A fitted optimizer must survive a dump/load round trip."""
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )
    opt.run(bench1, n_iter=3)

    # Serialize to an anonymous temp file, then read it back from the start.
    with tempfile.TemporaryFile() as handle:
        dump(opt, handle)
        handle.seek(0)
        load(handle)
def test_bounds_checking_1D():
    """tell() must reject 1-D points lying outside the search space."""
    lo, hi = -2.0, 2.0
    opt = Optimizer(
        [(lo, hi)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )

    # Single out-of-bounds point, above and below the interval.
    assert_raises(ValueError, opt.tell, [hi + 0.5], 2.0)
    assert_raises(ValueError, opt.tell, [lo - 0.5], 2.0)
    # Two points at once: one of them out of bounds is enough to fail.
    assert_raises(ValueError, opt.tell, [hi + 0.5, hi], (2.0, 3.0))
    assert_raises(ValueError, opt.tell, [lo - 0.5, hi], (2.0, 3.0))
def test_bounds_checking_2D():
    """tell() must reject 2-D points with any out-of-bounds component."""
    lo, hi = -2.0, 2.0
    opt = Optimizer(
        [(lo, hi), (lo + 4, hi + 4)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )

    # Both components out of range.
    assert_raises(ValueError, opt.tell, [hi + 0.5, hi + 4.5], 2.0)
    assert_raises(ValueError, opt.tell, [lo - 0.5, lo - 4.5], 2.0)
    # First component out, second within its dimension's range.
    assert_raises(ValueError, opt.tell, [hi + 0.5, hi + 0.5], 2.0)
    assert_raises(ValueError, opt.tell, [lo - 0.5, hi + 0.5], 2.0)
def test_model_queue_size():
    """model_queue_size must cap how many fitted models are retained."""
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
        model_queue_size=2,
    )
    opt.run(bench1, n_iter=3)

    # tell() already prepared the next point, so three iterations leave
    # three observations — but only two models due to the queue cap.
    assert_equal(len(opt.models), 2)
    assert_equal(len(opt.Xi), 3)

    # ask() without a tell() must not grow either collection.
    opt.ask()
    assert_equal(len(opt.models), 2)
    assert_equal(len(opt.Xi), 3)
    assert_equal(opt.ask(), opt.ask())
def test_multiple_asks():
    """Repeated ask() calls without an interleaved tell() are a no-op."""
    opt = Optimizer(
        [(-2.0, 2.0)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )
    opt.run(bench1, n_iter=3)

    # tell() prepares the next candidate eagerly, so after three
    # iterations there are three models and three observations.
    assert_equal(len(opt.models), 3)
    assert_equal(len(opt.Xi), 3)

    opt.ask()
    assert_equal(len(opt.models), 3)
    assert_equal(len(opt.Xi), 3)
    assert_equal(opt.ask(), opt.ask())

    # Forcing a refresh of the cached candidate keeps ask() idempotent.
    opt.update_next()
    assert_equal(opt.ask(), opt.ask())
def test_bounds_checking_2D_multiple_points():
    """tell() must reject a batch if any point violates the 2-D bounds."""
    lo, hi = -2.0, 2.0
    opt = Optimizer(
        [(lo, hi), (lo + 4, hi + 4)],
        ExtraTreesRegressor(random_state=2),
        n_initial_points=1,
        acq_optimizer="sampling",
    )

    # First component out of range, second component in range.
    assert_raises(
        ValueError,
        opt.tell,
        [(hi + 0.5, hi + 0.5), (hi + 0.5, hi + 0.5)],
        [2.0, 3.0],
    )
    assert_raises(
        ValueError,
        opt.tell,
        [(lo - 0.5, hi + 0.5), (lo - 0.5, hi + 0.5)],
        [2.0, 3.0],
    )
def test_n_random_starts_Optimizer():
    """The pre-0.4 `n_random_starts` keyword must raise a deprecation warning."""
    regressor = ExtraTreesRegressor(random_state=2)
    # `n_random_starts` was renamed to `n_initial_points` in v0.4.
    with pytest.deprecated_call():
        Optimizer([(0, 1.0)], regressor, n_random_starts=10, acq_optimizer="sampling")
def test_extra_forest():
    """Smoke-test ExtraTreesRegressor fit/predict/len/apply on a toy problem."""
    # Linearly separable toy sample with exact expected predictions.
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [-1, -1, -1, 1, 1, 1]
    T = [[-1, -1], [2, 2], [3, 2]]
    true_result = [-1, 1, 1]

    def fit_and_check(model):
        # Every configuration must fit, reproduce the toy targets exactly,
        # and expose its 10 trees through len().
        model.fit(X, y)
        assert_array_equal(model.predict(T), true_result)
        assert 10 == len(model)
        return model

    fit_and_check(ExtraTreesRegressor(n_estimators=10, random_state=1))
    fit_and_check(
        ExtraTreesRegressor(n_estimators=10, min_impurity_decrease=0.1, random_state=1)
    )
    fit_and_check(
        ExtraTreesRegressor(
            n_estimators=10,
            criterion="mse",
            max_depth=None,
            min_samples_split=2,
            min_samples_leaf=1,
            min_weight_fraction_leaf=0.0,
            max_features="auto",
            max_leaf_nodes=None,
            min_impurity_decrease=0.0,
            bootstrap=False,
            oob_score=False,
            n_jobs=1,
            random_state=1,
            verbose=0,
            warm_start=False,
        )
    )
    clf = fit_and_check(
        ExtraTreesRegressor(n_estimators=10, max_features=1, random_state=1)
    )

    # apply() maps each sample to one leaf index per tree.
    leaf_indices = clf.apply(X)
    assert leaf_indices.shape == (len(X), clf.n_estimators)
from sklearn.multioutput import MultiOutputRegressor from numpy.testing import assert_array_equal from numpy.testing import assert_equal from numpy.testing import assert_raises from deephyper.skopt import gp_minimize from deephyper.skopt import forest_minimize from deephyper.skopt.benchmarks import bench1, bench1_with_time from deephyper.skopt.benchmarks import branin from deephyper.skopt.learning import ExtraTreesRegressor, RandomForestRegressor from deephyper.skopt.learning import GradientBoostingQuantileRegressor from deephyper.skopt.optimizer import Optimizer from scipy.optimize import OptimizeResult TREE_REGRESSORS = ( ExtraTreesRegressor(random_state=2), RandomForestRegressor(random_state=2), GradientBoostingQuantileRegressor(random_state=2), ) ACQ_FUNCS_PS = ["EIps", "PIps"] ACQ_FUNCS_MIXED = ["EI", "EIps"] ESTIMATOR_STRINGS = [ "GP", "RF", "ET", "GBRT", "DUMMY", "gp", "rf", "et", "gbrt",