def test_reproducible_runs(strategy, surrogate):
    # two runs of the optimizer should yield exactly the same results
    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=1,
    )
    points = []
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        points.append(x)
        optimizer.tell(x, [branin(v) for v in x])

    # the x's should be exactly as they are in `points`
    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=1,
    )
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        assert points[i] == x
        optimizer.tell(x, [branin(v) for v in x])

def test_all_points_different(strategy, surrogate):
    """
    Tests whether the parallel optimizer always generates
    different points to evaluate.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=1,
    )
    tolerance = 1e-3  # points closer than this are considered identical
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        optimizer.tell(x, [branin(v) for v in x])
        distances = pdist(x)
        assert all(distances > tolerance)

def test_same_set_of_points_ask(strategy, surrogate):
    """
    For n_points not None, tests whether two consecutive calls
    to ask return the same sets of points.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=2,
    )
    for i in range(n_steps):
        xa = optimizer.ask(n_points, strategy)
        xb = optimizer.ask(n_points, strategy)
        optimizer.tell(xa, [branin(v) for v in xa])
        assert_equal(xa, xb)  # the two sets of generated points should be equal

def test_dict_list_space_representation():
    """
    Tests whether the conversion between the dictionary and list
    representations of a point in a search space works properly.
    """
    chef_space = {
        "Cooking time": (0, 1200),  # in minutes
        "Main ingredient": [
            "cheese",
            "cherimoya",
            "chicken",
            "chard",
            "chocolate",
            "chicory",
        ],
        "Secondary ingredient": ["love", "passion", "dedication"],
        "Cooking temperature": (-273.16, 10000.0),  # in Celsius
    }

    opt = Optimizer(dimensions=dimensions_aslist(chef_space))
    point = opt.ask()

    # check that the back-transformed point and the original one are equivalent
    assert_equal(
        point, point_aslist(chef_space, point_asdict(chef_space, point))
    )

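# A minimal usage sketch (not a test) of the dict/list helpers exercised above.
# It relies on the module-level imports used by the tests; the space below is
# illustrative and no particular key ordering of the helpers is assumed.
def _sketch_dict_list_round_trip():
    space = {
        "Cooking time": (0, 1200),
        "Secondary ingredient": ["love", "passion", "dedication"],
    }
    opt = Optimizer(dimensions=dimensions_aslist(space))
    point = opt.ask()                             # a point in list form
    as_dict = point_asdict(space, point)          # same point keyed by parameter name
    assert point_aslist(space, as_dict) == point  # the round trip is the identity
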
def test_purely_categorical_space():
    # Test reproduces the bug in #908; make sure it doesn't come back
    dims = [Categorical(["a", "b", "c"]), Categorical(["A", "B", "C"])]
    optimizer = Optimizer(dims, n_initial_points=2, random_state=3)

    for _ in range(2):
        x = optimizer.ask()
        # before the fix this call raised an exception
        optimizer.tell(x, np.random.uniform())

def test_dump_and_load_optimizer():
    base_estimator = ExtraTreesRegressor(random_state=2)
    opt = Optimizer(
        [(-2.0, 2.0)], base_estimator, n_initial_points=1, acq_optimizer="sampling"
    )

    opt.run(bench1, n_iter=3)

    with tempfile.TemporaryFile() as f:
        dump(opt, f)
        f.seek(0)
        load(f)

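# A minimal persistence sketch (not a test): dump/load also accept a file path
# instead of an open file object. The file name is hypothetical and the sketch
# relies on the module-level imports used by the tests above.
def _sketch_dump_and_load_by_path():
    import os

    opt = Optimizer([(-2.0, 2.0)], "ET", n_initial_points=1, acq_optimizer="sampling")
    opt.run(bench1, n_iter=3)
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "optimizer_checkpoint.pkl")
        dump(opt, path)
        restored = load(path)
    assert restored.Xi == opt.Xi  # observed points survive the round trip
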
def test_constant_liar_runs(strategy, surrogate, acq_func):
    """
    Tests whether the optimizer runs properly during the random
    initialization phase and beyond.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.

    * `acq_func` [string]:
        Name of the acquisition function used by the Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_func=acq_func,
        acq_optimizer="sampling",
        random_state=0,
    )

    # invalid arguments should be rejected:
    # a misspelled strategy name and a non-positive or non-integer n_points
    assert_raises(ValueError, optimizer.ask, 2, "cl_maen")
    assert_raises(ValueError, optimizer.ask, "0")
    assert_raises(ValueError, optimizer.ask, 0)

    for i in range(n_steps):
        x = optimizer.ask(n_points=n_points, strategy=strategy)
        # check that exactly n_points candidates were generated
        assert_equal(len(x), n_points)
        if "ps" in acq_func:
            optimizer.tell(x, [[branin(v), 1.1] for v in x])
        else:
            optimizer.tell(x, [branin(v) for v in x])

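# A minimal usage sketch (not a test) of the batch ask/tell ("constant liar")
# pattern the tests above exercise. "cl_min" is one of the supported strategies;
# the joblib-based parallel evaluation is only an illustrative assumption.
def _sketch_parallel_constant_liar():
    from joblib import Parallel, delayed

    opt = Optimizer(
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer="sampling",
        random_state=0,
    )
    for _ in range(3):
        batch = opt.ask(n_points=4, strategy="cl_min")  # one batch of candidates
        ys = Parallel(n_jobs=4)(delayed(branin)(x) for x in batch)
        opt.tell(batch, ys)
    return opt
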
def test_n_random_starts_Optimizer():
    # n_random_starts got renamed to n_initial_points in v0.4
    et = ExtraTreesRegressor(random_state=2)
    with pytest.deprecated_call():
        Optimizer([(0, 1.0)], et, n_random_starts=10, acq_optimizer="sampling")

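# For reference, a short sketch (not a test) of the current keyword used
# elsewhere in this module: n_initial_points replaces the deprecated
# n_random_starts argument.
def _sketch_current_initial_points_keyword():
    et = ExtraTreesRegressor(random_state=2)
    return Optimizer([(0, 1.0)], et, n_initial_points=10, acq_optimizer="sampling")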