def test_lhs_arange():
    dim = Categorical(['a', 'b', 'c'])
    dim.lhs_arange(10)
    dim = Integer(1, 20)
    dim.lhs_arange(10)
    dim = Real(-10, 20)
    dim.lhs_arange(10)
def test_purely_categorical_space():
    # Test reproduces the bug in #908, make sure it doesn't come back
    dims = [Categorical(['a', 'b', 'c']), Categorical(['A', 'B', 'C'])]
    optimizer = Optimizer(dims, n_initial_points=1, random_state=3)
    x = optimizer.ask()  # before the fix this call raised an exception
    optimizer.tell(x, 1.)
def test_searchcv_sklearn_compatibility():
    """
    Test whether the BayesSearchCV is compatible with base sklearn methods
    such as clone, set_params, get_params.
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )

    # used to try different model classes
    pipe = Pipeline([('model', SVC())])

    # single categorical value of 'model' parameter sets the model class
    lin_search = {
        'model': Categorical([LinearSVC()]),
        'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
    }

    dtc_search = {
        'model': Categorical([DecisionTreeClassifier()]),
        'model__max_depth': Integer(1, 32),
        'model__min_samples_split': Real(1e-3, 1.0, prior='log-uniform'),
    }

    svc_search = {
        'model': Categorical([SVC()]),
        'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
        'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
        'model__degree': Integer(1, 8),
        'model__kernel': Categorical(['linear', 'poly', 'rbf']),
    }

    opt = BayesSearchCV(pipe, [(lin_search, 1), svc_search], n_iter=2)
    opt_clone = clone(opt)

    params, params_clone = opt.get_params(), opt_clone.get_params()
    assert params.keys() == params_clone.keys()

    for param, param_clone in zip(params.items(), params_clone.items()):
        assert param[0] == param_clone[0]
        assert isinstance(param[1], type(param_clone[1]))

    opt.set_params(search_spaces=[(dtc_search, 1)])

    opt.fit(X_train, y_train)
    opt_clone.fit(X_train, y_train)

    total_evaluations = len(opt.cv_results_['mean_test_score'])
    total_evaluations_clone = len(opt_clone.cv_results_['mean_test_score'])

    # test if expected number of subspaces is explored
    assert total_evaluations == 1
    assert total_evaluations_clone == 1 + 2
def test_categorical_distance():
    categories = ['car', 'dog', 'orange']
    cat = Categorical(categories)
    for cat1 in categories:
        for cat2 in categories:
            delta = cat.distance(cat1, cat2)
            if cat1 == cat2:
                assert delta == 0
            else:
                assert delta == 1
def test_Constraints_init():
    space = Space([
        Real(1, 10), Real(1, 10), Real(1, 10),
        Integer(0, 10), Integer(0, 10), Integer(0, 10),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg'))
    ])
    cons_list = [
        Single(0, 5.0, 'real'),
        Inclusive(1, (3.0, 5.0), 'real'),
        Exclusive(2, (3.0, 5.0), 'real'),
        Single(3, 5, 'integer'),
        Inclusive(4, (3, 5), 'integer'),
        Exclusive(5, (3, 5), 'integer'),
        Single(6, 'b', 'categorical'),
        Inclusive(7, ('c', 'd', 'e'), 'categorical'),
        Exclusive(8, ('c', 'd', 'e'), 'categorical'),
        # Note that two constraints are being added to dimensions 4 and 5
        Inclusive(4, (7, 9), 'integer'),
        Exclusive(5, (7, 9), 'integer'),
    ]
    cons = Constraints(cons_list, space)

    # Test that space and constraints_list are being saved in the object
    assert_equal(cons.space, space)
    assert_equal(cons.constraints_list, cons_list)

    # Test that a correct list of single constraints has been made
    assert_equal(len(cons.single), space.n_dims)
    assert_equal(cons.single[1], None)
    assert_equal(cons.single[-1], None)
    assert_not_equal(cons.single[0], None)
    assert_not_equal(cons.single[6], None)

    # Test that a correct list of inclusive constraints has been made
    assert_equal(len(cons.inclusive), space.n_dims)
    assert_equal(cons.inclusive[0], [])
    assert_equal(cons.inclusive[2], [])
    assert_not_equal(cons.inclusive[1], [])
    assert_not_equal(cons.inclusive[7], [])
    assert_equal(len(cons.inclusive[4]), 2)

    # Test that a correct list of exclusive constraints has been made
    assert_equal(len(cons.exclusive), space.n_dims)
    assert_equal(cons.exclusive[3], [])
    assert_equal(cons.exclusive[7], [])
    assert_not_equal(cons.exclusive[2], [])
    assert_not_equal(cons.exclusive[5], [])
    assert_equal(len(cons.exclusive[5]), 2)
def test_categorical_transform():
    categories = ["apple", "orange", "banana", None, True, False, 3]
    cat = Categorical(categories)

    apple = [1., 0., 0., 0., 0., 0., 0.]
    orange = [0., 1., 0., 0., 0., 0., 0.]
    banana = [0., 0., 1., 0., 0., 0., 0.]
    none = [0., 0., 0., 1., 0., 0., 0.]
    true = [0., 0., 0., 0., 1., 0., 0.]
    false = [0., 0., 0., 0., 0., 1., 0.]
    three = [0., 0., 0., 0., 0., 0., 1.]

    assert_equal(cat.transformed_size, 7)
    assert_equal(cat.transformed_size, cat.transform(["apple"]).size)
    assert_array_equal(
        cat.transform(categories),
        [apple, orange, banana, none, true, false, three]
    )
    assert_array_equal(cat.transform(["apple", "orange"]), [apple, orange])
    assert_array_equal(cat.transform(["apple", "banana"]), [apple, banana])
    assert_array_equal(cat.inverse_transform([apple, orange]),
                       ["apple", "orange"])
    assert_array_equal(cat.inverse_transform([apple, banana]),
                       ["apple", "banana"])
    ent_inverse = cat.inverse_transform(
        [apple, orange, banana, none, true, false, three])
    assert_array_equal(ent_inverse, categories)
def test_searchcv_reproducibility():
    """
    Test whether results of BayesSearchCV can be reproduced with a fixed
    random state.
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )

    random_state = 42

    opt = BayesSearchCV(
        SVC(random_state=random_state),
        {
            'C': Real(1e-6, 1e+6, prior='log-uniform'),
            'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
            'degree': Integer(1, 8),
            'kernel': Categorical(['linear', 'poly', 'rbf']),
        },
        n_iter=11, random_state=random_state
    )

    opt.fit(X_train, y_train)
    best_est = opt.best_estimator_

    opt2 = clone(opt).fit(X_train, y_train)
    best_est2 = opt2.best_estimator_

    assert getattr(best_est, 'C') == getattr(best_est2, 'C')
    assert getattr(best_est, 'gamma') == getattr(best_est2, 'gamma')
    assert getattr(best_est, 'degree') == getattr(best_est2, 'degree')
    assert getattr(best_est, 'kernel') == getattr(best_est2, 'kernel')
def test_constraints_rvs():
    space = Space([
        Real(1, 10), Real(1, 10), Real(1, 10),
        Integer(0, 10), Integer(0, 10), Integer(0, 10),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg'))
    ])
    cons_list = [
        Single(0, 5.0, 'real'),
        Inclusive(1, (3.0, 5.0), 'real'),
        Exclusive(2, (3.0, 5.0), 'real'),
        Single(3, 5, 'integer'),
        Inclusive(4, (3, 5), 'integer'),
        Exclusive(5, (3, 5), 'integer'),
        Single(6, 'b', 'categorical'),
        Inclusive(7, ('c', 'd', 'e'), 'categorical'),
        Exclusive(8, ('c', 'd', 'e'), 'categorical'),
        # Note that two constraints are being added to dimensions 4 and 5
        Inclusive(4, (7, 9), 'integer'),
        Exclusive(5, (7, 9), 'integer'),
    ]

    # Test length of samples
    constraints = Constraints(cons_list, space)
    samples = constraints.rvs(n_samples=100)
    assert_equal(len(samples), 100)
    assert_equal(len(samples[0]), space.n_dims)
    assert_equal(len(samples[-1]), space.n_dims)

    # Test random state
    samples_a = constraints.rvs(n_samples=100, random_state=1)
    samples_b = constraints.rvs(n_samples=100, random_state=1)
    samples_c = constraints.rvs(n_samples=100, random_state=2)
    assert_equal(samples_a, samples_b)
    assert_not_equal(samples_a, samples_c)

    # Test invalid constraint combinations
    space = Space([Real(0, 1)])
    cons_list = [Exclusive(0, (0.3, 0.7), 'real'),
                 Inclusive(0, (0.5, 0.6), 'real')]
    constraints = Constraints(cons_list, space)
    with raises(RuntimeError):
        samples = constraints.rvs(n_samples=10)
def test_lhs():
    SPACE = Space([
        Integer(-20, 20),
        Real(-10.5, 100),
        Categorical(list('abc'))
    ])
    samples = SPACE.lhs(10)
    assert len(samples) == 10
    assert len(samples[0]) == 3
def test_searchcv_runs_multiple_subspaces():
    """
    Test whether the BayesSearchCV runs without exceptions when
    multiple subspaces are given.
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )

    # used to try different model classes
    pipe = Pipeline([('model', SVC())])

    # single categorical value of 'model' parameter sets the model class
    lin_search = {
        'model': Categorical([LinearSVC()]),
        'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
    }

    dtc_search = {
        'model': Categorical([DecisionTreeClassifier()]),
        'model__max_depth': Integer(1, 32),
        'model__min_samples_split': Real(1e-3, 1.0, prior='log-uniform'),
    }

    svc_search = {
        'model': Categorical([SVC()]),
        'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
        'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
        'model__degree': Integer(1, 8),
        'model__kernel': Categorical(['linear', 'poly', 'rbf']),
    }

    opt = BayesSearchCV(
        pipe,
        [(lin_search, 1), (dtc_search, 1), svc_search],
        n_iter=2
    )

    opt.fit(X_train, y_train)

    # test if all subspaces are explored
    total_evaluations = len(opt.cv_results_['mean_test_score'])
    assert total_evaluations == 1 + 1 + 2, "Not all spaces were explored!"
def test_categorical_repr():
    small_cat = Categorical([1, 2, 3, 4, 5])
    assert (small_cat.__repr__() ==
            "Categorical(categories=(1, 2, 3, 4, 5), prior=None)")

    big_cat = Categorical([1, 2, 3, 4, 5, 6, 7, 8])
    assert (big_cat.__repr__() ==
            'Categorical(categories=(1, 2, 3, ..., 6, 7, 8), prior=None)')
def test_categorical_identity():
    categories = ["cat", "dog", "rat"]
    cat = Categorical(categories, transform="identity")
    samples = cat.rvs(100)
    assert_true(all([t in categories for t in samples]))
    transformed = cat.transform(samples)
    assert_array_equal(transformed, samples)
    assert_array_equal(samples, cat.inverse_transform(transformed))
def test_searchcv_runs(surrogate, n_jobs, n_points, cv=None):
    """
    Test whether the cross validation search wrapper around sklearn
    models runs properly with available surrogates and with single
    or multiple workers and different number of parameter settings
    to ask from the optimizer in parallel.

    Parameters
    ----------
    * `surrogate` [str or None]:
        A class of the scikit-optimize surrogate used. None means
        to use default surrogate.

    * `n_jobs` [int]:
        Number of parallel processes to use for computations.
    """
    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, random_state=0
    )

    # create an instance of a surrogate if it is not a string
    if surrogate is not None:
        optimizer_kwargs = {'base_estimator': surrogate}
    else:
        optimizer_kwargs = None

    opt = BayesSearchCV(
        SVC(),
        {
            'C': Real(1e-6, 1e+6, prior='log-uniform'),
            'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
            'degree': Integer(1, 8),
            'kernel': Categorical(['linear', 'poly', 'rbf']),
        },
        n_jobs=n_jobs, n_iter=11, n_points=n_points, cv=cv,
        optimizer_kwargs=optimizer_kwargs
    )

    opt.fit(X_train, y_train)

    # this normally does not hold only if something is wrong
    # with the optimization procedure as such
    assert_greater(opt.score(X_test, y_test), 0.9)
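# A hedged sketch, not from the original test module: one plausible way the
# parameters of test_searchcv_runs above could be supplied via
# pytest.mark.parametrize. The concrete values are assumptions; 'GP' is the
# base-estimator string used elsewhere in these tests, and pytest is assumed
# to be imported as in the surrounding test modules.
@pytest.mark.parametrize("surrogate", [None, 'GP'])
@pytest.mark.parametrize("n_jobs", [1, -1])
@pytest.mark.parametrize("n_points", [1, 3])
def test_searchcv_runs_parametrized(surrogate, n_jobs, n_points):
    # Delegate to the test above with one combination of settings.
    test_searchcv_runs(surrogate, n_jobs, n_points)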
def test_categorical_transform_binary():
    categories = ["apple", "orange"]
    cat = Categorical(categories)

    apple = [0.]
    orange = [1.]

    assert_equal(cat.transformed_size, 1)
    assert_equal(cat.transformed_size, cat.transform(["apple"]).size)
    assert_array_equal(cat.transform(categories), [apple, orange])
    assert_array_equal(cat.transform(["apple", "orange"]), [apple, orange])
    assert_array_equal(cat.inverse_transform([apple, orange]),
                       ["apple", "orange"])
    ent_inverse = cat.inverse_transform([apple, orange])
    assert_array_equal(ent_inverse, categories)
def pow10map(x):
    return 10.0**x


def pow2intmap(x):
    return int(2.0**x)


def nop(x):
    return x


nnparams = {
    # up to 1024 neurons
    'hidden_layer_sizes': (Real(1.0, 10.0), pow2intmap),
    'activation': (Categorical(['identity', 'logistic', 'tanh', 'relu']), nop),
    'solver': (Categorical(['lbfgs', 'sgd', 'adam']), nop),
    'alpha': (Real(-5.0, -1), pow10map),
    'batch_size': (Real(5.0, 10.0), pow2intmap),
    'learning_rate': (Categorical(['constant', 'invscaling', 'adaptive']), nop),
    'max_iter': (Real(5.0, 8.0), pow2intmap),
    'learning_rate_init': (Real(-5.0, -1), pow10map),
    'power_t': (Real(0.01, 0.99), nop),
    'momentum': (Real(0.1, 0.98), nop),
    'nesterovs_momentum': (Categorical([True, False]), nop),
    'beta_1': (Real(0.1, 0.98), nop),
    'beta_2': (Real(0.1, 0.9999999), nop),
}

MODELS = {
def test_optimizer_with_constraints(acq_optimizer):
    base_estimator = 'GP'
    space = Space([
        Real(1, 10), Real(1, 10), Real(1, 10),
        Integer(0, 10), Integer(0, 10), Integer(0, 10),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg'))
    ])
    cons_list = [Single(0, 5.0, 'real'), Single(3, 5, 'integer')]
    cons_list_2 = [Single(0, 4.0, 'real'), Single(3, 4, 'integer')]
    cons = Constraints(cons_list, space)
    cons_2 = Constraints(cons_list_2, space)

    # Test behavior when not adding constraints
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=5)

    # Test that constraint is None when no constraint has been set so far
    assert_equal(opt._constraints, None)

    # Test constraints are still None
    for _ in range(6):
        next_x = opt.ask()
        f_val = np.random.random()*100
        opt.tell(next_x, f_val)
    assert_equal(opt._constraints, None)
    opt.remove_constraints()
    assert_equal(opt._constraints, None)

    # Test behavior when adding constraints in an optimization setting
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=3)
    opt.set_constraints(cons)
    assert_equal(opt._constraints, cons)
    next_x = opt.ask()
    assert_equal(next_x[0], 5.0)
    assert_equal(next_x[3], 5)
    f_val = np.random.random()*100
    opt.tell(next_x, f_val)
    assert_equal(opt._constraints, cons)
    opt.set_constraints(cons_2)
    next_x = opt.ask()
    assert_equal(opt._constraints, cons_2)
    assert_equal(next_x[0], 4.0)
    assert_equal(next_x[3], 4)
    f_val = np.random.random()*100
    opt.tell(next_x, f_val)
    assert_equal(opt._constraints, cons_2)
    opt.remove_constraints()
    assert_equal(opt._constraints, None)
    next_x = opt.ask()
    assert_not_equal(next_x[0], 4.0)
    assert_not_equal(next_x[0], 5.0)
    f_val = np.random.random()*100
    opt.tell(next_x, f_val)
    assert_equal(opt._constraints, None)

    # Test that next_x is changed when adding constraints
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=3)
    assert_false(hasattr(opt, '_next_x'))
    for _ in range(4):  # We exhaust initial points
        next_x = opt.ask()
        f_val = np.random.random()*100
        opt.tell(next_x, f_val)
    assert_true(hasattr(opt, '_next_x'))  # Now next_x should be in optimizer
    assert_not_equal(next_x[0], 4.0)
    assert_not_equal(next_x[0], 5.0)
    next_x = opt._next_x
    opt.set_constraints(cons)
    assert_not_equal(opt._next_x, next_x)  # Check that next_x has been changed
    assert_equal(opt._next_x[0], 5.0)
    assert_equal(opt._next_x[3], 5)
    next_x = opt._next_x
    opt.set_constraints(cons_2)
    assert_not_equal(opt._next_x, next_x)
    assert_equal(opt._next_x[0], 4.0)
    assert_equal(opt._next_x[3], 4)

    # Test that adding a Constraints object or a constraint list gives the
    # same result
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=3)
    opt.set_constraints(cons_list)
    opt2 = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                     n_initial_points=3)
    opt2.set_constraints(cons)
    assert_equal(opt._constraints, opt2._constraints)

    # Test that constraints are satisfied
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=2)
    opt.set_constraints(cons)
    next_x = opt.ask()
    assert_equal(next_x[0], 5.0)

    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=2)
    next_x = opt.ask()
    assert_not_equal(next_x[0], 5.0)
    f_val = np.random.random()*100
    opt.tell(next_x, f_val)
    opt.set_constraints(cons)
    next_x = opt.ask()
    assert_equal(next_x[0], 5.0)
    assert_equal(next_x[3], 5)
    opt.set_constraints(cons)
    next_x = opt.ask()
    f_val = np.random.random()*100
    opt.tell(next_x, f_val)
    opt.set_constraints(cons_2)
    next_x = opt.ask()
    assert_equal(next_x[0], 4.0)
    assert_equal(next_x[3], 4)
    f_val = np.random.random()*100
    opt.tell(next_x, f_val)
    assert_equal(next_x[0], 4.0)
    assert_equal(next_x[3], 4)
def test_Constraints_validate_sample():
    space = Space([
        Real(1, 10), Real(1, 10), Real(1, 10),
        Integer(0, 10), Integer(0, 10), Integer(0, 10),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg'))
    ])

    # Test validation of single constraints
    cons_list = [Single(0, 5.0, 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 5.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 5.00001
    assert_false(cons.validate_sample(sample))
    sample[0] = 4.99999
    assert_false(cons.validate_sample(sample))

    cons_list = [Single(3, 5, 'integer')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[3] = 5
    assert_true(cons.validate_sample(sample))
    sample[3] = 6
    assert_false(cons.validate_sample(sample))
    sample[3] = -5
    assert_false(cons.validate_sample(sample))
    sample[3] = 5.000001
    assert_false(cons.validate_sample(sample))

    cons_list = [Single(6, 'a', 'categorical')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[6] = 'a'
    assert_true(cons.validate_sample(sample))
    sample[6] = 'b'
    assert_false(cons.validate_sample(sample))
    sample[6] = -5
    assert_false(cons.validate_sample(sample))
    sample[6] = 5.000001
    assert_false(cons.validate_sample(sample))

    # Test validation of inclusive constraints
    cons_list = [Inclusive(0, (5.0, 7.0), 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 5.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 7.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 7.00001
    assert_false(cons.validate_sample(sample))
    sample[0] = 4.99999
    assert_false(cons.validate_sample(sample))
    sample[0] = -10
    assert_false(cons.validate_sample(sample))

    cons_list = [Inclusive(3, (5, 7), 'integer')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[3] = 5
    assert_true(cons.validate_sample(sample))
    sample[3] = 6
    assert_true(cons.validate_sample(sample))
    sample[3] = 7
    assert_true(cons.validate_sample(sample))
    sample[3] = 8
    assert_false(cons.validate_sample(sample))
    sample[3] = 4
    assert_false(cons.validate_sample(sample))
    sample[3] = -4
    assert_false(cons.validate_sample(sample))

    cons_list = [Inclusive(6, ('c', 'd', 'e'), 'categorical')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[6] = 'c'
    assert_true(cons.validate_sample(sample))
    sample[6] = 'e'
    assert_true(cons.validate_sample(sample))
    sample[6] = 'f'
    assert_false(cons.validate_sample(sample))
    sample[6] = -5
    assert_false(cons.validate_sample(sample))
    sample[6] = 3.3
    assert_false(cons.validate_sample(sample))
    sample[6] = 'a'
    assert_false(cons.validate_sample(sample))

    # Test validation of exclusive constraints
    cons_list = [Exclusive(0, (5.0, 7.0), 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 5.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 7.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 7.00001
    assert_true(cons.validate_sample(sample))
    sample[0] = 4.99999
    assert_true(cons.validate_sample(sample))
    sample[0] = -10
    assert_true(cons.validate_sample(sample))

    cons_list = [Exclusive(3, (5, 7), 'integer')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[3] = 5
    assert_false(cons.validate_sample(sample))
    sample[3] = 6
    assert_false(cons.validate_sample(sample))
    sample[3] = 7
    assert_false(cons.validate_sample(sample))
    sample[3] = 8
    assert_true(cons.validate_sample(sample))
    sample[3] = 4
    assert_true(cons.validate_sample(sample))
    sample[3] = -4
    assert_true(cons.validate_sample(sample))

    cons_list = [Exclusive(3, (5, 5), 'integer')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[3] = 5
    assert_false(cons.validate_sample(sample))

    cons_list = [Exclusive(6, ('c', 'd', 'e'), 'categorical')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[6] = 'c'
    assert_false(cons.validate_sample(sample))
    sample[6] = 'e'
    assert_false(cons.validate_sample(sample))
    sample[6] = 'f'
    assert_true(cons.validate_sample(sample))
    sample[6] = -5
    assert_true(cons.validate_sample(sample))
    sample[6] = 3.3
    assert_true(cons.validate_sample(sample))
    sample[6] = 'a'
    assert_true(cons.validate_sample(sample))

    # Test more than one constraint per dimension
    cons_list = [Inclusive(0, (1.0, 2.0), 'real'),
                 Inclusive(0, (3.0, 4.0), 'real'),
                 Inclusive(0, (5.0, 6.0), 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 1.3
    assert_true(cons.validate_sample(sample))
    sample[0] = 6.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 5.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 3.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 4.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 5.5
    assert_true(cons.validate_sample(sample))
    sample[0] = 2.1
    assert_false(cons.validate_sample(sample))
    sample[0] = 4.9
    assert_false(cons.validate_sample(sample))
    sample[0] = 7.0
    assert_false(cons.validate_sample(sample))

    cons_list = [Exclusive(0, (1.0, 2.0), 'real'),
                 Exclusive(0, (3.0, 4.0), 'real'),
                 Exclusive(0, (5.0, 6.0), 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 1.3
    assert_false(cons.validate_sample(sample))
    sample[0] = 6.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 5.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 3.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 4.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 5.5
    assert_false(cons.validate_sample(sample))
    sample[0] = 2.1
    assert_true(cons.validate_sample(sample))
    sample[0] = 4.9
    assert_true(cons.validate_sample(sample))
    sample[0] = 7.0
    assert_true(cons.validate_sample(sample))
def test_space_consistency():
    # Reals (uniform)
    s1 = Space([Real(0.0, 1.0)])
    s2 = Space([Real(0.0, 1.0)])
    s3 = Space([Real(0, 1)])
    s4 = Space([(0.0, 1.0)])
    s5 = Space([(0.0, 1.0, "uniform")])
    s6 = Space([(0, 1.0)])
    s7 = Space([(np.float64(0.0), 1.0)])
    s8 = Space([(0, np.float64(1.0))])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    a4 = s4.rvs(n_samples=10, random_state=0)
    a5 = s5.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_equal(s1, s6)
    assert_equal(s1, s7)
    assert_equal(s1, s8)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)
    assert_array_equal(a1, a4)
    assert_array_equal(a1, a5)

    # Reals (log-uniform)
    s1 = Space([Real(10**-3.0, 10**3.0, prior="log-uniform")])
    s2 = Space([Real(10**-3.0, 10**3.0, prior="log-uniform")])
    s3 = Space([Real(10**-3, 10**3, prior="log-uniform")])
    s4 = Space([(10**-3.0, 10**3.0, "log-uniform")])
    s5 = Space([(np.float64(10**-3.0), 10**3.0, "log-uniform")])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    a4 = s4.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)
    assert_array_equal(a1, a4)

    # Integers
    s1 = Space([Integer(1, 5)])
    s2 = Space([Integer(1.0, 5.0)])
    s3 = Space([(1, 5)])
    s4 = Space([(np.int64(1.0), 5)])
    s5 = Space([(1, np.int64(5.0))])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)

    # Categoricals
    s1 = Space([Categorical(["a", "b", "c"])])
    s2 = Space([Categorical(["a", "b", "c"])])
    s3 = Space([["a", "b", "c"]])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_array_equal(a1, a2)
    assert_equal(s1, s3)
    assert_array_equal(a1, a3)

    s1 = Space([(True, False)])
    s2 = Space([Categorical([True, False])])
    s3 = Space([np.array([True, False])])
    assert s1 == s2 == s3
def check_categorical(vals, random_val):
    x = Categorical(vals)
    assert_equal(x, Categorical(vals))
    assert_not_equal(x, Categorical(vals[:-1] + ("zzz",)))
    assert_equal(x.rvs(random_state=1), random_val)
from ProcessOptimizer import plots, gp_minimize
from ProcessOptimizer.plots import plot_objective
from ProcessOptimizer import bokeh_plot

# For reproducibility
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
plt.set_cmap("viridis")

SPACE = [
    Integer(1, 20, name='max_depth'),
    Integer(2, 100, name='min_samples_split'),
    Integer(5, 30, name='min_samples_leaf'),
    Integer(1, 30, name='max_features'),
    Categorical(list('abc'), name='dummy'),
    Categorical(['gini', 'entropy'], name='criterion'),
    Categorical(list('def'), name='dummy'),
]


def objective(params):
    clf = DecisionTreeClassifier(**{
        dim.name: val
        for dim, val in zip(SPACE, params) if dim.name != 'dummy'
    })
    return -np.mean(cross_val_score(clf, *load_breast_cancer(True)))


result = gp_minimize(objective, SPACE, n_calls=20)
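# A minimal follow-up sketch, not part of the original snippet, shown only to
# illustrate what the plotting helpers imported above are for: the fitted
# `result` from gp_minimize can be handed to plot_objective for a
# partial-dependence-style view of the search space.
_ = plot_objective(result)
plt.show()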
@pytest.mark.fast_test
@pytest.mark.parametrize("dimensions, normalizations", [
    (((1, 3), (1., 3.)), ('normalize', 'normalize')),
    (((1, 3), ('a', 'b', 'c')), ('normalize', 'onehot')),
])
def test_normalize_dimensions(dimensions, normalizations):
    space = normalize_dimensions(dimensions)
    for dimension, normalization in zip(space, normalizations):
        assert dimension.transform_ == normalization


@pytest.mark.fast_test
@pytest.mark.parametrize(
    "dimension, name",
    [(Real(1, 2, name="learning rate"), "learning rate"),
     (Integer(1, 100, name="no of trees"), "no of trees"),
     (Categorical(["red, blue"], name="colors"), "colors")])
def test_normalize_dimensions_name(dimension, name):
    space = normalize_dimensions([dimension])
    assert space.dimensions[0].name == name


@pytest.mark.fast_test
def test_use_named_args():
    """
    Test the function wrapper @use_named_args which is used for wrapping
    an objective function with named args so it can be called by the
    optimizers which only pass a single list as the arg.

    This test does not actually use the optimizers but merely simulates
    how they would call the function.
    assert_equal(reals.distance(4.1234, i), abs(4.1234 - i))


@pytest.mark.parametrize("dimension, bounds",
                         [(Real, (2, 1)), (Integer, (2, 1)),
                          (Real, (2, 2)), (Integer, (2, 2))])
def test_dimension_bounds(dimension, bounds):
    with pytest.raises(ValueError) as exc:
        dim = dimension(*bounds)
    assert "has to be less than the upper bound " in exc.value.args[0]


@pytest.mark.parametrize(
    "dimension, name",
    [(Real(1, 2, name="learning rate"), "learning rate"),
     (Integer(1, 100, name="no of trees"), "no of trees"),
     (Categorical(["red, blue"], name="colors"), "colors")])
def test_dimension_name(dimension, name):
    assert dimension.name == name


@pytest.mark.parametrize(
    "dimension",
    [Real(1, 2), Integer(1, 100), Categorical(["red, blue"])])
def test_dimension_name_none(dimension):
    assert dimension.name is None


@pytest.mark.fast_test
def test_space_from_yaml():
    with NamedTemporaryFile() as tmp:
        tmp.write(b"""
        Space: