def test_get_conditions(self):
    """The conditions list starts empty and reflects each added condition."""
    space = ConfigurationSpace()
    parent = CategoricalHyperparameter("parent", [0, 1])
    space.add_hyperparameter(parent)
    child = UniformIntegerHyperparameter("child", 0, 10)
    space.add_hyperparameter(child)
    self.assertEqual(space.get_conditions(), [])
    condition = EqualsCondition(child, parent, 0)
    space.add_condition(condition)
    self.assertEqual(space.get_conditions(), [condition])
def _get_configuration_space(self) -> ConfigurationSpace:
    """Build the configuration space for the random forest.

    Returns
    -------
    ConfigurationSpace
        Space holding tree count, bootstrapping flag, feature fraction,
        and minimum split/leaf sizes, seeded from ``self.rs``.
    """
    space = ConfigurationSpace()
    space.seed(int(self.rs.randint(0, 1000)))
    hyperparameters = [
        Constant("num_trees", value=N_TREES),
        # Bootstrapping is fixed to the instance's setting via a
        # single-choice categorical.
        CategoricalHyperparameter(
            "do_bootstrapping",
            choices=(self.bootstrap,),
            default_value=self.bootstrap,
        ),
        CategoricalHyperparameter(
            "max_features", choices=(3 / 6, 4 / 6, 5 / 6, 1), default_value=1
        ),
        UniformIntegerHyperparameter(
            "min_samples_to_split", lower=1, upper=10, default_value=2
        ),
        UniformIntegerHyperparameter(
            "min_samples_in_leaf", lower=1, upper=10, default_value=1
        ),
    ]
    space.add_hyperparameters(hyperparameters)
    return space
def test_add_second_condition_wo_conjunction(self):
    """Adding a second, different condition on the same child must fail."""
    hp1 = CategoricalHyperparameter("input1", [0, 1])
    hp2 = CategoricalHyperparameter("input2", [0, 1])
    hp3 = Constant("And", "True")
    cond1 = EqualsCondition(hp3, hp1, 1)
    cond2 = EqualsCondition(hp3, hp2, 1)
    cs = ConfigurationSpace()
    cs.add_hyperparameter(hp1)
    cs.add_hyperparameter(hp2)
    cs.add_hyperparameter(hp3)
    cs.add_condition(cond1)
    # Fix: assertRaisesRegexp was deprecated since Python 3.2 and removed
    # in 3.12 -- use assertRaisesRegex. Raw strings avoid the invalid
    # escape-sequence DeprecationWarning for \( and \).
    # (The "ambigouos" typo is part of the library's message.)
    self.assertRaisesRegex(
        ValueError,
        r"Adding a second condition \(different\) for a "
        r"hyperparameter is ambigouos and "
        r"therefore forbidden. Add a conjunction "
        r"instead!",
        cs.add_condition,
        cond2,
    )
def test_check_forbidden_with_sampled_vector_configuration(self):
    """A vector configuration that hits a forbidden clause is rejected."""
    cs = ConfigurationSpace()
    metric = CategoricalHyperparameter("metric", ["minkowski", "other"])
    cs.add_hyperparameter(metric)
    forbidden = ForbiddenEqualsClause(metric, "other")
    cs.add_forbidden_clause(forbidden)
    # vector=[1.0] selects the second choice ("other"), which is forbidden.
    configuration = Configuration(cs, vector=np.ones(1, dtype=float))
    # Fix: assertRaisesRegexp was deprecated since Python 3.2 and removed
    # in 3.12 -- use assertRaisesRegex.
    self.assertRaisesRegex(ValueError, "violates forbidden clause",
                           cs._check_forbidden, configuration.get_array())
def test_illegal_default_configuration(self):
    """A forbidden clause that outlaws the default configuration is rejected."""
    cs = ConfigurationSpace()
    loss = CategoricalHyperparameter("loss", ["l1", "l2"], default_value='l1')
    penalty = CategoricalHyperparameter("penalty", ["l1", "l2"], default_value='l1')
    cs.add_hyperparameter(loss)
    cs.add_hyperparameter(penalty)
    # Both defaults are 'l1', so forbidding the pair makes the default invalid.
    conjunction = ForbiddenAndConjunction(
        ForbiddenEqualsClause(loss, "l1"),
        ForbiddenEqualsClause(penalty, "l1"),
    )
    # cs.add_forbidden_clause(conjunction)
    self.assertRaisesRegex(
        ValueError,
        r"Given vector violates forbidden clause \(Forbidden: loss == \'l1\' && "
        r"Forbidden: penalty == \'l1\'\)",
        cs.add_forbidden_clause,
        conjunction,
    )
def test_random_neighbor_failing(self):
    """Single-valued hyperparameters cannot yield a distinct random neighbor."""
    for hp in (Constant('a', 'b'), CategoricalHyperparameter('a', ['a'])):
        self.assertRaisesRegex(ValueError,
                               'Probably caught in an infinite loop.',
                               self._test_random_neigbor, hp)
def test_add_configuration_space_conjunctions(self):
    """Conjunctions survive being copied into a prefixed sub-space."""
    inner = ConfigurationSpace()
    outer = ConfigurationSpace()
    hp1 = inner.add_hyperparameter(CategoricalHyperparameter("input1", [0, 1]))
    hp2 = inner.add_hyperparameter(CategoricalHyperparameter("input2", [0, 1]))
    hp3 = inner.add_hyperparameter(UniformIntegerHyperparameter("child1", 0, 10))
    hp4 = inner.add_hyperparameter(UniformIntegerHyperparameter("child2", 0, 10))
    conjunction = AndConjunction(EqualsCondition(hp1, hp3, 5),
                                 EqualsCondition(hp1, hp4, 1))
    inner.add_conditions([EqualsCondition(hp2, hp3, 0), conjunction])
    outer.add_configuration_space(prefix='test', configuration_space=inner)
    self.assertEqual(str(outer).count('test:'), 10)
    # Identical apart from the "test:" prefix on every name.
    self.assertEqual(str(inner), str(outer).replace('test:', ''))
def choice(label: str, options: List, default=None):
    """Create a hyperparameter representing a categorical choice.

    Parameters
    ----------
    label : str
        Name of the hyperparameter.
    options : List
        Candidate values; each is passed through ``_encode``.
    default : optional
        Preferred default value, encoded before use.

    Returns
    -------
    Constant
        When only a single option exists.
    CategoricalHyperparameter
        Otherwise.
    """
    if len(options) == 1:
        return Constant(label, _encode(options[0]))
    choices = [_encode(option) for option in options]
    kwargs = {}
    # Fix: test against None rather than truthiness so falsy defaults
    # such as 0, False or "" are still honoured.
    if default is not None:
        kwargs['default_value'] = _encode(default)
    return CategoricalHyperparameter(label, choices, **kwargs)
def test_add_conjunction(self):
    """A hyperparameter gated by an AndConjunction is no longer unconditional."""
    parents = [CategoricalHyperparameter("input%d" % i, [0, 1])
               for i in (1, 2, 3)]
    gated = Constant("And", "True")
    conjunction = AndConjunction(
        *[EqualsCondition(gated, parent, 1) for parent in parents])
    cs = ConfigurationSpace()
    for parent in parents:
        cs.add_hyperparameter(parent)
    cs.add_hyperparameter(gated)
    cs.add_condition(conjunction)
    self.assertNotIn(gated, cs.get_all_unconditional_hyperparameters())
def test_get_hyperparameters_topological_sort_simple(self):
    """Repeatedly building a small parent/child space stays consistent."""
    for _ in range(10):
        space = ConfigurationSpace()
        parent = CategoricalHyperparameter("parent", [0, 1])
        space.add_hyperparameter(parent)
        child = UniformIntegerHyperparameter("child", 0, 10)
        space.add_hyperparameter(child)
        space.add_condition(EqualsCondition(child, parent, 0))
        # Instantiating the Configuration validates it automatically.
        Configuration(space, dict(parent=0, child=5))
def test_estimate_size(self):
    """estimate_size multiplies hyperparameter cardinalities; floats give inf."""
    cs = ConfigurationSpace()
    self.assertEqual(cs.estimate_size(), 0)
    expectations = [
        (Constant('constant', 0), 1),
        (UniformIntegerHyperparameter('integer', 0, 5), 6),
        (CategoricalHyperparameter('cat', [0, 1, 2]), 18),
    ]
    for hp, expected in expectations:
        cs.add_hyperparameter(hp)
        self.assertEqual(cs.estimate_size(), expected)
    cs.add_hyperparameter(UniformFloatHyperparameter('float', 0, 1))
    self.assertTrue(np.isinf(cs.estimate_size()))
def set_training_space(cs: ConfigurationSpace):
    """Register the training hyperparameters on the given space."""
    cs.add_hyperparameters([
        CategoricalHyperparameter('batch_size', [16, 32], default_value=32),
        UniformFloatHyperparameter('keep_prob', 0, 0.99, default_value=0.5),
    ])
def test_condition_with_cycles(self):
    """Conditions forming a cycle between hyperparameters are rejected."""
    cs = ConfigurationSpace()
    parent = CategoricalHyperparameter("parent", [0, 1])
    cs.add_hyperparameter(parent)
    child = UniformIntegerHyperparameter("child", 0, 10)
    cs.add_hyperparameter(child)
    cs.add_condition(EqualsCondition(child, parent, 0))
    # The reverse edge closes the cycle parent -> child -> parent.
    reverse = EqualsCondition(parent, child, 0)
    self.assertRaisesRegex(ValueError,
                           r"Hyperparameter configuration "
                           r"contains a cycle \[\['child', 'parent'\]\]",
                           cs.add_condition, reverse)
def test_get_hyperparameter(self):
    """Hyperparameters are retrievable by name; unknown names raise KeyError."""
    cs = ConfigurationSpace()
    parent = CategoricalHyperparameter("parent", [0, 1])
    cs.add_hyperparameter(parent)
    child = UniformIntegerHyperparameter("child", 0, 10)
    cs.add_hyperparameter(child)
    self.assertEqual(cs.get_hyperparameter("parent"), parent)
    self.assertEqual(cs.get_hyperparameter("child"), child)
    self.assertRaises(KeyError, cs.get_hyperparameter, "grandfather")
def set_optimizer_space(cs: ConfigurationSpace):
    """Register the optimizer choice and its per-optimizer hyperparameters."""
    optimizer = CategoricalHyperparameter('optimizer', ['SGD', 'Adam'],
                                          default_value='Adam')
    # Learning rates and decays are searched on a log scale.
    sgd_lr = UniformFloatHyperparameter('sgd_lr', 0.00001, 0.1,
                                        default_value=0.005, log=True)
    sgd_decay = UniformFloatHyperparameter('sgd_decay', 0.0001, 0.1,
                                           default_value=0.05, log=True)
    sgd_momentum = UniformFloatHyperparameter('sgd_momentum', 0.3, 0.99,
                                              default_value=0.9)
    adam_lr = UniformFloatHyperparameter('adam_lr', 0.00001, 0.1,
                                         default_value=0.005, log=True)
    adam_decay = UniformFloatHyperparameter('adam_decay', 0.0001, 0.1,
                                            default_value=0.05, log=True)
    cs.add_hyperparameters(
        [optimizer, sgd_lr, sgd_decay, sgd_momentum, adam_lr, adam_decay])
    # Each optimizer-specific hyperparameter is active only when its
    # optimizer is selected.
    cs.add_conditions([
        InCondition(child=hp, parent=optimizer, values=[value])
        for hp, value in ((sgd_lr, 'SGD'), (sgd_decay, 'SGD'),
                          (sgd_momentum, 'SGD'),
                          (adam_lr, 'Adam'), (adam_decay, 'Adam'))
    ])
def test_get_hyperparamforbidden_clauseseters(self):
    """get_hyperparameters returns the hyperparameters in topological order."""
    cs = ConfigurationSpace()
    self.assertEqual(len(cs.get_hyperparameters()), 0)
    parent = CategoricalHyperparameter("parent", [0, 1])
    cs.add_hyperparameter(parent)
    self.assertEqual(cs.get_hyperparameters(), [parent])
    child = UniformIntegerHyperparameter("child", 0, 10)
    cs.add_hyperparameter(child)
    cs.add_condition(EqualsCondition(child, parent, 1))
    self.assertEqual(cs.get_hyperparameters(), [parent, child])
    # TODO: I need more tests for the topological sort!
    self.assertEqual(cs.get_hyperparameters(), [parent, child])
def test_check_configuration2(self):
    """Inactive hyperparameters must stay unset, and forbidden-clause
    evaluation must not choke on missing hyperparameters."""
    cs = ConfigurationSpace()
    classifier = CategoricalHyperparameter(
        "classifier", ["k_nearest_neighbors", "extra_trees"])
    metric = CategoricalHyperparameter("metric", ["minkowski", "other"])
    p = CategoricalHyperparameter("k_nearest_neighbors:p", [1, 2])
    cs.add_hyperparameter(metric)
    cs.add_hyperparameter(p)
    cs.add_hyperparameter(classifier)
    # metric is only active for k-NN, and p only for the minkowski metric.
    cs.add_condition(
        EqualsCondition(metric, classifier, "k_nearest_neighbors"))
    cs.add_condition(EqualsCondition(p, metric, "minkowski"))
    cs.add_forbidden_clause(ForbiddenEqualsClause(metric, "other"))
    # With extra_trees neither metric nor p is active; constructing the
    # Configuration performs the validation.
    Configuration(cs, dict(classifier="extra_trees"))
def test_sample_configuration_with_or_conjunction(self):
    """Sampling respects an OrConjunction over two parent conditions."""
    cs = ConfigurationSpace(seed=1)
    hyper_params = {}
    hyper_params["hp5"] = CategoricalHyperparameter("hp5", ['0', '1', '2'])
    hyper_params["hp7"] = CategoricalHyperparameter("hp7", ['3', '4', '5'])
    hyper_params["hp8"] = CategoricalHyperparameter("hp8", ['6', '7', '8'])
    for key in hyper_params:
        cs.add_hyperparameter(hyper_params[key])
    cs.add_condition(
        InCondition(hyper_params["hp5"], hyper_params["hp8"], ['6']))
    cs.add_condition(
        OrConjunction(
            InCondition(hyper_params["hp7"], hyper_params["hp8"], ['7']),
            InCondition(hyper_params["hp7"], hyper_params["hp5"], ['1'])))
    # Fix: the np.NaN alias was removed in NumPy 2.0; np.nan is the
    # portable spelling. NaN marks inactive hyperparameters in the vector.
    fixtures = [[1, np.nan, 2],
                [0, 2, np.nan],
                [0, 1, 1],
                [1, np.nan, 2],
                [1, np.nan, 2]]
    # zip stops at the shorter sequence, so only the first five of the
    # ten samples are compared against the fixtures.
    for cfg, fixture in zip(cs.sample_configuration(10), fixtures):
        np.testing.assert_array_almost_equal(cfg.get_array(), fixture)
def get_hyperparameter_search_space(dataset_properties=None):
    """Build the VGG image-classification search space."""
    cs = ConfigurationSpace()
    BaseImageClassificationModel.set_training_space(cs)
    BaseImageClassificationModel.set_optimizer_space(cs)
    cs.add_hyperparameters([
        CategoricalHyperparameter('vgg_kernel_size', [3, 5], default_value=3),
        UniformFloatHyperparameter('vgg_keep_prob', 0, 0.99, default_value=0.5),
        UniformIntegerHyperparameter('vgg_block2_layer', 2, 3, default_value=2),
        UniformIntegerHyperparameter('vgg_block3_layer', 2, 5, default_value=3),
        UniformIntegerHyperparameter('vgg_block4_layer', 2, 5, default_value=3),
        UniformIntegerHyperparameter('vgg_block5_layer', 2, 5, default_value=3),
    ])
    return cs
def test_add_conditions(self):
    """add_conditions([...]) is equivalent to repeated add_condition calls."""
    cs1 = ConfigurationSpace()
    cs2 = ConfigurationSpace()
    hps = [CategoricalHyperparameter("input1", [0, 1]),
           CategoricalHyperparameter("input2", [0, 1]),
           UniformIntegerHyperparameter("child1", 0, 10),
           UniformIntegerHyperparameter("child2", 0, 10)]
    for hp in hps:
        cs1.add_hyperparameter(hp)
        cs2.add_hyperparameter(hp)
    hp1, hp2, hp3, hp4 = hps
    cond1 = EqualsCondition(hp2, hp3, 0)
    and_cond = AndConjunction(EqualsCondition(hp1, hp3, 5),
                              EqualsCondition(hp1, hp4, 1))
    # Bulk insertion on cs1, one-by-one insertion on cs2.
    cs1.add_conditions([cond1, and_cond])
    cs2.add_condition(cond1)
    cs2.add_condition(and_cond)
    self.assertEqual(str(cs1), str(cs2))
def test_add_forbidden_clause(self):
    """A forbidden clause appears in the space's string representation."""
    cs = ConfigurationSpace()
    hp1 = CategoricalHyperparameter("input1", [0, 1])
    cs.add_hyperparameter(hp1)
    # TODO add checking whether a forbidden clause makes sense at all
    cs.add_forbidden_clause(ForbiddenEqualsClause(hp1, 1))
    # TODO add something to properly retrieve the forbidden clauses
    expected = ("Configuration space object:\n "
                "Hyperparameters:\n input1, "
                "Type: Categorical, Choices: {0, 1}, "
                "Default: 0\n"
                " Forbidden Clauses:\n"
                " Forbidden: input1 == 1\n")
    self.assertEqual(str(cs), expected)
def test_with_ordinal(self):
    """The RF model handles categorical, ordinal, float and integer types."""
    cs = smac.configspace.ConfigurationSpace()
    _ = cs.add_hyperparameter(
        CategoricalHyperparameter('a', [0, 1], default_value=0))
    _ = cs.add_hyperparameter(
        OrdinalHyperparameter('b', [0, 1], default_value=1))
    _ = cs.add_hyperparameter(
        UniformFloatHyperparameter('c', lower=0., upper=1., default_value=1))
    _ = cs.add_hyperparameter(
        UniformIntegerHyperparameter('d', lower=0, upper=10, default_value=1))
    cs.seed(1)

    feat_array = np.array([0, 0, 0]).reshape(1, -1)
    types, bounds = get_types(cs, feat_array)
    model = RandomForestWithInstances(
        configspace=cs,
        types=types,
        bounds=bounds,
        instance_features=feat_array,
        seed=1,
        ratio_features=1.0,
        pca_components=9,
    )
    # Categorical column: two choices with NaN upper bound; the others
    # are normalized to [0, 1].
    self.assertEqual(bounds[0][0], 2)
    self.assertTrue(bounds[0][1] is np.nan)
    self.assertEqual(bounds[1][0], 0)
    self.assertEqual(bounds[1][1], 1)
    self.assertEqual(bounds[2][0], 0.)
    self.assertEqual(bounds[2][1], 1.)
    self.assertEqual(bounds[3][0], 0.)
    self.assertEqual(bounds[3][1], 1.)
    X = np.array(
        [[0., 0., 0., 0., 0., 0., 0.],
         [0., 0., 1., 0., 0., 0., 0.],
         [0., 1., 0., 9., 0., 0., 0.],
         [0., 1., 1., 4., 0., 0., 0.]],
        dtype=np.float64)
    y = np.array([0, 1, 2, 3], dtype=np.float64)
    X_train = np.vstack((X, X, X, X, X, X, X, X, X, X))
    y_train = np.vstack((y, y, y, y, y, y, y, y, y, y))

    model.train(X_train, y_train.reshape((-1, 1)))
    mean, _ = model.predict(X)
    for idx, m in enumerate(mean):
        # Fix: the third positional argument of assertAlmostEqual is
        # `places` (an int), so passing 0.05 raised TypeError whenever
        # the values differed; a 0.05 tolerance must be given as `delta`.
        self.assertAlmostEqual(y[idx], m, delta=0.05)
def string2hyperparameter(hp_desc: str):
    """Parse a hyperparameter back from its string representation.

    Only type, range, default_value, log and q are supported, e.g.
    ``x2, Type: UniformInteger, Range: [1, 15], Default: 4, on log-scale, Q: 2``.

    Parameters
    ----------
    hp_desc : str
        One-line hyperparameter description.

    Returns
    -------
    UniformFloatHyperparameter, UniformIntegerHyperparameter or
    CategoricalHyperparameter

    Raises
    ------
    ValueError
        If the parsed hyperparameter type is not supported.
    """
    q = -1
    log = None
    default_value = None
    params = hp_desc.split(',')
    cur_idx = -1
    # Walk backwards over the comma-separated fields: the optional Q and
    # log-scale markers come last, followed by the default value.
    while default_value is None:
        if q == -1:
            if 'Q:' in params[cur_idx]:
                q = float(params[cur_idx][4:])
                cur_idx -= 1
                continue
            else:
                q = None
        if log is None:
            if 'log-scale' in params[cur_idx]:
                log = True
                cur_idx -= 1
                continue
            else:
                log = False
        if default_value is None:
            default_value = str(params[cur_idx][10:])
            cur_idx -= 1
    prefix_params = ','.join(params[:cur_idx + 1])
    range_str = prefix_params.split(':')[-1]
    if range_str[-1] == ']':
        # Numerical range, e.g. " [1, 15]".
        element_list = range_str[2:-1].split(',')
        value_range = [float(element_list[0]), float(element_list[1])]
    else:
        # Categorical choices, e.g. " {a, b, c}".
        element_list = range_str[1:-1].split(',')
        value_range = [element[1:] for element in element_list]
    type_str = prefix_params.split(':')[-2].split(',')[0][1:]
    name_str = ':'.join(prefix_params.split(':')[:-2])
    name = ','.join(name_str.split(',')[:-1])[4:]
    if type_str == 'UniformFloat':
        return UniformFloatHyperparameter(name, value_range[0], value_range[1],
                                          default_value=float(default_value),
                                          log=log, q=q)
    elif type_str == 'UniformInteger':
        return UniformIntegerHyperparameter(name, value_range[0], value_range[1],
                                            default_value=int(default_value),
                                            log=log, q=q)
    elif type_str == 'Categorical':
        return CategoricalHyperparameter(name, value_range,
                                         default_value=default_value)
    else:
        # Fix: the error previously interpolated the builtin `type`
        # instead of the parsed type string. Also renamed the local
        # `range` (which shadowed the builtin) to `value_range`.
        raise ValueError('Hyperparameter type %s not supported!' % type_str)
def get_hyperparameter_search_space(dataset_properties=None):
    """Return the (currently stub) configuration space for GBDT binning.

    NOTE(review): ``CategoricalHyperparameter()`` is called without its
    required name/choices arguments, so invoking this function raises a
    TypeError -- the binning-method choices still need to be filled in.
    """
    # TODO add hyperparameter to gbdt binning
    cs = ConfigurationSpace()
    binning_method = CategoricalHyperparameter()
    # shrinkage = UniformFloatHyperparameter(
    #     name="shrinkage", lower=0.0, upper=1.0, default_value=0.5
    # )
    # n_components = UniformIntegerHyperparameter(
    #     name="n_components", lower=1, upper=29, default_value=10
    # )
    # tol = UniformFloatHyperparameter(
    #     name="tol", lower=0.0001, upper=1, default_value=0.0001
    # )
    cs.add_hyperparameters([binning_method])
    return cs
def get_cash_cs(include_algorithms=None, task_type=CLASSIFICATION):
    """Build the CASH space: an 'algorithm' choice plus per-algorithm sub-spaces."""
    _candidates = get_combined_candidtates(_classifiers, _addons)
    if include_algorithms is not None:
        _candidates = set(include_algorithms).intersection(set(_candidates.keys()))
        if len(_candidates) == 0:
            raise ValueError("No algorithms included! Please check the spelling of the included algorithms!")
    cs = ConfigurationSpace()
    algo = CategoricalHyperparameter('algorithm', list(_candidates))
    cs.add_hyperparameter(algo)
    # Attach each estimator's sub-space, activated when `algorithm`
    # selects that estimator.
    for estimator_id in _candidates:
        cs.add_configuration_space(
            estimator_id,
            get_hpo_cs(estimator_id),
            parent_hyperparameter={'parent': algo, 'value': estimator_id})
    return cs
def test_meta_field(self):
    """`meta` dicts survive being copied into a prefixed sub-space."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(
        UniformIntegerHyperparameter("uihp", lower=1, upper=10,
                                     meta=dict(uihp=True)))
    cs.add_hyperparameter(
        NormalIntegerHyperparameter("nihp", mu=0, sigma=1,
                                    meta=dict(nihp=True)))
    cs.add_hyperparameter(
        UniformFloatHyperparameter("ufhp", lower=1, upper=10,
                                   meta=dict(ufhp=True)))
    cs.add_hyperparameter(
        NormalFloatHyperparameter("nfhp", mu=0, sigma=1,
                                  meta=dict(nfhp=True)))
    cs.add_hyperparameter(
        CategoricalHyperparameter("chp", choices=['1', '2', '3'],
                                  meta=dict(chp=True)))
    cs.add_hyperparameter(
        OrdinalHyperparameter("ohp", sequence=['1', '2', '3'],
                              meta=dict(ohp=True)))
    cs.add_hyperparameter(Constant("const", value=1, meta=dict(const=True)))
    parent = ConfigurationSpace()
    parent.add_configuration_space("sub", cs, delimiter=':')
    # Every copied hyperparameter keeps its original meta dict.
    for name in ('uihp', 'nihp', 'ufhp', 'nfhp', 'chp', 'ohp', 'const'):
        self.assertEqual(parent.get_hyperparameter('sub:' + name).meta,
                         {name: True})
def maxsat(n_eval, n_variables, random_seed):
    """Optimize a MaxSAT benchmark instance of the given size with SMAC.

    Parameters
    ----------
    n_eval : int
        Evaluation budget (SMAC runcount limit).
    n_variables : int
        Problem size; must be one of 28, 43 or 60.
    random_seed : int
        Seed forwarded to the benchmark evaluator.
    """
    assert n_variables in [28, 43, 60]
    if n_variables == 28:
        evaluator = MaxSAT28(random_seed)
    elif n_variables == 43:
        evaluator = MaxSAT43(random_seed)
    elif n_variables == 60:
        evaluator = MaxSAT60(random_seed)
    name_tag = 'maxsat' + str(n_variables) + '_' + datetime.now().strftime(
        "%Y-%m-%d-%H:%M:%S:%f")

    # One binary categorical variable per SAT variable.
    cs = ConfigurationSpace()
    for i in range(n_variables):
        cs.add_hyperparameter(
            CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                      [str(elm) for elm in range(2)],
                                      default_value='0'))

    # Warm-start SMAC with the evaluator's suggested initial points.
    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = [
        Configuration(cs, {'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                           for j in range(n_variables)})
        for i in range(init_points_numpy.shape[0])
    ]

    def evaluate(x):
        x_tensor = torch.LongTensor(
            [int(x['x' + str(j + 1).zfill(2)]) for j in range(n_variables)])
        return evaluator.evaluate(x_tensor).item()

    print('Began at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario, tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()
    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
def pest_control(n_eval, random_seed):
    """Optimize the pest-control benchmark with SMAC.

    Parameters
    ----------
    n_eval : int
        Evaluation budget (SMAC runcount limit).
    random_seed : int
        Seed for the evaluator and initial-point sampling.
    """
    evaluator = PestControl(random_seed)
    name_tag = 'pestcontrol_' + datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")

    # One categorical variable per pest-control stage.
    cs = ConfigurationSpace()
    for i in range(PESTCONTROL_N_STAGES):
        cs.add_hyperparameter(CategoricalHyperparameter(
            'x' + str(i + 1).zfill(2),
            [str(elm) for elm in range(PESTCONTROL_N_CHOICE)],
            default_value='0'))

    # Warm-start SMAC with 20 sampled initial points.
    init_points_numpy = sample_init_points(
        [PESTCONTROL_N_CHOICE] * PESTCONTROL_N_STAGES, 20,
        random_seed).long().numpy()
    init_points = [
        Configuration(cs, {'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                           for j in range(PESTCONTROL_N_STAGES)})
        for i in range(init_points_numpy.shape[0])
    ]

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(PESTCONTROL_N_STAGES)
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario, tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()
    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum
def test_add_configuration_space(self):
    """A sub-space keeps hyperparameters, conditions and forbidden clauses,
    all renamed with the chosen prefix and delimiter."""
    cs = ConfigurationSpace()
    hp1 = cs.add_hyperparameter(CategoricalHyperparameter("input1", [0, 1]))
    cs.add_forbidden_clause(ForbiddenEqualsClause(hp1, 1))
    hp2 = cs.add_hyperparameter(UniformIntegerHyperparameter("child", 0, 10))
    cs.add_condition(EqualsCondition(hp2, hp1, 0))
    cs2 = ConfigurationSpace()
    cs2.add_configuration_space('prefix', cs, delimiter='__')
    expected = '''Configuration space object:
  Hyperparameters:
    prefix__child, Type: UniformInteger, Range: [0, 10], Default: 5
    prefix__input1, Type: Categorical, Choices: {0, 1}, Default: 0
  Conditions:
    prefix__child | prefix__input1 == 0
  Forbidden Clauses:
    Forbidden: prefix__input1 == 1
'''
    self.assertEqual(str(cs2), expected)
def contamination(n_eval, lamda, random_seed_pair):
    """Optimize the contamination benchmark with SMAC.

    Parameters
    ----------
    n_eval : int
        Evaluation budget (SMAC runcount limit).
    lamda : float
        Regularization weight passed to the evaluator.
    random_seed_pair : tuple
        Seed pair forwarded to the benchmark evaluator.
    """
    evaluator = Contamination(lamda, random_seed_pair)
    name_tag = '_'.join([
        'contamination',
        ('%.2E' % lamda),
        datetime.now().strftime("%Y-%m-%d-%H:%M:%S:%f")
    ])

    # One binary categorical variable per contamination stage.
    cs = ConfigurationSpace()
    for i in range(CONTAMINATION_N_STAGES):
        cs.add_hyperparameter(
            CategoricalHyperparameter('x' + str(i + 1).zfill(2),
                                      [str(elm) for elm in range(2)],
                                      default_value='0'))

    # Warm-start SMAC with the evaluator's suggested initial points.
    init_points_numpy = evaluator.suggested_init.long().numpy()
    init_points = [
        Configuration(cs, {'x' + str(j + 1).zfill(2): str(init_points_numpy[i][j])
                           for j in range(CONTAMINATION_N_STAGES)})
        for i in range(init_points_numpy.shape[0])
    ]

    def evaluate(x):
        x_tensor = torch.LongTensor([
            int(x['x' + str(j + 1).zfill(2)])
            for j in range(CONTAMINATION_N_STAGES)
        ])
        return evaluator.evaluate(x_tensor).item()

    print('Began at ' + datetime.now().strftime("%H:%M:%S"))
    scenario = Scenario({
        "run_obj": "quality",
        "runcount-limit": n_eval,
        "cs": cs,
        "deterministic": "true",
        'output_dir': os.path.join(EXP_DIR, name_tag)
    })
    smac = SMAC(scenario=scenario, tae_runner=evaluate,
                initial_configurations=init_points)
    smac.optimize()
    evaluations, optimum = evaluations_from_smac(smac)
    print('Finished at ' + datetime.now().strftime("%H:%M:%S"))
    return optimum