def test_uniformfloat_transform(self):
    """This checks whether a value sampled through the configuration
    space (it does not happen when the variable is sampled alone) stays
    equal when it is serialized via JSON and then deserialized again."""
    cs = ConfigurationSpace()
    a = cs.add_hyperparameter(UniformFloatHyperparameter('a', -5, 10))
    b = cs.add_hyperparameter(
        NormalFloatHyperparameter('b', 1, 2, log=True))
    for i in range(100):
        config = cs.sample_configuration()
        value = OrderedDict(sorted(config.get_dictionary().items()))
        string = json.dumps(value)
        saved_value = json.loads(string)
        saved_value = OrderedDict(sorted(byteify(saved_value).items()))
        self.assertEqual(repr(value), repr(saved_value))

    # Next, test whether the truncation also works when initializing the
    # Configuration with a dictionary
    for i in range(100):
        rs = np.random.RandomState(1)
        value_a = a.sample(rs)
        value_b = b.sample(rs)
        values_dict = {'a': value_a, 'b': value_b}
        config = Configuration(cs, values=values_dict)
        string = json.dumps(config.get_dictionary())
        saved_value = json.loads(string)
        saved_value = byteify(saved_value)
        self.assertEqual(values_dict, saved_value)
def impute_inactive_values(configuration: Configuration,
                           strategy: Union[str, float] = 'default') -> Configuration:
    """Impute inactive parameters.

    Parameters
    ----------
    strategy : string, optional (default='default')
        The imputation strategy.

        - If 'default', replace inactive parameters by their default value.
        - If float, replace inactive parameters by the given float value,
          which should be able to be split apart by a tree-based model.
    """
    values = dict()
    for hp in configuration.configuration_space.get_hyperparameters():
        value = configuration.get(hp.name)
        if value is None:
            if strategy == 'default':
                new_value = hp.default_value
            elif isinstance(strategy, float):
                new_value = strategy
            else:
                raise ValueError('Unknown imputation strategy %s' % str(strategy))
            value = new_value
        values[hp.name] = value

    new_configuration = Configuration(configuration.configuration_space,
                                      values=values,
                                      allow_inactive_with_values=True)
    return new_configuration
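# Hedged usage sketch (not part of the original module): demonstrates
# impute_inactive_values on a toy space. The function name and all
# hyperparameter names below are illustrative assumptions, and the imports
# used elsewhere in this file are assumed to be available.
def _example_impute_inactive_values():
    cs = ConfigurationSpace()
    clf = CategoricalHyperparameter('clf', ['knn', 'svm'])
    gamma = UniformFloatHyperparameter('gamma', 0.0, 1.0)
    cs.add_hyperparameters([clf, gamma])
    cs.add_condition(EqualsCondition(gamma, clf, 'svm'))

    # 'gamma' is inactive because clf != 'svm', so configuration.get('gamma')
    # returns None before imputation.
    config = Configuration(cs, values={'clf': 'knn'})
    imputed = impute_inactive_values(config, strategy='default')
    # After imputation, 'gamma' carries its default value instead of None.
    return imputed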
def test_check_neighbouring_config_diamond_str(self):
    diamond = ConfigurationSpace()
    head = CategoricalHyperparameter('head', ['red', 'green'])
    left = CategoricalHyperparameter('left', ['red', 'green'])
    right = CategoricalHyperparameter('right', ['red', 'green', 'blue', 'yellow'])
    bottom = CategoricalHyperparameter('bottom', ['red', 'green'])
    diamond.add_hyperparameters([head, left, right, bottom])
    diamond.add_condition(EqualsCondition(left, head, 'red'))
    diamond.add_condition(EqualsCondition(right, head, 'red'))
    diamond.add_condition(
        AndConjunction(EqualsCondition(bottom, left, 'green'),
                       EqualsCondition(bottom, right, 'green')))

    config = Configuration(diamond, {
        'bottom': 'red',
        'head': 'red',
        'left': 'green',
        'right': 'green'
    })
    hp_name = "head"
    index = diamond.get_idx_by_hyperparameter_name(hp_name)
    neighbor_value = 1

    new_array = ConfigSpaceNNI.c_util.change_hp_value(
        diamond, config.get_array(), hp_name, neighbor_value, index)
    expected_array = np.array([1, np.nan, np.nan, np.nan])

    np.testing.assert_almost_equal(new_array, expected_array)
def test_check_forbidden_with_sampled_vector_configuration(self):
    cs = ConfigurationSpace()
    metric = CategoricalHyperparameter("metric", ["minkowski", "other"])
    cs.add_hyperparameter(metric)

    forbidden = ForbiddenEqualsClause(metric, "other")
    cs.add_forbidden_clause(forbidden)

    configuration = Configuration(cs, vector=np.ones(1, dtype=float))
    self.assertRaisesRegexp(ValueError, "violates forbidden clause",
                            cs._check_forbidden, configuration.get_array())
def deactivate_inactive_hyperparameters(
        configuration: Dict,
        configuration_space: ConfigurationSpace,
):
    hyperparameters = configuration_space.get_hyperparameters()
    configuration = Configuration(configuration_space=configuration_space,
                                  values=configuration,
                                  allow_inactive_with_values=True)

    hps = deque()
    unconditional_hyperparameters = configuration_space.get_all_unconditional_hyperparameters()
    hyperparameters_with_children = list()
    for uhp in unconditional_hyperparameters:
        children = configuration_space._children_of[uhp]
        if len(children) > 0:
            hyperparameters_with_children.append(uhp)
    hps.extendleft(hyperparameters_with_children)

    inactive = set()

    while len(hps) > 0:
        hp = hps.pop()
        children = configuration_space._children_of[hp]
        for child in children:
            conditions = configuration_space._parent_conditions_of[child.name]
            for condition in conditions:
                if not condition.evaluate_vector(configuration.get_array()):
                    dic = configuration.get_dictionary()
                    try:
                        del dic[child.name]
                    except KeyError:
                        continue
                    configuration = Configuration(
                        configuration_space=configuration_space,
                        values=dic,
                        allow_inactive_with_values=True)
                    inactive.add(child.name)
                hps.appendleft(child.name)

    for hp in hyperparameters:
        if hp.name in inactive:
            dic = configuration.get_dictionary()
            try:
                del dic[hp.name]
            except KeyError:
                continue
            configuration = Configuration(
                configuration_space=configuration_space,
                values=dic,
                allow_inactive_with_values=True)

    return Configuration(configuration_space,
                         values=configuration.get_dictionary())
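# Hedged usage sketch (not part of the original module): shows the intended
# effect of deactivate_inactive_hyperparameters on a toy space. The function
# name and hyperparameter names are illustrative assumptions, and the imports
# used elsewhere in this file are assumed to be available.
def _example_deactivate_inactive_hyperparameters():
    cs = ConfigurationSpace()
    parent = CategoricalHyperparameter('parent', [0, 1])
    child = UniformIntegerHyperparameter('child', 0, 10)
    cs.add_hyperparameters([parent, child])
    cs.add_condition(EqualsCondition(child, parent, 0))

    # 'child' is only active when parent == 0, so its value must be dropped.
    values = {'parent': 1, 'child': 5}
    clean = deactivate_inactive_hyperparameters(values, cs)
    # `clean` is a Configuration that no longer contains a value for 'child'.
    return clean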
def test_check_configuration2(self):
    # Test that hyperparameters which are not active must not be set and
    # that evaluating forbidden clauses does not choke on missing
    # hyperparameters
    cs = ConfigurationSpace()
    classifier = CategoricalHyperparameter(
        "classifier", ["k_nearest_neighbors", "extra_trees"])
    metric = CategoricalHyperparameter("metric", ["minkowski", "other"])
    p = CategoricalHyperparameter("k_nearest_neighbors:p", [1, 2])
    metric_depends_on_classifier = EqualsCondition(metric, classifier,
                                                   "k_nearest_neighbors")
    p_depends_on_metric = EqualsCondition(p, metric, "minkowski")
    cs.add_hyperparameter(metric)
    cs.add_hyperparameter(p)
    cs.add_hyperparameter(classifier)
    cs.add_condition(metric_depends_on_classifier)
    cs.add_condition(p_depends_on_metric)

    forbidden = ForbiddenEqualsClause(metric, "other")
    cs.add_forbidden_clause(forbidden)

    configuration = Configuration(cs, dict(classifier="extra_trees"))

    # check backward compatibility with checking configurations instead of vectors
    cs.check_configuration(configuration)
def generate_config(cs, rs):
    i = rs.randint(-10, 10)
    f = rs.rand(1)[0]
    seed = rs.randint(0, 10000)

    # 'a' occurs more often than 'b'
    c = 'a' if rs.binomial(1, 0.2) == 0 else 'b'

    # We have 100 instances, but prefer the middle ones
    instance_id = int(rs.normal(loc=50, scale=20, size=1)[0])
    instance_id = min(max(0, instance_id), 100)

    status = StatusType.SUCCESS
    runtime = 10**(numpy.sin(i) + f) + seed / 10000 - numpy.sin(instance_id)

    if runtime > 40:
        status = StatusType.TIMEOUT
        runtime = 40
    elif instance_id > 50 and runtime > 15:
        # This is a timeout with probability 0.5
        status = StatusType.TIMEOUT
        runtime /= 2.0

    config = Configuration(cs, values={'cat_a_b': c, 'float_0_1': f,
                                       'integer_0_100': i})

    return config, seed, runtime, status, instance_id
def setUp(self):
    unittest.TestCase.setUp(self)
    self.rh = runhistory.RunHistory(aggregate_func=average_cost)
    self.cs = get_config_space()
    self.config1 = Configuration(self.cs, values={'a': 0, 'b': 100})
    self.config2 = Configuration(self.cs, values={'a': 100, 'b': 0})
    self.config3 = Configuration(self.cs, values={'a': 100, 'b': 100})
    self.scen = Scenario({'run_obj': 'runtime', 'cutoff_time': 20, 'cs': self.cs})
    self.types, self.bounds = get_types(self.cs, None)
    self.scen = Scenario({'run_obj': 'runtime', 'cutoff_time': 20,
                          'cs': self.cs, 'output_dir': ''})
def import_data(self, data):
    """
    Import additional data for tuning.

    Parameters
    ----------
    data : list of dict
        Each dict has at least two keys, ``parameter`` and ``value``.
    """
    _completed_num = 0
    for trial_info in data:
        self.logger.info(
            "Importing data, current processing progress %s / %s",
            _completed_num, len(data))
        # simply validate data format
        assert "parameter" in trial_info
        _params = trial_info["parameter"]
        assert "value" in trial_info
        _value = trial_info['value']
        if not _value:
            self.logger.info(
                "Useless trial data, value is %s, skip this trial data.",
                _value)
            continue
        _value = extract_scalar_reward(_value)
        # convert the keys in loguniform and categorical types
        valid_entry = True
        for key, value in _params.items():
            if key in self.loguniform_key:
                _params[key] = np.log(value)
            elif key in self.categorical_dict:
                if value in self.categorical_dict[key]:
                    _params[key] = self.categorical_dict[key].index(value)
                else:
                    self.logger.info(
                        "The value %s of key %s is not in search space.",
                        str(value), key)
                    valid_entry = False
                    break
        if not valid_entry:
            continue
        # start importing this data entry
        _completed_num += 1
        config = Configuration(self.cs, values=_params)
        if self.optimize_mode is OptimizeMode.Maximize:
            _value = -_value
        if self.first_one:
            self.smbo_solver.nni_smac_receive_first_run(config, _value)
            self.first_one = False
        else:
            self.smbo_solver.nni_smac_receive_runs(config, _value)
    self.logger.info(
        "Successfully imported data to the SMAC tuner, total data: %d, imported data: %d.",
        len(data), _completed_num)
def test_get_hyperparameters_topological_sort_simple(self):
    for iteration in range(10):
        cs = ConfigurationSpace()
        hp1 = CategoricalHyperparameter("parent", [0, 1])
        cs.add_hyperparameter(hp1)
        hp2 = UniformIntegerHyperparameter("child", 0, 10)
        cs.add_hyperparameter(hp2)
        cond1 = EqualsCondition(hp2, hp1, 0)
        cs.add_condition(cond1)
        # This automatically checks the configuration!
        Configuration(cs, dict(parent=0, child=5))
def setUp(self):
    unittest.TestCase.setUp(self)
    self.rh = RunHistory(aggregate_func=average_cost)
    self.cs = get_config_space()
    self.config1 = Configuration(self.cs, values={'a': 0, 'b': 100})
    self.config2 = Configuration(self.cs, values={'a': 100, 'b': 0})
    self.config3 = Configuration(self.cs, values={'a': 100, 'b': 100})
    self.scen = Scenario({
        "cutoff_time": 2,
        'cs': self.cs,
        "run_obj": 'runtime',
        "output_dir": ''
    })
    self.stats = Stats(scenario=self.scen)
    self.stats.start_timing()
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
def test_sample_configuration(self):
    cs = ConfigurationSpace()
    hp1 = CategoricalHyperparameter("parent", [0, 1])
    cs.add_hyperparameter(hp1)
    hp2 = UniformIntegerHyperparameter("child", 0, 10)
    cs.add_hyperparameter(hp2)
    cond1 = EqualsCondition(hp2, hp1, 0)
    cs.add_condition(cond1)
    # This automatically checks the configuration!
    Configuration(cs, dict(parent=0, child=5))

    # and now for something more complicated
    cs = ConfigurationSpace(seed=1)
    hp1 = CategoricalHyperparameter("input1", [0, 1])
    cs.add_hyperparameter(hp1)
    hp2 = CategoricalHyperparameter("input2", [0, 1])
    cs.add_hyperparameter(hp2)
    hp3 = CategoricalHyperparameter("input3", [0, 1])
    cs.add_hyperparameter(hp3)
    hp4 = CategoricalHyperparameter("input4", [0, 1])
    cs.add_hyperparameter(hp4)
    hp5 = CategoricalHyperparameter("input5", [0, 1])
    cs.add_hyperparameter(hp5)
    hp6 = Constant("AND", "True")
    cs.add_hyperparameter(hp6)

    cond1 = EqualsCondition(hp6, hp1, 1)
    cond2 = NotEqualsCondition(hp6, hp2, 1)
    cond3 = InCondition(hp6, hp3, [1])
    cond4 = EqualsCondition(hp5, hp3, 1)
    cond5 = EqualsCondition(hp4, hp5, 1)
    cond6 = EqualsCondition(hp6, hp4, 1)
    cond7 = EqualsCondition(hp6, hp5, 1)

    conj1 = AndConjunction(cond1, cond2)
    conj2 = OrConjunction(conj1, cond3)
    conj3 = AndConjunction(conj2, cond6, cond7)
    cs.add_condition(cond4)
    cs.add_condition(cond5)
    cs.add_condition(conj3)

    samples = []
    for i in range(5):
        cs.seed(1)
        samples.append([])
        for j in range(100):
            sample = cs.sample_configuration()
            samples[-1].append(sample)

        if i > 0:
            for j in range(100):
                self.assertEqual(samples[-1][j], samples[-2][j])
def test_get_config_runs(self):
    '''
        get some config runs from runhistory
    '''
    rh = RunHistory(aggregate_func=average_cost)
    cs = get_config_space()
    config1 = Configuration(cs, values={'a': 1, 'b': 2})
    config2 = Configuration(cs, values={'a': 1, 'b': 3})

    rh.add(config=config1, cost=10, time=20,
           status=StatusType.SUCCESS, instance_id=1, seed=1)
    rh.add(config=config2, cost=10, time=20,
           status=StatusType.SUCCESS, instance_id=1, seed=1)
    rh.add(config=config1, cost=10, time=20,
           status=StatusType.SUCCESS, instance_id=2, seed=2)

    ist = rh.get_runs_for_config(config=config1)

    self.assertEqual(len(ist), 2)
    self.assertEqual(ist[0].instance, 1)
    self.assertEqual(ist[1].instance, 2)
def test_init_with_values(self):
    c1 = Configuration(self.cs, values={
        'parent': 1,
        'child': 2,
        'friend': 3
    })
    # Pay attention that the vector does not necessarily have an intuitive
    # sorting!
    # Values are a little bit higher than one would expect because an
    # integer range of [0, 10] is transformed to [-0.499, 10.499].
    vector_values = {
        'parent': 1,
        'child': 0.22727223140405708,
        'friend': 0.583333611112037
    }
    vector = [None] * 3
    for name in self.cs._hyperparameter_idx:
        vector[self.cs._hyperparameter_idx[name]] = vector_values[name]
    c2 = Configuration(self.cs, vector=vector)

    # This tests
    # a) that the vector representation of both are the same
    # b) that the dictionary representation of both are the same
    self.assertEqual(c1, c2)
def test_full_update(self):
    rh = RunHistory(aggregate_func=average_cost)
    cs = get_config_space()
    config1 = Configuration(cs, values={'a': 1, 'b': 2})
    config2 = Configuration(cs, values={'a': 1, 'b': 3})

    rh.add(config=config1, cost=10, time=20,
           status=StatusType.SUCCESS, instance_id=1, seed=1)
    rh.add(config=config2, cost=10, time=20,
           status=StatusType.SUCCESS, instance_id=1, seed=1)
    rh.add(config=config2, cost=20, time=20,
           status=StatusType.SUCCESS, instance_id=2, seed=2)

    cost_config2 = rh.get_cost(config2)
    rh.compute_all_costs()
    updated_cost_config2 = rh.get_cost(config2)
    self.assertTrue(cost_config2 == updated_cost_config2)

    rh.compute_all_costs(instances=[2])
    updated_cost_config2 = rh.get_cost(config2)
    self.assertTrue(cost_config2 != updated_cost_config2)
    self.assertTrue(updated_cost_config2 == 20)
def test_add_multiple_times(self):
    rh = RunHistory(aggregate_func=average_cost)
    cs = get_config_space()
    config = Configuration(cs, values={'a': 1, 'b': 2})

    for i in range(5):
        rh.add(config=config, cost=i + 1, time=i + 1,
               status=StatusType.SUCCESS, instance_id=None,
               seed=12345, additional_info=None)

    self.assertEqual(len(rh.data), 1)
    self.assertEqual(len(rh.get_runs_for_config(config)), 1)
    self.assertEqual(len(rh._configid_to_inst_seed[1]), 1)
    self.assertEqual(list(rh.data.values())[0].cost, 1)
def test_incremental_update(self):
    rh = RunHistory(aggregate_func=average_cost)
    cs = get_config_space()
    config1 = Configuration(cs, values={'a': 1, 'b': 2})

    rh.add(config=config1, cost=10, time=20,
           status=StatusType.SUCCESS, instance_id=1, seed=1)
    self.assertTrue(rh.get_cost(config1) == 10)

    rh.add(config=config1, cost=20, time=20,
           status=StatusType.SUCCESS, instance_id=2, seed=1)
    self.assertTrue(rh.get_cost(config1) == 15)
def test_add_and_pickle(self):
    '''
        simply add some run data to the runhistory, then pickle it
    '''
    rh = RunHistory(aggregate_func=average_cost)
    cs = get_config_space()
    config = Configuration(cs, values={'a': 1, 'b': 2})

    self.assertTrue(rh.empty())

    rh.add(config=config, cost=10, time=20,
           status=StatusType.SUCCESS, instance_id=None,
           seed=None, additional_info=None)
    rh.add(config=config, cost=10, time=20,
           status=StatusType.SUCCESS, instance_id=1,
           seed=12354, additional_info={"start_time": 10})

    self.assertFalse(rh.empty())

    tmpfile = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    pickle.dump(rh, tmpfile, -1)
    name = tmpfile.name
    tmpfile.close()

    with open(name, 'rb') as fh:
        loaded_rh = pickle.load(fh)
    self.assertEqual(loaded_rh.data, rh.data)
def test_merge_foreign_data(self):
    ''' test smac.utils.merge_foreign_data '''

    scenario = Scenario(self.test_scenario_dict)
    scenario_2 = Scenario(self.test_scenario_dict)
    scenario_2.feature_dict = {"inst_new": [4]}

    # init cs
    cs = ConfigurationSpace()
    cs.add_hyperparameter(
        UniformIntegerHyperparameter(name='a', lower=0, upper=100))
    cs.add_hyperparameter(
        UniformIntegerHyperparameter(name='b', lower=0, upper=100))

    # build runhistory
    rh_merge = RunHistory(aggregate_func=average_cost)
    config = Configuration(cs, values={'a': 1, 'b': 2})
    rh_merge.add(config=config, instance_id="inst_new", cost=10, time=20,
                 status=StatusType.SUCCESS, seed=None, additional_info=None)

    # "d" is an instance in <scenario>
    rh_merge.add(config=config, instance_id="d", cost=5, time=20,
                 status=StatusType.SUCCESS, seed=None, additional_info=None)

    # build empty rh
    rh_base = RunHistory(aggregate_func=average_cost)

    merge_foreign_data(scenario=scenario, runhistory=rh_base,
                       in_scenario_list=[scenario_2],
                       in_runhistory_list=[rh_merge])

    # both runs should be in the runhistory
    # but we should not use the data to update the cost of config
    self.assertTrue(len(rh_base.data) == 2)
    self.assertTrue(np.isnan(rh_base.get_cost(config)))

    # we should not get direct access to external run data
    runs = rh_base.get_runs_for_config(config)
    self.assertTrue(len(runs) == 0)

    rh_merge.add(config=config, instance_id="inst_new_2", cost=10, time=20,
                 status=StatusType.SUCCESS, seed=None, additional_info=None)

    self.assertRaises(
        ValueError, merge_foreign_data, **{
            "scenario": scenario,
            "runhistory": rh_base,
            "in_scenario_list": [scenario_2],
            "in_runhistory_list": [rh_merge]
        })
def get_random_neighbor(configuration: Configuration, seed: int) -> Configuration:
    """Draw a random neighbor by changing one parameter of a configuration.

    * If the parameter is categorical, it changes it to another value.
    * If the parameter is ordinal, it changes it to the next higher or
      lower value.
    * If the parameter is a float, draw a random sample.

    If changing a parameter activates new parameters or deactivates
    previously active parameters, the configuration will be rejected. If
    more than 10000 configurations were rejected, this function raises a
    ValueError.

    Parameters
    ----------
    configuration : Configuration

    seed : int
        Used to generate a random state.

    Returns
    -------
    Configuration
        The new neighbor.
    """
    random = np.random.RandomState(seed)
    rejected = True
    values = copy.deepcopy(configuration.get_dictionary())

    while rejected:
        # First, choose an active hyperparameter
        active = False
        iteration = 0
        while not active:
            iteration += 1
            if configuration._num_hyperparameters > 1:
                rand_idx = random.randint(0, configuration._num_hyperparameters - 1)
            else:
                rand_idx = 0

            value = configuration.get_array()[rand_idx]
            if np.isfinite(value):
                active = True

                hp_name = configuration.configuration_space \
                    .get_hyperparameter_by_idx(rand_idx)
                hp = configuration.configuration_space.get_hyperparameter(hp_name)

                # Only choose if there is a possibility of finding a neighbor
                if not hp.has_neighbors():
                    active = False

            if iteration > 10000:
                raise ValueError('Probably caught in an infinite loop.')

        # Get a neighbor and adapt the rest of the configuration if necessary
        neighbor = hp.get_neighbors(value, random, number=1, transform=True)[0]
        previous_value = values[hp.name]
        values[hp.name] = neighbor

        try:
            new_configuration = Configuration(
                configuration.configuration_space, values=values)
            rejected = False
        except ValueError:
            values[hp.name] = previous_value

    return new_configuration
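# Hedged usage sketch (not part of the original module): draws a single random
# neighbor of a sampled configuration. The function name and the toy space
# below are illustrative assumptions; the seed value is arbitrary.
def _example_get_random_neighbor():
    cs = ConfigurationSpace(seed=1)
    cs.add_hyperparameter(UniformFloatHyperparameter('x', -5, 10))
    cs.add_hyperparameter(CategoricalHyperparameter('c', ['a', 'b']))

    config = cs.sample_configuration()
    neighbor = get_random_neighbor(config, seed=1)
    # Exactly one hyperparameter value differs between config and neighbor.
    return config, neighbor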
def get_one_exchange_neighbourhood(
        configuration: Configuration,
        seed: int,
        num_neighbors: int = 4,
        stdev: float = 0.2,
) -> Generator[Configuration, None, None]:
    """Return all configurations in a one-exchange neighborhood.

    The method is implemented as defined by:
    Frank Hutter, Holger H. Hoos and Kevin Leyton-Brown
    Sequential Model-Based Optimization for General Algorithm Configuration
    In: Proceedings of the conference on Learning and Intelligent
    OptimizatioN (LION 5)
    """
    random = np.random.RandomState(seed)
    hyperparameters_list = list(
        configuration.configuration_space._hyperparameters.keys()
    )
    hyperparameters_list_length = len(hyperparameters_list)
    # Hyperparameters that have a value but no neighbors are exhausted from
    # the start.
    hyperparameters_used = [
        hp.name
        for hp in configuration.configuration_space.get_hyperparameters()
        if hp.get_num_neighbors(configuration.get(hp.name)) == 0
        and configuration.get(hp.name) is not None
    ]
    number_of_usable_hyperparameters = sum(np.isfinite(configuration.get_array()))
    n_neighbors_per_hp = {
        hp.name: num_neighbors
        if np.isinf(hp.get_num_neighbors(configuration.get(hp.name)))
        else hp.get_num_neighbors(configuration.get(hp.name))
        for hp in configuration.configuration_space.get_hyperparameters()
    }

    finite_neighbors_stack = {}  # type: Dict
    configuration_space = configuration.configuration_space  # type: ConfigSpaceNNI

    while len(hyperparameters_used) < number_of_usable_hyperparameters:
        index = int(random.randint(hyperparameters_list_length))
        hp_name = hyperparameters_list[index]
        if n_neighbors_per_hp[hp_name] == 0:
            continue

        else:
            neighbourhood = []
            number_of_sampled_neighbors = 0
            array = configuration.get_array()  # type: np.ndarray
            value = array[index]  # type: float

            # Check for NaNs (inactive value)
            if value != value:
                continue

            iteration = 0
            hp = configuration_space.get_hyperparameter(hp_name)  # type: Hyperparameter
            num_neighbors_for_hp = hp.get_num_neighbors(configuration.get(hp_name))
            while True:
                # Obtain neighbors differently for different possible numbers
                # of neighbors
                if num_neighbors_for_hp == 0:
                    break
                # No infinite loops
                elif iteration > 100:
                    break
                elif np.isinf(num_neighbors_for_hp):
                    if number_of_sampled_neighbors >= 1:
                        break
                    # TODO if code becomes slow remove the isinstance!
                    if isinstance(hp, (UniformFloatHyperparameter,
                                       UniformIntegerHyperparameter)):
                        neighbor = hp.get_neighbors(value, random,
                                                    number=1, std=stdev)[0]
                    else:
                        neighbor = hp.get_neighbors(value, random, number=1)[0]
                else:
                    if iteration > 0:
                        break
                    if hp_name not in finite_neighbors_stack:
                        neighbors = hp.get_neighbors(value, random)
                        random.shuffle(neighbors)
                        finite_neighbors_stack[hp_name] = neighbors
                    else:
                        neighbors = finite_neighbors_stack[hp_name]
                    neighbor = neighbors.pop()

                # Check all newly obtained neighbors
                new_array = array.copy()
                new_array = ConfigSpaceNNI.c_util.change_hp_value(
                    configuration_space=configuration_space,
                    configuration_array=new_array,
                    hp_name=hp_name,
                    hp_value=neighbor,
                    index=index)
                try:
                    # Populating a configuration from an array does not check
                    # if it is a legal configuration - check this (slow)
                    new_configuration = Configuration(
                        configuration_space, vector=new_array)  # type: Configuration
                    # Only rigorously check a small fraction of the
                    # configurations (because moving around in the
                    # neighborhood should just work!)
                    if np.random.random() > 0.95:
                        new_configuration.is_valid_configuration()
                    else:
                        configuration_space._check_forbidden(new_array)
                    neighbourhood.append(new_configuration)
                except ForbiddenValueError:
                    pass

                iteration += 1
                if len(neighbourhood) > 0:
                    number_of_sampled_neighbors += 1

            # Some infinite loop happened and no valid neighbor was found OR
            # no valid neighbor is available for a categorical
            if len(neighbourhood) == 0:
                hyperparameters_used.append(hp_name)
                n_neighbors_per_hp[hp_name] = 0
                hyperparameters_used.append(hp_name)
            else:
                if hp_name not in hyperparameters_used:
                    n_ = neighbourhood.pop()
                    n_neighbors_per_hp[hp_name] -= 1
                    if n_neighbors_per_hp[hp_name] == 0:
                        hyperparameters_used.append(hp_name)
                    yield n_
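# Hedged usage sketch (not part of the original module): iterates over the
# one-exchange neighbourhood of a sampled configuration. The function name
# and the toy space below are illustrative assumptions.
def _example_get_one_exchange_neighbourhood():
    cs = ConfigurationSpace(seed=1)
    cs.add_hyperparameter(UniformFloatHyperparameter('x', -5, 10))
    cs.add_hyperparameter(CategoricalHyperparameter('c', ['a', 'b']))

    config = cs.sample_configuration()
    neighbors = list(get_one_exchange_neighbourhood(config, seed=1,
                                                    num_neighbors=4))
    # Each yielded Configuration differs from `config` in exactly one value.
    return neighbors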
def test_check_configuration(self):
    # TODO this is only a smoke test
    # TODO actually, this rather tests the evaluate methods in the
    # conditions module!
    cs = ConfigurationSpace()
    hp1 = CategoricalHyperparameter("parent", [0, 1])
    cs.add_hyperparameter(hp1)
    hp2 = UniformIntegerHyperparameter("child", 0, 10)
    cs.add_hyperparameter(hp2)
    cond1 = EqualsCondition(hp2, hp1, 0)
    cs.add_condition(cond1)
    # This automatically checks the configuration!
    Configuration(cs, dict(parent=0, child=5))

    # and now for something more complicated
    cs = ConfigurationSpace()
    hp1 = CategoricalHyperparameter("input1", [0, 1])
    cs.add_hyperparameter(hp1)
    hp2 = CategoricalHyperparameter("input2", [0, 1])
    cs.add_hyperparameter(hp2)
    hp3 = CategoricalHyperparameter("input3", [0, 1])
    cs.add_hyperparameter(hp3)
    hp4 = CategoricalHyperparameter("input4", [0, 1])
    cs.add_hyperparameter(hp4)
    hp5 = CategoricalHyperparameter("input5", [0, 1])
    cs.add_hyperparameter(hp5)
    hp6 = Constant("AND", "True")
    cs.add_hyperparameter(hp6)

    cond1 = EqualsCondition(hp6, hp1, 1)
    cond2 = NotEqualsCondition(hp6, hp2, 1)
    cond3 = InCondition(hp6, hp3, [1])
    cond4 = EqualsCondition(hp6, hp4, 1)
    cond5 = EqualsCondition(hp6, hp5, 1)

    conj1 = AndConjunction(cond1, cond2)
    conj2 = OrConjunction(conj1, cond3)
    conj3 = AndConjunction(conj2, cond4, cond5)
    cs.add_condition(conj3)

    expected_outcomes = [
        False, False, False, False, False, False, False, True,
        False, False, False, False, False, False, False, True,
        False, False, False, True, False, False, False, True,
        False, False, False, False, False, False, False, True
    ]

    for idx, values in enumerate(product([0, 1], repeat=5)):
        # The hyperparameters aren't sorted, but the test assumes them to
        # be sorted.
        hyperparameters = sorted(cs.get_hyperparameters(),
                                 key=lambda t: t.name)
        instantiations = {
            hyperparameters[jdx + 1].name: values[jdx]
            for jdx in range(len(values))
        }

        evaluation = conj3.evaluate(instantiations)
        self.assertEqual(expected_outcomes[idx], evaluation)

        if not evaluation:
            self.assertRaisesRegexp(
                ValueError,
                "Inactive hyperparameter 'AND' must "
                "not be specified, but has the vector value: "
                "'0.0'.",
                Configuration,
                cs,
                values={
                    "input1": values[0],
                    "input2": values[1],
                    "input3": values[2],
                    "input4": values[3],
                    "input5": values[4],
                    "AND": "True"
                })
        else:
            Configuration(cs, values={
                "input1": values[0],
                "input2": values[1],
                "input3": values[2],
                "input4": values[3],
                "input5": values[4],
                "AND": "True"
            })