def testExpandSearchSpace(self):
    """GridSearch expands a nested search space into the full 2x2 grid."""
    experiments = {
        "test-exp": {
            "run": "f1",
            "config": {"a": {"d": "dummy"}},
        }
    }
    search_space = SearchSpace([
        DiscreteSpace("a.b.c", [1, 2]),
        DiscreteSpace("a.d", ["a", "b"]),
    ])
    searcher = GridSearch(search_space, "reward")
    searcher.add_configurations(experiments)
    generated = searcher.next_trials()
    # Two dimensions with two values each -> four trials in total.
    self.assertEqual(len(generated), 4)
    # The dotted keys must have been expanded into nested config dicts.
    self.assertIn(generated[0].config["a"]["b"]["c"], [1, 2])
    self.assertIn(generated[1].config["a"]["d"], ["a", "b"])
def testExpandSearchSpace(self):
    """Verify GridSearch enumerates every combination of the search grid."""
    exp = {"test-exp": {"run": "f1", "config": {"a": {"d": "dummy"}}}}
    dimensions = [
        DiscreteSpace("a.b.c", [1, 2]),
        DiscreteSpace("a.d", ["a", "b"]),
    ]
    searcher = GridSearch(SearchSpace(dimensions), "reward")
    searcher.add_configurations(exp)
    trials = searcher.next_trials()
    # 2 values x 2 values -> 4 grid points.
    self.assertEqual(4, len(trials))
    # Dotted parameter names expand into nested config dictionaries.
    self.assertTrue(trials[0].config["a"]["b"]["c"] in (1, 2))
    self.assertTrue(trials[1].config["a"]["d"] in ("a", "b"))
def testSearchRound(self):
    """The whole grid is emitted in one round and the search finishes once
    every trial has completed, even when one of them errors out."""
    exp = {"test-exp": {"run": "f1", "config": {"a": {"d": "dummy"}}}}
    searcher = GridSearch(
        SearchSpace([
            DiscreteSpace("a.b.c", [1, 2]),
            DiscreteSpace("a.d", ["a", "b"]),
        ]),
        "reward",
    )
    searcher.add_configurations(exp)
    trials = searcher.next_trials()
    # The full grid was handed out above, so a second request is empty.
    self.assertEqual(len(searcher.next_trials()), 0)
    # Complete all but the first trial normally...
    for pending in trials[1:]:
        searcher.on_trial_complete(pending.trial_id)
    # ...then fail the first one; the search must still terminate.
    searcher.on_trial_complete(trials[0].trial_id, error=True)
    self.assertTrue(searcher.is_finished())
def testBestTrial(self):
    """The searcher reports the trial with the highest observed reward."""
    exp = {"test-exp": {"run": "f1", "config": {"a": {"d": "dummy"}}}}
    searcher = GridSearch(
        SearchSpace([
            DiscreteSpace("a.b.c", [1, 2]),
            DiscreteSpace("a.d", ["a", "b"]),
        ]),
        "reward",
    )
    searcher.add_configurations(exp)
    trials = searcher.next_trials()
    # The grid is exhausted after the first round.
    self.assertEqual(len(searcher.next_trials()), 0)
    for index, trial in enumerate(trials):
        # Trial i reports rewards i..i+9 in random order, so later trials
        # have strictly higher maxima than earlier ones.
        rewards = list(range(index, index + 10))
        random.shuffle(rewards)
        for reward in rewards:
            searcher.on_trial_result(trial.trial_id, {"reward": reward})
    best_trial = searcher.get_best_trial()
    self.assertEqual(best_trial, trials[-1])
    # The last trial (i == 3) peaks at 3 + 10 - 1 == 12.
    self.assertEqual(best_trial.best_result["reward"], 3 + 10 - 1)
def testBestTrial(self):
    """The trial with the highest reported reward is returned as best.

    Each trial i receives the rewards i..i+9 in shuffled order; the last
    trial therefore has the highest maximum (3 + 10 - 1 == 12) and must be
    the one selected by ``get_best_trial()``.
    """
    exp = {"test-exp": {"run": "f1", "config": {"a": {"d": "dummy"}}}}
    space = SearchSpace([
        DiscreteSpace("a.b.c", [1, 2]),
        DiscreteSpace("a.d", ["a", "b"]),
    ])
    searcher = GridSearch(space, "reward")
    searcher.add_configurations(exp)
    trials = searcher.next_trials()
    # The grid was fully expanded above; no trials remain.
    self.assertEqual(len(searcher.next_trials()), 0)
    for i, trial in enumerate(trials):
        # list(range(...)) instead of a copy-comprehension (ruff PERF402).
        rewards = list(range(i, i + 10))
        random.shuffle(rewards)
        for reward in rewards:
            searcher.on_trial_result(trial.trial_id, {"reward": reward})
    best_trial = searcher.get_best_trial()
    self.assertEqual(best_trial, trials[-1])
    self.assertEqual(best_trial.best_result["reward"], 3 + 10 - 1)
if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument("--smoke-test", action="store_true", help="Finish quickly for testing") args, _ = parser.parse_known_args() ray.init(redirect_output=True) register_trainable("exp", michalewicz_function) space = SearchSpace({ ContinuousSpace('x1', 0, 4, 100), ContinuousSpace('x2', -2, 2, 100), ContinuousSpace('x3', 1, 5, 100), ContinuousSpace('x4', -3, 3, 100), DiscreteSpace('x5', [-1, 0, 1, 2, 3]), }) config = { "my_exp": { "run": "exp", "stop": { "training_iteration": 100 }, } } algo = GeneticSearch(space, reward_attr="neg_mean_loss", max_generation=2 if args.smoke_test else 10,
def gen_al_space(self):
    """Create the hyperparameter space for the genetic algorithm optimizer.

    Builds the space from ``self.hyperparams_to_optimize`` following the
    ``ray.tune.automl`` syntax. That dictionary must follow the next syntax:

    .. code:: python

        hyper_to_opt = {'hyperparam_1': {'type': ...,
                                         'range': ...,
                                         'step': ...},
                        'hyperparam_2': {'type': ...,
                                         'range': ...,
                                         'step': ...},
                        ...
                        }

    See the official documentation for more details.

    Returns:
        ray.tune.automl.search_space.SearchSpace: space of hyperparameters
        following the syntax required by the genetic algorithm optimizer.

    Example::

        hyper_top_opt = {
            'cnn_rnn_dropout':{
                'type': 'uniform',
                'range': [0, 1]},
            'optimizer_type':{
                'type': 'choice',
                'range': ['Adadelta', 'Adam', 'RMSProp', 'SGD']},
            'layer1_filters':{
                'type': 'quniform',
                'range': [16, 64],
                'step': 1}}

    Raises:
        KeyError: if ``type`` is other than ``uniform``, ``quniform`` or
            ``choice``.
    """
    space = []

    # Map every hyperparameter entry onto the matching tune.automl space.
    for key, item in self.hyperparams_to_optimize.items():
        if item['type'] == 'uniform':
            low, high = item['range'][0], item['range'][1]
            # BUGFIX: the number of discretization points must be positive.
            # The original computed (low - high) * 100, which is negative
            # whenever low < high (e.g. range [0, 1] gave -100).
            space.append(ContinuousSpace(key, low, high, (high - low) * 100))
        elif item['type'] == 'quniform':
            # Quantized uniform: enumerate the values from low to high
            # (inclusive) in increments of 'step'.
            space.append(
                DiscreteSpace(
                    key,
                    list(
                        range(item['range'][0],
                              item['range'][1] + item['step'],
                              item['step']))))
        elif item['type'] == 'choice':
            # Categorical: the range already lists the allowed values.
            space.append(DiscreteSpace(key, item['range']))
        else:
            raise KeyError('Genetic algorithm optimization only supports '
                           'uniform, quniform and choice space types')
    return SearchSpace(space)
# Negate y since we want to minimize y value tune.report(timesteps_total=1, neg_mean_loss=-y) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument( "--smoke-test", action="store_true", help="Finish quickly for testing") args, _ = parser.parse_known_args() space = SearchSpace({ ContinuousSpace("x1", 0, 4, 100), ContinuousSpace("x2", -2, 2, 100), ContinuousSpace("x3", 1, 5, 100), ContinuousSpace("x4", -3, 3, 100), DiscreteSpace("x5", [-1, 0, 1, 2, 3]), }) algo = GeneticSearch( space, reward_attr="neg_mean_loss", max_generation=2 if args.smoke_test else 10, population_size=10 if args.smoke_test else 50) scheduler = AsyncHyperBandScheduler() analysis = tune.run( michalewicz_function, metric="neg_mean_loss", mode="max", name="my_exp",