def test_bayesian_save_reload(tmp_dir):
    """Oracle state survives a save/reload round trip.

    Runs 3 trials, saves, reloads into a fresh oracle, runs 3 more, and
    checks that all 6 trials are retained.
    """
    hps = hp_module.HyperParameters()
    hps.Choice('a', [1, 2], default=1)
    hps.Choice('b', [3, 4], default=3)
    hps.Choice('c', [5, 6], default=5)
    hps.Choice('d', [7, 8], default=7)
    hps.Choice('e', [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective('score', 'max'),
        max_trials=20,
        hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, 'untitled')
    # Run a few trials, then persist the oracle state.
    for _ in range(3):
        trial = oracle.create_trial('tuner_id')
        oracle.update_trial(trial.trial_id, {'score': 1.})
        oracle.end_trial(trial.trial_id, "COMPLETED")
    oracle.save()
    # Build a fresh oracle over the same project dir and restore the state.
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective('score', 'max'),
        max_trials=20,
        hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, 'untitled')
    oracle.reload()
    # Fix: the loop variable was named `trial_id` but never used.
    for _ in range(3):
        trial = oracle.create_trial('tuner_id')
        oracle.update_trial(trial.trial_id, {'score': 1.})
        oracle.end_trial(trial.trial_id, "COMPLETED")
    # 3 trials before saving + 3 after reloading.
    assert len(oracle.trials) == 6
def test_bayesian_save_reload(tmp_dir):
    """Oracle state survives a save/reload round trip.

    Runs 3 trials, saves, reloads into a fresh oracle, runs 3 more, and
    checks that all 6 trials are retained.
    """
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Choice("b", [3, 4], default=3)
    hps.Choice("c", [5, 6], default=5)
    hps.Choice("d", [7, 8], default=7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"), max_trials=20, hyperparameters=hps
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    # Run a few trials, then persist the oracle state.
    for _ in range(3):
        trial = oracle.create_trial("tuner_id")
        oracle.update_trial(trial.trial_id, {"score": 1.0})
        oracle.end_trial(trial.trial_id, "COMPLETED")
    oracle.save()
    # Build a fresh oracle over the same project dir and restore the state.
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"), max_trials=20, hyperparameters=hps
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    oracle.reload()
    # Fix: the loop variable was named `trial_id` but never used.
    for _ in range(3):
        trial = oracle.create_trial("tuner_id")
        oracle.update_trial(trial.trial_id, {"score": 1.0})
        oracle.end_trial(trial.trial_id, "COMPLETED")
    # 3 trials before saving + 3 after reloading.
    assert len(oracle.trials) == 6
def test_bayesian_oracle_maximize(tmp_dir):
    """With 'max' objective, the oracle should propose hps in the
    high-scoring region of the seeded trials (here, positive 'a')."""
    hps = hp_module.HyperParameters()
    hps.Int("a", -100, 100)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    # Seed completed trials: positive 'a' paired with high scores...
    for step in range(5):
        seeded = trial_module.Trial(hyperparameters=hps.copy())
        seeded.hyperparameters.values["a"] = 10 * step
        seeded.score = step
        seeded.status = "COMPLETED"
        oracle.trials[seeded.trial_id] = seeded
    # ...and negative 'a' paired with low scores.
    for step in range(5):
        seeded = trial_module.Trial(hyperparameters=hps.copy())
        seeded.hyperparameters.values["a"] = -10 * step
        seeded.score = -step
        seeded.status = "COMPLETED"
        oracle.trials[seeded.trial_id] = seeded
    suggestion = oracle.create_trial("tuner0")
    assert suggestion.status == "RUNNING"
    # The suggested 'a' should fall on the side the model expects to maximize.
    assert suggestion.hyperparameters.get("a") > 0
def test_hyperparameters_added(tmp_dir):
    """Hyperparameters registered via update_space show up in new trials."""
    hps = hp_module.HyperParameters()
    hps.Int("a", -100, 100)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    # Seed the oracle with completed trials over the original space.
    for score in range(10):
        seeded = trial_module.Trial(hyperparameters=hps.copy())
        seeded.hyperparameters.values["a"] = 10 * score
        seeded.score = score
        seeded.status = "COMPLETED"
        oracle.trials[seeded.trial_id] = seeded
    # Grow the search space with two new hyperparameters.
    new_hps = hp_module.HyperParameters()
    new_hps.Float("b", 3.2, 6.4, step=0.2, default=3.6)
    new_hps.Boolean("c", default=True)
    oracle.update_space(new_hps)
    # The next trial must carry values for the newly added hps.
    suggestion = oracle.create_trial("tuner0")
    assert suggestion.status == "RUNNING"
    assert "b" in suggestion.hyperparameters.values
    assert "c" in suggestion.hyperparameters.values
def test_bayesian_oracle_maximize(tmp_dir):
    """With 'max' objective, the oracle should propose hps in the
    high-scoring region of the seeded trials (here, positive 'a')."""
    hps = hp_module.HyperParameters()
    hps.Int('a', -100, 100)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective('score', direction='max'),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2)
    oracle._set_project_dir(tmp_dir, 'untitled')
    # Seed completed trials: positive 'a' paired with high scores...
    for step in range(5):
        seeded = trial_module.Trial(hyperparameters=hps.copy())
        seeded.hyperparameters.values['a'] = 10 * step
        seeded.score = step
        seeded.status = 'COMPLETED'
        oracle.trials[seeded.trial_id] = seeded
    # ...and negative 'a' paired with low scores.
    for step in range(5):
        seeded = trial_module.Trial(hyperparameters=hps.copy())
        seeded.hyperparameters.values['a'] = -10 * step
        seeded.score = -step
        seeded.status = 'COMPLETED'
        oracle.trials[seeded.trial_id] = seeded
    suggestion = oracle.create_trial('tuner0')
    assert suggestion.status == 'RUNNING'
    # The suggested 'a' should fall on the side the model expects to maximize.
    assert suggestion.hyperparameters.get('a') > 0
def test_hyperparameters_added(tmp_dir):
    """Hyperparameters registered via update_space show up in new trials."""
    hps = hp_module.HyperParameters()
    hps.Int('a', -100, 100)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective('score', direction='max'),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2)
    oracle._set_project_dir(tmp_dir, 'untitled')
    # Seed the oracle with completed trials over the original space.
    for score in range(10):
        seeded = trial_module.Trial(hyperparameters=hps.copy())
        seeded.hyperparameters.values['a'] = 10 * score
        seeded.score = score
        seeded.status = 'COMPLETED'
        oracle.trials[seeded.trial_id] = seeded
    # Grow the search space with two new hyperparameters.
    new_hps = hp_module.HyperParameters()
    new_hps.Float('b', 3.2, 6.4, step=0.2, default=3.6)
    new_hps.Boolean('c', default=True)
    oracle.update_space(new_hps)
    # The next trial must carry values for the newly added hps.
    suggestion = oracle.create_trial('tuner0')
    assert suggestion.status == 'RUNNING'
    assert 'b' in suggestion.hyperparameters.values
    assert 'c' in suggestion.hyperparameters.values
def test_save_before_result(tmp_dir):
    """Saving the oracle before any result is reported must not fail,
    and reporting the result afterwards must still work."""
    hp_list = [
        hp_module.Choice('a', [1, 2], default=1),
        hp_module.Int('b', 3, 10, default=3),
        hp_module.Float('c', 0, 1, 0.1, default=0),
        hp_module.Fixed('d', 7),
        hp_module.Choice('e', [9, 0], default=9),
    ]
    oracle = bo_module.BayesianOptimizationOracle()
    oracle.populate_space(str(1), hp_list)
    # Persist while the trial is still unscored.
    oracle.save(os.path.join(tmp_dir, 'temp_oracle'))
    oracle.result(str(1), 0)
def test_bayesian_oracle_with_zero_y(tmp_dir):
    """The oracle tolerates a long run of identical all-zero scores
    (a degenerate objective that gives the model no signal)."""
    hp_list = [
        hp_module.Choice('a', [1, 2], default=1),
        hp_module.Int('b', 3, 10, default=3),
        hp_module.Float('c', 0, 1, 0.1, default=0),
        hp_module.Fixed('d', 7),
        hp_module.Choice('e', [9, 0], default=9),
    ]
    oracle = bo_module.BayesianOptimizationOracle()
    for idx in range(100):
        oracle.populate_space(str(idx), hp_list)
        oracle.result(str(idx), 0)
def test_bayesian_save_reload(tmp_dir):
    """A reloaded oracle keeps producing runnable trials after a round trip,
    even when some pre-save trials were left unscored."""
    hp_list = [
        hp_module.Choice('a', [1, 2], default=1),
        hp_module.Choice('b', [3, 4], default=3),
        hp_module.Choice('c', [5, 6], default=5),
        hp_module.Choice('d', [7, 8], default=7),
        hp_module.Choice('e', [9, 0], default=9),
    ]
    oracle = bo_module.BayesianOptimizationOracle()
    # Populate three trials, but only score two of them.
    for idx in range(3):
        oracle.populate_space('0_' + str(idx), hp_list)
    for idx in range(2):
        oracle.result('0_' + str(idx), idx)
    fname = os.path.join(tmp_dir, 'oracle')
    oracle.save(fname)
    # Restore into a fresh oracle and keep requesting trials.
    oracle = bo_module.BayesianOptimizationOracle()
    oracle.reload(fname)
    for idx in range(20):
        hp = oracle.populate_space('1_' + str(idx), hp_list)
        assert hp['status'] == 'RUN'
def test_save_before_result(tmp_dir):
    """Saving the oracle before any trial result is reported must not fail."""
    hps = hp_module.HyperParameters()
    hps.Choice('a', [1, 2], default=1)
    hps.Int('b', 3, 10, default=3)
    hps.Float('c', 0, 1, 0.1, default=0)
    hps.Fixed('d', 7)
    hps.Choice('e', [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective='score',
        max_trials=10,
        hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, 'untitled')
    # Generate one set of values, then persist without reporting a score.
    oracle._populate_space(str(1))
    oracle.save()
def test_save_before_result(tmp_dir):
    """Saving the oracle before any trial result is reported must not fail."""
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Int("b", 3, 10, default=3)
    hps.Float("c", 0, 1, 0.1, default=0)
    hps.Fixed("d", 7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"),
        max_trials=10,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    # Generate one set of values, then persist without reporting a score.
    oracle.populate_space(str(1))
    oracle.save()
def test_bayesian_dynamic_space(tmp_dir):
    """Hyperparameters appended to the list mid-search are picked up by
    subsequent populate_space calls."""
    hp_list = [hp_module.Choice('a', [1, 2], default=1)]
    oracle = bo_module.BayesianOptimizationOracle()
    # Warm up the oracle on the initial one-hp space.
    for idx in range(10):
        oracle.populate_space(str(idx), hp_list)
        oracle.result(str(idx), idx)
    # Each newly appended hp must appear in the next trial's values.
    hp_list.append(hp_module.Int('b', 3, 10, default=3))
    assert 'b' in oracle.populate_space('1_0', hp_list)['values']
    hp_list.append(hp_module.Float('c', 0, 1, 0.1, default=0))
    assert 'c' in oracle.populate_space('1_1', hp_list)['values']
    hp_list.append(hp_module.Fixed('d', 7))
    assert 'd' in oracle.populate_space('1_2', hp_list)['values']
    hp_list.append(hp_module.Choice('e', [9, 0], default=9))
    assert 'e' in oracle.populate_space('1_3', hp_list)['values']
def test_bayesian_oracle_with_zero_y(tmp_dir):
    """The oracle tolerates trials that all report a score of zero
    (a degenerate objective that gives the model no signal)."""
    hps = hp_module.HyperParameters()
    hps.Choice('a', [1, 2], default=1)
    hps.Int('b', 3, 10, default=3)
    hps.Float('c', 0, 1, 0.1, default=0)
    hps.Fixed('d', 7)
    hps.Choice('e', [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective='score',
        max_trials=20,
        hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, 'untitled')
    for idx in range(5):
        running = oracle.create_trial(str(idx))
        oracle.update_trial(running.trial_id, {'score': 0})
        oracle.end_trial(running.trial_id, "COMPLETED")
def test_bayesian_dynamic_space(tmp_dir):
    """Hyperparameters added to the space mid-search are picked up by
    subsequent _populate_space calls."""
    hps = hp_module.HyperParameters()
    hps.Choice('a', [1, 2], default=1)
    oracle = bo_module.BayesianOptimizationOracle(
        objective='val_acc',
        max_trials=20)
    oracle._set_project_dir(tmp_dir, 'untitled')
    oracle.hyperparameters = hps
    # Warm up the oracle on the initial one-hp space.
    for idx in range(10):
        oracle._populate_space(str(idx))
    # Each newly registered hp must appear in the next trial's values.
    hps.Int('b', 3, 10, default=3)
    assert 'b' in oracle._populate_space('1_0')['values']
    hps.Float('c', 0, 1, 0.1, default=0)
    assert 'c' in oracle._populate_space('1_1')['values']
    hps.Fixed('d', 7)
    assert 'd' in oracle._populate_space('1_2')['values']
    hps.Choice('e', [9, 0], default=9)
    assert 'e' in oracle._populate_space('1_3')['values']
def test_bayesian_dynamic_space(tmp_dir):
    """Hyperparameters added to the space mid-search are picked up by
    subsequent populate_space calls."""
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    oracle = bo_module.BayesianOptimizationOracle(
        objective="val_acc",
        max_trials=20,
        num_initial_points=10,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    oracle.hyperparameters = hps
    # Burn through the initial random points on the one-hp space.
    for idx in range(10):
        oracle.populate_space(str(idx))
    # Each newly registered hp must appear in the next trial's values.
    hps.Int("b", 3, 10, default=3)
    assert "b" in oracle.populate_space("1_0")["values"]
    hps.Float("c", 0, 1, 0.1, default=0)
    assert "c" in oracle.populate_space("1_1")["values"]
    hps.Fixed("d", 7)
    assert "d" in oracle.populate_space("1_2")["values"]
    hps.Choice("e", [9, 0], default=9)
    assert "e" in oracle.populate_space("1_3")["values"]
def test_bayesian_oracle_with_zero_y(tmp_dir):
    """The oracle tolerates trials that all report a score of zero
    (a degenerate objective that gives the model no signal)."""
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Int("b", 3, 10, default=3)
    hps.Float("c", 0, 1, 0.1, default=0)
    hps.Fixed("d", 7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"),
        max_trials=20,
        num_initial_points=2,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    for idx in range(5):
        running = oracle.create_trial(str(idx))
        oracle.update_trial(running.trial_id, {"score": 0})
        oracle.end_trial(running.trial_id, "COMPLETED")
def test_distributed_optimization(tmp_dir):
    """Several tuners sharing one oracle should converge near the known
    minimum of a simple analytic objective."""
    hps = hp_module.HyperParameters()
    hps.Int('a', 0, 10)
    hps.Float('b', -1, 1, step=0.1)
    hps.Float('c', 1e-5, 1e-2, sampling='log')

    def evaluate(hp):
        # Minimum at a=4, b=1, c=1e-3 with score=-1
        return abs(hp['a'] - 4) - hp['b'] + 0.1 * abs(3 + math.log(hp['c'], 10))

    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective('score', 'min'),
        hyperparameters=hps,
        max_trials=60)
    oracle._set_project_dir(tmp_dir, 'untitled')
    tuners = 4
    # Ten synchronized rounds of 4 concurrent trials each.
    for _ in range(10):
        batch = [oracle.create_trial('tuner_' + str(i)) for i in range(tuners)]
        for running in batch:
            oracle.update_trial(
                running.trial_id, {'score': evaluate(running.hyperparameters)})
        for running in batch:
            oracle.end_trial(running.trial_id, 'COMPLETED')
    atol, rtol = 1e-1, 1e-1
    best_trial = oracle.get_best_trials()[0]
    best_hps = best_trial.hyperparameters
    # The minimum is not always found but it is always close.
    assert best_trial.score < -0.8, best_hps.values
    assert np.isclose(best_hps['a'], 4, atol=atol, rtol=rtol)
    assert np.isclose(best_hps['b'], 1, atol=atol, rtol=rtol)
    # For log-scale param, just check that the order of magnitude is correct.
    log_best_c = math.log(best_hps['c'], 10)
    assert -4 < log_best_c < -2
def test_step_respected(tmp_dir):
    """Suggested values for a stepped Float hp stay on the step grid."""
    hps = hp_module.HyperParameters()
    hps.Float('c', 0, 10, step=3)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective('score', direction='max'),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2)
    oracle._set_project_dir(tmp_dir, 'untitled')
    # Seed completed trials that all used c=3.
    for score in range(10):
        seeded = trial_module.Trial(hyperparameters=hps.copy())
        seeded.hyperparameters.values['c'] = 3.
        seeded.score = score
        seeded.status = 'COMPLETED'
        oracle.trials[seeded.trial_id] = seeded
    suggestion = oracle.create_trial('tuner0')
    # The proposal must land on the grid {0, 3, 6, 9} defined by `step`.
    assert suggestion.hyperparameters.get('c') in {0, 3, 6, 9}
def test_step_respected(tmp_dir):
    """Suggested values for a stepped Float hp stay on the step grid."""
    hps = hp_module.HyperParameters()
    hps.Float("c", 0, 10, step=3)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    # Seed completed trials that all used c=3.
    for score in range(10):
        seeded = trial_module.Trial(hyperparameters=hps.copy())
        seeded.hyperparameters.values["c"] = 3.0
        seeded.score = score
        seeded.status = "COMPLETED"
        oracle.trials[seeded.trial_id] = seeded
    suggestion = oracle.create_trial("tuner0")
    # The proposal must land on the grid {0, 3, 6, 9} defined by `step`.
    assert suggestion.hyperparameters.get("c") in {0, 3, 6, 9}