def test_recording(self):
    space = {
        "x": ["hp_loggrid_uniform", "x", -2.0, 2.0, 1.0],
        "y": ["hp_loggrid_uniform", "y", -2.0, 2.0, 1.0],
    }
    hpo = Hyperopt(space=space, method="tpe")
    state = State(search=hpo)

    results = []
    for i in range(50):
        tid, suggestion = state.suggest()
        results.append(
            (tid, {"ok": {"loss": target(suggestion), "suggestion": suggestion}})
        )
    random.shuffle(results)
    for tid, result in results:
        state.submit(tid, result)

    results = []
    for i in range(10):
        tid, suggestion = state.suggest()
        results.append(
            (tid, {"ok": {"loss": target(suggestion), "suggestion": suggestion}})
        )
    # leave ten more suggestions outstanding so the tape also records live
    # (unsubmitted) trials
    for i in range(10):
        tid, suggestion = state.suggest()
    random.shuffle(results)
    for tid, result in results:
        state.submit(tid, result)

    hpo2 = Hyperopt(space=space, method="tpe")
    state2 = State.from_tape(search=hpo2, tape=state.tape)

    for i in range(10):
        next_tid, next_suggestion = state.suggest()
        next_tid2, next_suggestion2 = state2.suggest()
        self.assertEqual(next_suggestion2, next_suggestion)
        self.assertEqual(next_tid2, next_tid)

    self.assertEqual(state2.tape.raw, state.tape.raw)
    self.assertEqual(state2.live_trials, state.live_trials)
    self.assertEqual(state2.trials.db, state.trials.db)
    # does the evals db do roughly what we expect it to? i.e. hold only
    # unique evaluations, so it should be smaller than the trials db.
    self.assertGreater(len(state2.trials), len(state2.evals))
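# The `target` helper called throughout these tests is defined elsewhere in
# the test module. A minimal sketch of what it is assumed to compute: a
# quadratic loss over the numeric parameters of a suggestion, minimised at
# the origin of the search space (which is why the test_finds_min_* tests
# below expect a best loss under 0.1). The name _assumed_target is a
# hypothetical stand-in, not the suite's actual helper.
def _assumed_target(suggestion):
    # suggestion maps parameter names to the sampled values
    return sum(
        float(v) ** 2 for v in suggestion.values() if isinstance(v, (int, float))
    )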
def test_basic(self):
    space = {
        "x": ["hp_choice", "x", np.linspace(-10, 10, num=21)],
        "y": ["hp_choice", "y", np.linspace(-10, 10, num=21)],
    }
    hpo = Hyperopt(space=space, method="tpe")
    tape = []

    # does it do anything?
    tape = run_n_steps(hpo, 2, tape)
    start = best_loss(tape)
    tape = run_n_steps(hpo, 200, tape)
    end = best_loss(tape)
    self.assertGreater(start, end)

    # test repeatability
    tid, next_suggestion = hpo.suggest()
    hpo2 = Hyperopt(space=space, method="tpe")
    for entry in tape:
        if entry[0] == "suggest":
            tid, s = hpo2.suggest()
            self.assertEqual(tid, entry[1])
            self.assertEqual(s, entry[2])
        else:
            hpo2.submit(entry[1], loss=entry[2])
    tid, next_suggestion2 = hpo2.suggest()
    self.assertEqual(next_suggestion, next_suggestion2)
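# The run_n_steps / best_loss / best_suggestion helpers used by these tests
# are not shown in this section. A minimal sketch, assuming the tape is a
# list of ("suggest", tid, suggestion) entries plus per-submission entries
# carrying (tid, loss), as implied by the replay loop in test_basic, and
# assuming run_n_steps scores suggestions with the `target` helper. The
# _assumed_* names are hypothetical stand-ins, not the suite's helpers.
def _assumed_run_n_steps(hpo, n, tape):
    # suggest, evaluate, and submit n trials, recording every step on the tape
    for _ in range(n):
        tid, suggestion = hpo.suggest()
        tape.append(("suggest", tid, suggestion))
        loss = target(suggestion)
        hpo.submit(tid, loss=loss)
        tape.append(("submit", tid, loss))
    return tape


def _assumed_best_loss(tape):
    # smallest loss recorded on the tape so far
    return min(entry[2] for entry in tape if entry[0] == "submit")


def _assumed_best_suggestion(tape):
    # suggestion belonging to the trial with the smallest recorded loss
    losses = {entry[1]: entry[2] for entry in tape if entry[0] == "submit"}
    best_tid = min(losses, key=losses.get)
    return next(e[2] for e in tape if e[0] == "suggest" and e[1] == best_tid)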
def test_quadratic(self):
    space = {
        "x": ["hp_loggrid", "x", -2.0, 2.0, 161],
        "y": ["hp_loggrid", "y", -2.0, 2.0, 161],
        "z": ["hp_loggrid", "z", -2.0, 2.0, 161],
        "a": ["hp_loggrid", "a", -2.0, 2.0, 161],
        "b": ["hp_loggrid", "b", -2.0, 2.0, 161],
        "c": ["hp_loggrid", "c", -2.0, 2.0, 161],
        "wait": ["hp_choice", "wait", [0.0, 0.005, 0.01, 0.1]],
    }
    search = Hyperopt(space=space)
    run = Run(
        search=search,
        evaluator=MockEvaluator(),
        stop={"stop_max": {"count": 50}},
        context={"max_workers": 25},
        trial_timeout=0.08,
    )
    run.prepare(directory=self.tmpdir)
    run()

    run2 = Run.restore(
        directory=run.work_directory,
        new_stop={"stop_max": {"count": 100}},
    )
    run2.run()

    # checking out the finished run should load the same evals db that run2
    # left behind
    run3 = Run.checkout(directory=run.work_directory)
    self.assertEqual(run3.state.evals.db, run2.state.evals.db)
def test_finds_min_uniform(self):
    space = {
        "x": ["hp_uniform", "x", -4.0, 4.0],
        "y": ["hp_uniform", "y", -4.0, 4.0],
    }
    tape = []
    hpo = Hyperopt(space=space, method="rand")
    run_n_steps(hpo, 200, tape)
    end = best_loss(tape)
    print(end)
    print(best_suggestion(tape))
    self.assertGreater(0.1, end)
def test_quadratic(self):
    # this is a somewhat redundant test, but it puts the pool through a
    # slightly more realistic situation and will hopefully show us when and
    # where it fails.
    space = {
        "x": ["hp_loggrid", "x", -2.0, 2.0, 161],
        "y": ["hp_loggrid", "y", -2.0, 2.0, 161],
        "z": ["hp_loggrid", "z", -2.0, 2.0, 161],
        "a": ["hp_loggrid", "a", -2.0, 2.0, 161],
        "b": ["hp_loggrid", "b", -2.0, 2.0, 161],
        "c": ["hp_loggrid", "c", -2.0, 2.0, 161],
        "wait": ["hp_choice", "wait", [0.0, 0.1]],
    }
    hpo = Hyperopt(space=space)
    pool = EvaluationPool(
        max_workers=40,
        evaluator_config={"mock_eval2": {}},
        trial_timeout=0.07,
    )

    tasks = {}
    for i in range(10):
        t, s = hpo.suggest()
        tasks[t] = s
    futures = {pool.schedule(s): t for t, s in tasks.items()}
    for d in futures:
        pool.finish(d)
    pool.shutdown()

    self.assertEqual(len(pool.evals), 10)
def test_recording_fails_if_different(self):
    space = {
        "x": ["hp_uniform", "x", -4.0, 4.0],
        "y": ["hp_uniform", "y", -4.0, 4.0],
    }
    hpo = Hyperopt(space=space, method="tpe")
    state = State(search=hpo)

    results = []
    for i in range(50):
        tid, suggestion = state.suggest()
        results.append(
            (tid, {"ok": {"loss": target(suggestion), "suggestion": suggestion}})
        )
    random.shuffle(results)
    for tid, result in results:
        state.submit(tid, result)

    # different seed fails
    with self.assertRaises(AssertionError):
        hpo2 = Hyperopt(space=space, method="tpe", seed=1)
        state2 = State.from_tape(search=hpo2, tape=state.tape)

    # entry missing fails
    with self.assertRaises(AssertionError):
        raw_tape = copy.copy(state.tape.raw)
        del raw_tape[21]
        hpo2 = Hyperopt(space=space, method="tpe")
        State.from_tape(
            search=hpo2,
            tape=Tape(metadata={}, backend="list", tape=raw_tape),
        )
def test_finds_min_loggrid(self):
    space = {
        "x": ["hp_loggrid", "x", -2.0, 2.0, 81],
        "y": ["hp_loggrid", "y", -2.0, 2.0, 81],
    }
    tape = []
    hpo = Hyperopt(space=space, method="rand")
    run_n_steps(hpo, 200, tape)
    end = best_loss(tape)
    print(end)
    print(best_suggestion(tape))
    self.assertGreater(0.1, end)
def test_finds_min_loggrid_uniform(self):
    space = {
        "x": ["hp_loggrid_uniform", "x", -2.0, 2.0, 0.05],
        "y": ["hp_loggrid_uniform", "y", -2.0, 2.0, 0.05],
    }
    tape = []
    hpo = Hyperopt(space=space, method="tpe")
    run_n_steps(hpo, 50, tape)
    end = best_loss(tape)
    print(end)
    print(best_suggestion(tape))
    self.assertGreater(0.1, end)
def setUp(self):
    space = {
        "x": ["hp_loggrid_uniform", "x", -2.0, 2.0, 1.0],
        "y": ["hp_loggrid_uniform", "y", -2.0, 2.0, 1.0],
    }
    hpo = Hyperopt(space=space, method="tpe")
    state = State(search=hpo)

    results = []
    for i in range(50):
        tid, suggestion = state.suggest()
        results.append(
            (tid, {"ok": {"loss": target(suggestion), "suggestion": suggestion}})
        )
    for tid, result in results:
        state.submit(tid, result)

    self.state = state
    self.n_evals = len(state.evals)
    self.n_trials = len(state.trials)
def test_finds_min_choice(self):
    # reduce the search space to make it easier
    space = {
        "x": ["hp_choice", "x", np.linspace(-4, 4, num=41)],
        "y": ["hp_choice", "y", np.linspace(-4, 4, num=41)],
    }
    tape = []
    hpo = Hyperopt(space=space, method="tpe")
    run_n_steps(hpo, 50, tape)
    end = best_loss(tape)
    print(end)
    print(best_suggestion(tape))
    self.assertGreater(0.1, end)