def test_sample_types_no_cat(self):
    """Check that non-categorical hyperparameters keep their declared types.

    The run-function asserts that ``x_int`` is sampled as a NumPy integer
    and ``x_float`` as a floating value, for both the DUMMY and RF
    surrogate models.
    """
    import numpy as np

    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()
    problem.add_hyperparameter((0, 10), "x_int")
    problem.add_hyperparameter((0.0, 10.0), "x_float")

    def run(config):
        assert np.issubdtype(type(config["x_int"]), np.integer)
        # np.floating for consistency with the integer check above
        # (np.issubdtype converts the builtin ``float`` to np.floating
        # anyway, so this is behavior-equivalent but explicit).
        assert np.issubdtype(type(config["x_float"]), np.floating)
        return 0

    create_evaluator = lambda: Evaluator.create(run, method="serial")

    CBO(problem, create_evaluator(), random_state=42, surrogate_model="DUMMY").search(10)
    CBO(problem, create_evaluator(), random_state=42, surrogate_model="RF").search(10)
def test_gp(self):
    """Run CBO with the GP surrogate over float, int and categorical spaces.

    Each sub-case builds a one-variable problem and runs 10 evaluations;
    the test only checks that the search completes without error.
    """
    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    def _run_gp_search(value, run):
        # Build a single-hyperparameter problem and run a short GP search.
        problem = HpProblem()
        problem.add_hyperparameter(value, "x")
        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model="GP",
        ).search(10)

    # test float hyperparameters
    _run_gp_search((0.0, 10.0), lambda config: config["x"])
    # test int hyperparameters
    _run_gp_search((0, 10), lambda config: config["x"])
    # test categorical hyperparameters
    _run_gp_search([f"{i}" for i in range(10)], lambda config: int(config["x"]))
def test_quickstart(self):
    """Minimal quick-start example: optimize a single continuous variable."""
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO
    from deephyper.evaluator import Evaluator

    # Search space: one float variable in [-10, 10].
    problem = HpProblem()
    problem.add_hyperparameter((-10.0, 10.0), "x")

    # Distribute the computation over two subprocess workers.
    # NOTE(review): ``run`` is assumed to be defined at module level — confirm.
    evaluator = Evaluator.create(
        run,
        method="subprocess",
        method_kwargs={"num_workers": 2},
    )

    # Define the search and execute it for a fixed evaluation budget.
    search = CBO(problem, evaluator)
    results = search.search(max_evals=15)
def test_sample_types_conditional(self):
    """Check integer sampling under conditional (choice-dependent) parameters."""
    import ConfigSpace as cs
    import numpy as np

    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()

    # A categorical "choice" decides which integer parameter is active.
    choice = problem.add_hyperparameter(
        name="choice",
        value=["choice1", "choice2"],
    )
    x1_int = problem.add_hyperparameter(name="x1_int", value=(1, 10))
    x2_int = problem.add_hyperparameter(name="x2_int", value=(1, 10))

    # x1_int is only active for "choice1", x2_int only for "choice2".
    cond_1 = cs.EqualsCondition(x1_int, choice, "choice1")
    cond_2 = cs.EqualsCondition(x2_int, choice, "choice2")
    problem.add_condition(cond_1)
    problem.add_condition(cond_2)

    def run(config):
        # Whichever branch is active must still be a NumPy integer.
        if config["choice"] == "choice1":
            assert np.issubdtype(type(config["x1_int"]), np.integer)
        else:
            assert np.issubdtype(type(config["x2_int"]), np.integer)
        return 0

    create_evaluator = lambda: Evaluator.create(run, method="serial")
    CBO(problem, create_evaluator(), random_state=42, surrogate_model="DUMMY").search(10)
# Compare CBO runs under three strategies for handling failed evaluations.
from deephyper.search.hps import CBO
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback

results = {}
max_evals = 30
for failure_strategy in ["ignore", "mean", "min"]:
    print(f"Executing failure strategy: {failure_strategy}")
    # NOTE(review): ``run`` and ``problem`` are assumed to be defined earlier
    # in this file — confirm.
    evaluator = Evaluator.create(
        run, method="serial", method_kwargs={"callbacks": [TqdmCallback(max_evals)]}
    )
    # Each strategy writes its logs into its own directory so runs don't clash.
    search = CBO(
        problem,
        evaluator,
        filter_failures=failure_strategy,
        log_dir=f"search_{failure_strategy}",
        random_state=42,
    )
    results[failure_strategy] = search.search(max_evals)

# %%
# Finally we plot the collected results
import matplotlib.pyplot as plt
import numpy as np

plt.figure()
for i, (failure_strategy, df) in enumerate(results.items()):
    # One subplot per failure strategy, stacked vertically.
    plt.subplot(3, 1, i + 1)
    # NOTE(review): the body of this ``if`` is truncated in this view — the
    # statement continues past the visible source.
    if df.objective.dtype != np.float64:
def test_random_seed(self):
    """Check that a fixed ``random_state`` makes searches reproducible.

    Runs the same DUMMY-surrogate search twice and asserts that the
    sampled ``x`` values are identical, for both single-objective and
    multi-objective run-functions.
    """
    import numpy as np

    from deephyper.evaluator import Evaluator
    from deephyper.problem import HpProblem
    from deephyper.search.hps import CBO

    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    def _assert_reproducible(run):
        # Two independent searches with the same seed must sample the exact
        # same sequence of configurations.
        def _sampled_x():
            search = CBO(
                problem,
                Evaluator.create(run, method="serial"),
                random_state=42,
                surrogate_model="DUMMY",
            )
            return search.search(max_evals=4)[["x"]].to_numpy()

        assert np.array_equal(_sampled_x(), _sampled_x())

    # single objective
    _assert_reproducible(lambda config: config["x"])
    # test multi-objective
    _assert_reproducible(lambda config: (config["x"], config["x"]))
# Run a CBO search with a wall-clock time budget and profile worker activity.
from deephyper.evaluator.callback import TqdmCallback
from deephyper.search.hps import CBO

timeout = 20
num_workers = 4
results = {}

# NOTE(review): ``Evaluator``, ``black_box`` and ``problem`` are assumed to be
# imported/defined earlier in this file — confirm.
evaluator = Evaluator.create(
    black_box.run_ackley,
    method="process",
    method_kwargs={
        "num_workers": num_workers,
        "callbacks": [TqdmCallback()],
    },
)
search = CBO(problem, evaluator, random_state=42)
# The search stops after ``timeout`` seconds rather than a fixed eval count.
results = search.search(timeout=timeout)

# %%
# Finally, we plot the results from the collected DataFrame.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np

    def compile_profile(df):
        """Take the results dataframe as input and return the number of jobs running at a given timestamp."""
        history = []

        # Each job contributes a +1 event at its start time and a -1 event at
        # its end time; accumulating these yields the concurrency profile.
        for _, row in df.iterrows():
            history.append((row["timestamp_start"], 1))
            history.append((row["timestamp_end"], -1))
        # NOTE(review): ``compile_profile`` is truncated in this view — its
        # remaining body continues past the visible source.
# Then we define serial search by creation a ``"serial"``-evaluator and we execute the search with a fixed time-budget of 2 minutes (i.e., 120 secondes). if __name__ == "__main__": from deephyper.evaluator import Evaluator from deephyper.evaluator.callback import TqdmCallback from deephyper.search.hps import CBO # we give a budget of 2 minutes for each search timeout = 120 serial_evaluator = Evaluator.create( black_box.run_ackley, method="serial", method_kwargs={"callbacks": [TqdmCallback()]}, ) results = {} serial_search = CBO(problem, serial_evaluator, random_state=42) results["serial"] = serial_search.search(timeout=timeout) # %% # After, executing the serial-search for 2 minutes we can create a parallel search which uses the ``"process"``-evaluator and defines 5 parallel workers. The search is also executed for 2 minutes. if __name__ == "__main__": parallel_evaluator = Evaluator.create( black_box.run_ackley, method="process", method_kwargs={ "num_workers": 5, "callbacks": [TqdmCallback()] }, ) parallel_search = CBO(problem, parallel_evaluator, random_state=42)
# NOTE(review): ``N``, ``problem_small``, ``problem_large``, ``run_small`` and
# ``run_large`` are assumed to be defined earlier in this file — confirm.
for i in range(N):
    problem_large.add_hyperparameter((-10.0, 10.0), f"x{i}")

# Display the large problem (notebook-style cell output).
problem_large

# %%
# Then, we set up the search and execute it:

from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback
from deephyper.search.hps import CBO

results = {}
max_evals = 20
evaluator_small = Evaluator.create(
    run_small, method="serial", method_kwargs={"callbacks": [TqdmCallback(max_evals)]}
)
search_small = CBO(problem_small, evaluator_small, random_state=42)
results["Small"] = search_small.search(max_evals)

# %%
evaluator_large = Evaluator.create(
    run_large, method="serial", method_kwargs={"callbacks": [TqdmCallback(max_evals)]}
)
search_large = CBO(problem_large, evaluator_large, random_state=42)
results["Large"] = search_large.search(max_evals)

# %%
# Warm-start a new search on the large problem from the results already
# collected on it (presumably transfer learning via a generative model fitted
# on the previous results — confirm against the CBO API).
evaluator_large_tl = Evaluator.create(
    run_large, method="serial", method_kwargs={"callbacks": [TqdmCallback(max_evals)]}
)
search_large_tl = CBO(problem_large, evaluator_large_tl, random_state=42)
search_large_tl.fit_generative_model(results["Large"])