def main(repetitions=10, base_random_state=50, default_max_iter=200):
    """Benchmark ``SamplingSortOptimizer`` on every synthetic function.

    For each benchmark in ``synthetic_functions`` this runs ``repetitions``
    independent trials, records the per-iteration regret, converts it to a
    running minimum with ``raw2min``, and appends the mean/std curves to the
    module-level ``final_result`` dict, which is dumped to ``EETPE.json``.

    :param repetitions: number of independent trials per benchmark
        (previously hard-coded to 10).
    :param base_random_state: seed of trial 0; trial ``t`` is seeded with
        ``base_random_state + 10 * t`` (previously hard-coded to 50).
    :param default_max_iter: evaluation budget used when the benchmark's
        meta information has no ``num_function_evals`` entry
        (previously hard-coded to 200).
    """
    for synthetic_function_cls in synthetic_functions:
        meta_info = synthetic_function_cls.get_meta_information()
        # Prefer the benchmark-recommended evaluation budget when present.
        if "num_function_evals" in meta_info:
            max_iter = meta_info["num_function_evals"]
        else:
            max_iter = default_max_iter
        # Build the hyper-parameter space from the benchmark bounds.
        config_space = ConfigurationSpace()
        config_space.generate_all_continuous_from_bounds(
            synthetic_function_cls.get_meta_information()['bounds'])
        synthetic_function = synthetic_function_cls()

        # Objective: regret with respect to the known optimum f_opt.
        def evaluation(config: dict):
            config = Configuration(config_space, values=config)
            return synthetic_function.objective_function(config)["function_value"] - \
                   synthetic_function.get_meta_information()["f_opt"]

        # NOTE: any clean-up of experiment parameters happens after storing.
        res = pd.DataFrame(columns=[f"trial-{i}" for i in range(repetitions)],
                           index=range(max_iter))
        for trial in range(repetitions):
            random_state = base_random_state + trial * 10
            # Seed the space; this affects all subsequent sampling.
            config_space.seed(random_state)
            print("==========================")
            print(f"= Trial -{trial:01d}- =")
            print("==========================")
            print('iter | loss | config origin')
            print('----------------------------')
            cg = SamplingSortOptimizer(config_space, [1],
                                       min_points_in_model=25, n_samples=2500)
            loss = np.inf
            for ix in range(max_iter):
                config, config_info = cg.get_config(1)
                cur_loss = evaluation(config)
                loss = min(loss, cur_loss)  # running best, for display only
                print(f" {ix:03d} {loss:.4f} {config_info.get('origin')}")
                # Feed the observation back to the optimizer.
                job = Job("")
                job.result = {"loss": cur_loss}
                job.kwargs = {
                    "budget": 1,
                    "config": config,
                    "config_info": config_info
                }
                cg.new_result(job)
                res.loc[ix, f"trial-{trial}"] = cur_loss
        res = raw2min(res)
        m = res.mean(1)
        s = res.std(1)
        name = synthetic_function.get_meta_information()["name"]
        final_result[name] = {"mean": m.tolist(), "std": s.tolist()}
    Path("EETPE.json").write_text(json.dumps(final_result))
def main():
    """Benchmark SMAC3 (SMAC4HPO) on every synthetic function.

    For each benchmark in ``synthetic_functions`` this runs ``repetitions``
    independent trials, collects the cost of every evaluated configuration
    from the run history, converts the curves to running minima with
    ``raw2min``, and stores mean/std curves per benchmark in the
    module-level ``final_result`` dict, dumped to ``SMAC3.json``.
    """
    for synthetic_function_cls in synthetic_functions:
        meta_info = synthetic_function_cls.get_meta_information()
        # Prefer the benchmark-recommended evaluation budget when present.
        if "num_function_evals" in meta_info:
            max_iter = meta_info["num_function_evals"]
        else:
            max_iter = base_max_iter
        # Build the hyper-parameter space from the benchmark bounds.
        config_space = ConfigurationSpace()
        config_space.generate_all_continuous_from_bounds(
            synthetic_function_cls.get_meta_information()['bounds'])
        synthetic_function = synthetic_function_cls()

        # Objective: regret with respect to the known optimum f_opt.
        def evaluation(config: dict):
            config = Configuration(config_space, values=config)
            return synthetic_function.objective_function(config)["function_value"] - \
                   synthetic_function.get_meta_information()["f_opt"]

        res = pd.DataFrame(columns=[f"trial-{i}" for i in range(repetitions)],
                           index=range(max_iter))
        print(meta_info["name"])
        for trial in range(repetitions):
            random_state = base_random_state + 10 * trial
            # Scenario object
            scenario = Scenario({
                "run_obj": "quality",  # we optimize quality (alternatively runtime)
                "runcount-limit": max_iter,  # max. number of function evaluations; for this example set to a low number
                "cs": config_space,  # configuration space
                "deterministic": "true"
            })
            smac = SMAC4HPO(scenario=scenario,
                            rng=np.random.RandomState(random_state),
                            tae_runner=evaluation,
                            initial_design_kwargs={"init_budget": 20})
            incumbent = smac.optimize()
            # Recover the full loss trajectory from the run history.
            runhistory = smac.runhistory
            configs = runhistory.get_all_configs()
            losses = [runhistory.get_cost(config) for config in configs]
            res[f"trial-{trial}"] = losses
            print(min(losses))
        res = raw2min(res)
        m = res.mean(1)
        s = res.std(1)
        name = synthetic_function.get_meta_information()["name"]
        final_result[name] = {"mean": m.tolist(), "std": s.tolist()}
    # Fixed: the original used an f-string with no placeholders (f"SMAC3.json").
    Path("SMAC3.json").write_text(json.dumps(final_result))
def main(optimizer):
    """Benchmark a given ultraopt optimizer on every synthetic function.

    Runs ``repetitions`` independent ``fmin`` trials per benchmark, turns
    the raw loss trajectories into running minima via ``raw2min``, and
    records mean/std plus 25/75/90 % quantile curves in ``final_result``,
    which is written to ``ultraopt_<optimizer>.json``.
    """
    for benchmark_cls in synthetic_functions:
        meta_info = benchmark_cls.get_meta_information()
        # Fall back to the global default budget when the benchmark
        # does not recommend one.
        max_iter = meta_info.get("num_function_evals", base_max_iter)
        # Hyper-parameter space built from the benchmark bounds.
        config_space = ConfigurationSpace()
        config_space.generate_all_continuous_from_bounds(
            benchmark_cls.get_meta_information()['bounds'])
        benchmark = benchmark_cls()
        synthetic_function = benchmark  # alias kept for the closure below

        # Objective: regret relative to the known optimum f_opt.
        def evaluation(config: dict):
            config = Configuration(config_space, values=config)
            return synthetic_function.objective_function(config)["function_value"] - \
                   synthetic_function.get_meta_information()["f_opt"]

        columns = [f"trial-{i}" for i in range(repetitions)]
        res = pd.DataFrame(columns=columns, index=range(max_iter))
        print(meta_info["name"])
        for rep in range(repetitions):
            seed = base_random_state + rep * 10
            ret = fmin(evaluation, config_space, optimizer=optimizer,
                       random_state=seed, n_iterations=max_iter)
            print(ret)
            # Losses observed at budget 1 (single-fidelity run).
            observed_losses = ret["budget2obvs"][1]["losses"]
            print(ret["best_loss"])
            res[f"trial-{rep}"] = observed_losses
        res = raw2min(res)
        mean_curve = res.mean(axis=1)
        std_curve = res.std(axis=1)
        q25 = res.quantile(0.25, 1).tolist()
        q75 = res.quantile(0.75, 1).tolist()
        q90 = res.quantile(0.90, 1).tolist()
        name = benchmark.get_meta_information()["name"]
        final_result[name] = {
            "mean": mean_curve.tolist(),
            "std": std_curve.tolist(),
            "q25": q25,
            "q75": q75,
            "q90": q90
        }
    Path(f"ultraopt_{optimizer}.json").write_text(json.dumps(final_result))
def main():
    """Benchmark hyperopt's TPE on every synthetic function.

    Runs ``repetitions`` independent ``fmin`` trials per benchmark with a
    TPE sampler (20 start-up jobs), converts the loss trajectories to
    running minima with ``raw2min``, and records mean/std curves per
    benchmark in ``final_result``, written to ``hyperopt.json``.
    """
    for benchmark_cls in synthetic_functions:
        meta_info = benchmark_cls.get_meta_information()
        # Use the benchmark-recommended budget if present, otherwise the
        # global default.
        max_iter = meta_info.get("num_function_evals", base_max_iter)
        # Hyper-parameter space built from the benchmark bounds, then
        # translated into hyperopt's space format.
        config_space = ConfigurationSpace()
        config_space.generate_all_continuous_from_bounds(
            benchmark_cls.get_meta_information()['bounds'])
        synthetic_function = benchmark_cls()
        space = CS2HyperoptSpace(config_space)

        # Objective: regret relative to the known optimum f_opt.
        def evaluation(config: dict):
            config = Configuration(config_space, values=config)
            return synthetic_function.objective_function(config)["function_value"] - \
                   synthetic_function.get_meta_information()["f_opt"]

        # NOTE: any clean-up of experiment parameters happens after storing.
        columns = [f"trial-{i}" for i in range(repetitions)]
        res = pd.DataFrame(columns=columns, index=range(max_iter))
        for rep in range(repetitions):
            seed = base_random_state + rep * 10
            # Seed the ConfigSpace object (affects subsequent sampling).
            config_space.seed(seed)
            trials = Trials()
            best = fmin(
                evaluation,
                space,
                algo=partial(tpe.suggest, n_startup_jobs=20),
                max_evals=max_iter,
                rstate=np.random.RandomState(seed),
                trials=trials,
            )
            res[f"trial-{rep}"] = trials.losses()
        res = raw2min(res)
        mean_curve = res.mean(axis=1)
        std_curve = res.std(axis=1)
        name = synthetic_function.get_meta_information()["name"]
        final_result[name] = {"mean": mean_curve.tolist(),
                              "std": std_curve.tolist()}
    Path("hyperopt.json").write_text(json.dumps(final_result))
def main():
    """Benchmark ultraopt's ETPE optimizer on every synthetic function.

    Runs ``repetitions`` independent ``fmin`` trials per benchmark using
    ``ETPEOptimizer(gamma2=0.95)``, converts the loss trajectories to
    running minima with ``raw2min``, and records mean/std curves per
    benchmark in ``final_result``, written to ``ultraopt.json``.
    """
    for benchmark_cls in synthetic_functions:
        meta_info = benchmark_cls.get_meta_information()
        # Use the benchmark-recommended budget if present, otherwise the
        # global default.
        max_iter = meta_info.get("num_function_evals", base_max_iter)
        # Hyper-parameter space built from the benchmark bounds.
        config_space = ConfigurationSpace()
        config_space.generate_all_continuous_from_bounds(
            benchmark_cls.get_meta_information()['bounds'])
        synthetic_function = benchmark_cls()

        # Objective: regret relative to the known optimum f_opt.
        def evaluation(config: dict):
            config = Configuration(config_space, values=config)
            return synthetic_function.objective_function(config)["function_value"] - \
                   synthetic_function.get_meta_information()["f_opt"]

        # NOTE: any clean-up of experiment parameters happens after storing.
        columns = [f"trial-{i}" for i in range(repetitions)]
        res = pd.DataFrame(columns=columns, index=range(max_iter))
        print(meta_info["name"])
        for rep in range(repetitions):
            seed = base_random_state + rep * 10
            ret = fmin(evaluation, config_space,
                       optimizer=ETPEOptimizer(gamma2=0.95),
                       random_state=seed, n_iterations=max_iter)
            print(ret)
            # Losses observed at budget 1 (single-fidelity run).
            observed_losses = ret["budget2obvs"][1]["losses"]
            print(ret["best_loss"])
            res[f"trial-{rep}"] = observed_losses
        res = raw2min(res)
        mean_curve = res.mean(axis=1)
        std_curve = res.std(axis=1)
        name = synthetic_function.get_meta_information()["name"]
        final_result[name] = {"mean": mean_curve.tolist(),
                              "std": std_curve.tolist()}
    Path("ultraopt.json").write_text(json.dumps(final_result))
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author : qichun tang # @Date : 2020-12-20 # @Contact : [email protected] from ConfigSpace import ConfigurationSpace, Configuration __all__ = ["config_space", "evaluate"] from ultraopt.benchmarks.synthetic_functions import MultiFidelityRosenbrock2D synthetic_function_cls = MultiFidelityRosenbrock2D config_space = ConfigurationSpace() config_space.generate_all_continuous_from_bounds( synthetic_function_cls.get_meta_information()['bounds']) synthetic_function = synthetic_function_cls() # 定义目标函数 def evaluate(config: dict, budget=100): config = Configuration(config_space, values=config) return synthetic_function.objective_function(config, budget=budget)["function_value"] - \ synthetic_function.get_meta_information()["f_opt"]