def test_optimization_problem_add_bad_type():
    with pytest.raises(ValidationError):
        OptimizationProblem.from_list(
            {
                "name": "param1",
                "category": "categorical",
                "search_space": {"values": ["a", "b"]},
            }
        )
def test_optimization_problem_from_list():
    optimization_problem = OptimizationProblem.from_list(
        [
            {"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
            {"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
        ]
    )
    optimization_problem.add_observations_from_list(
        [
            {"loss": 1.5, "sample": {"param1": "a", "param2": 1.5}},
            {"loss": 1.8, "sample": {"param1": "b", "param2": 1.8}},
            {"loss": 0.1, "sample": {"param1": "b", "param2": 1.05}},
        ],
        raise_exception=True,
    )
    assert type(optimization_problem.parameters) == list
    assert len(optimization_problem.observations) == 3
    assert optimization_problem.parameters_name == set(["param1", "param2"])
    assert {"param1": "b", "param2": 1.8} in optimization_problem.samples
    assert len(optimization_problem.samples) == 3
    assert optimization_problem.best_sample == {"param1": "b", "param2": 1.05}
    assert optimization_problem.sorted_observations[0].sample == {"param1": "b", "param2": 1.05}
    assert optimization_problem.finite is False
    assert len(optimization_problem.find_observations({"param1": "b", "param2": 1.05})) == 1
    a, b = optimization_problem.observations_quantile(0.5)
    assert len(a) == 1
    assert len(b) == 2
    assert optimization_problem.get_best_k_samples(1)[0].sample == {"param1": "b", "param2": 1.05}
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    logger = logging.getLogger("benderopt")
    np.random.seed(seed=seed)
    samples = []
    optimization_problem = OptimizationProblem.from_list(optimization_problem_parameters)
    optimizer = optimizers[optimizer_type](optimization_problem)
    for i in range(number_of_evaluation):
        logger.info("Evaluating {0}/{1}...".format(i + 1, number_of_evaluation))
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        logger.debug("f={0} for optimizer suggestion: {1}.".format(loss, sample))
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
def test_optimization_problem_from_list_bad_type():
    optimization_problem = OptimizationProblem.from_list(
        [
            {"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
            {"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
        ]
    )
    with pytest.raises(ValidationError):
        optimization_problem.add_observations_from_list("lol", raise_exception=True)
def test_optimization_problem_add_bad_observation():
    optimization_problem = OptimizationProblem.from_list(
        [
            {"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
            {"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
        ]
    )
    observation2 = Observation(sample={"lol": "b", "param2": 1.8}, loss=1.8)
    with pytest.raises(ValidationError):
        optimization_problem.add_observation(observation2)
def test_base_optimize_ok():
    optimization_problem = [
        {"name": "x", "category": "uniform", "search_space": {"low": 0, "high": np.pi}}
    ]
    optimization_problem = OptimizationProblem.from_list(optimization_problem)
    optimizer = optimizers["random"](optimization_problem)
    assert optimizer.observations == optimization_problem.observations
    optimizer = optimizers["random"](optimization_problem)
    assert len(optimizer.suggest(10)) == 10
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    np.random.seed(seed=seed)
    if isinstance(optimization_problem_parameters, list):
        optimization_problem = OptimizationProblem.from_list(optimization_problem_parameters)
    else:
        # Fail early instead of hitting a NameError below when the input is not a list.
        raise ValueError("optimization_problem_parameters should be a list of parameter dicts")
    samples = []
    optimizer = optimizers[optimizer_type](optimization_problem)
    for _ in range(number_of_evaluation):
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    logger = logging.getLogger("benderopt")
    RNG.seed(seed)
    samples = []
    optimization_problem = OptimizationProblem.from_list(optimization_problem_parameters)
    # Accept either the registered name of an optimizer or a BaseOptimizer subclass.
    if isinstance(optimizer_type, str):
        optimizer_type = optimizers[optimizer_type]
    if not issubclass(optimizer_type, BaseOptimizer):
        raise ValueError(
            "optimizer_type should either be a string or a subclass of BaseOptimizer, "
            "got {}".format(optimizer_type)
        )
    optimizer = optimizer_type(optimization_problem)
    for i in range(number_of_evaluation):
        logger.info("Evaluating {0}/{1}...".format(i + 1, number_of_evaluation))
        # Suggest a candidate, evaluate it, and feed the observation back to the problem.
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        logger.debug("f={0} for optimizer suggestion: {1}.".format(loss, sample))
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
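# Minimal usage sketch (not part of the library source): it assumes the minimize()
# defined above and the "parzen_estimator" optimizer name it uses as its default;
# the objective f below is a hypothetical example, shown only to illustrate the call.
def _example_minimize_usage():
    def f(x):
        return (x - 0.5) ** 2  # hypothetical objective with its minimum at x = 0.5

    optimization_problem_parameters = [
        {"name": "x", "category": "uniform", "search_space": {"low": 0, "high": 1}},
    ]
    best_sample = minimize(
        f,
        optimization_problem_parameters,
        optimizer_type="parzen_estimator",  # a registered name or a BaseOptimizer subclass
        number_of_evaluation=50,
        seed=0,
    )
    print(best_sample["x"])  # expected to drift toward 0.5 as observations accumulate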
def get_test_optimization_problem():
    from benderopt.base import OptimizationProblem

    return OptimizationProblem.from_json(
        "{}/tests/test_data.json".format(os.path.dirname(os.path.abspath(__file__)))
    )
def test_optimization_problem():
    parameter1 = Parameter(
        name="param1", category="categorical", search_space={"values": ["a", "b"]}
    )
    parameter2 = Parameter(name="param2", category="uniform", search_space={"low": 1, "high": 2})
    parameters = [parameter1, parameter2]
    optimization_problem = OptimizationProblem(parameters)
    observation1 = Observation(sample={"param1": "a", "param2": 1.5}, loss=1.5)
    optimization_problem.add_observation(observation1)
    observation2 = Observation(sample={"param1": "b", "param2": 1.8}, loss=1.8)
    optimization_problem.add_observation(observation2)
    observation3 = Observation(sample={"param1": "b", "param2": 1.05}, loss=0.1)
    optimization_problem.add_observation(observation3)
    assert type(optimization_problem.parameters) == list
    assert len(optimization_problem.observations) == 3
    assert optimization_problem.parameters_name == set(["param1", "param2"])
    assert observation1.sample in optimization_problem.samples
    assert len(optimization_problem.samples) == 3
    assert optimization_problem.best_sample == {"param1": "b", "param2": 1.05}
    assert optimization_problem.sorted_observations[0].sample == {"param1": "b", "param2": 1.05}
    assert optimization_problem.finite is False
    assert len(optimization_problem.find_observations({"param1": "b", "param2": 1.05})) == 1
    a, b = optimization_problem.observations_quantile(0.5)
    assert len(a) == 1
    assert len(b) == 2
    assert optimization_problem.get_best_k_samples(1)[0].sample == {"param1": "b", "param2": 1.05}
def test_optimization_problem_bad_param_type():
    with pytest.raises(ValidationError):
        OptimizationProblem(["lol"])
if __name__ == "__main__":

    def f(x):
        return np.sin(x)

    optimization_problem_parameters = [
        {"name": "x", "category": "uniform", "search_space": {"low": 0, "high": 2 * np.pi}}
    ]
    # minimize builds the OptimizationProblem itself from the parameter list.
    best_sample = minimize(f, optimization_problem_parameters, number_of_evaluation=100)
    print(best_sample["x"], 3 * np.pi / 2)