Code example #1
def test_optimization_problem_add_bad_type():
    # from_list expects a list of parameter dicts; a bare dict must be rejected
    with pytest.raises(ValidationError):
        OptimizationProblem.from_list({
            "name": "param1",
            "category": "categorical",
            "search_space": {
                "values": ["a", "b"]
            }
        })
Code example #2
def test_optimization_problem_from_list():
    optimization_problem = OptimizationProblem.from_list(
        [
            {"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
            {"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
        ]
    )

    optimization_problem.add_observations_from_list(
        [
            {"loss": 1.5, "sample": {"param1": "a", "param2": 1.5}},
            {"loss": 1.8, "sample": {"param1": "b", "param2": 1.8}},
            {"loss": 0.1, "sample": {"param1": "b", "param2": 1.05}},
        ],
        raise_exception=True,
    )

    assert type(optimization_problem.parameters) == list
    assert len(optimization_problem.observations) == 3
    assert optimization_problem.parameters_name == set(["param1", "param2"])
    assert {"param1": "b", "param2": 1.8} in optimization_problem.samples
    assert len(optimization_problem.samples) == 3
    assert optimization_problem.best_sample == {"param1": "b", "param2": 1.05}
    assert optimization_problem.sorted_observations[0].sample == {"param1": "b", "param2": 1.05}
    assert optimization_problem.finite is False
    assert len(optimization_problem.find_observations({"param1": "b", "param2": 1.05})) == 1
    a, b = optimization_problem.observations_quantile(0.5)
    assert len(a) == 1
    assert len(b) == 2
    assert optimization_problem.get_best_k_samples(1)[0].sample == {"param1": "b", "param2": 1.05}
Code example #3
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    logger = logging.getLogger("benderopt")

    np.random.seed(seed=seed)

    samples = []
    optimization_problem = OptimizationProblem.from_list(
        optimization_problem_parameters)
    optimizer = optimizers[optimizer_type](optimization_problem)
    for i in range(number_of_evaluation):
        logger.info("Evaluating {0}/{1}...".format(i + 1,
                                                   number_of_evaluation))
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        logger.debug("f={0} for optimizer suggestion: {1}.".format(
            loss, sample))
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
Code example #4
def test_optimization_problem_from_list_bad_type():
    optimization_problem = OptimizationProblem.from_list(
        [
            {"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
            {"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
        ]
    )
    with pytest.raises(ValidationError):
        # a plain string is not a valid list of observation dicts
        optimization_problem.add_observations_from_list("lol", raise_exception=True)
Code example #5
def test_optimization_problem_add_bad_observation():
    optimization_problem = OptimizationProblem.from_list(
        [
            {"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
            {"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
        ]
    )
    # "lol" is not a declared parameter name, so adding the observation must fail
    observation2 = Observation(sample={"lol": "b", "param2": 1.8}, loss=1.8)
    with pytest.raises(ValidationError):
        optimization_problem.add_observation(observation2)
Code example #6
def test_base_optimize_ok():

    optimization_problem = [
        {"name": "x", "category": "uniform", "search_space": {"low": 0, "high": np.pi,}}
    ]

    optimization_problem = OptimizationProblem.from_list(optimization_problem)
    optimizer = optimizers["random"](optimization_problem)
    assert optimizer.observations == optimization_problem.observations
    optimizer = optimizers["random"](optimization_problem)
    assert len(optimizer.suggest(10)) == 10
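
For reference, a sketch of the manual suggest/observe loop that minimize automates, using only calls that appear in the surrounding snippets; the sine objective, the bounds, and the import paths are assumptions, not taken verbatim from the project.

import numpy as np

# import paths assumed from the project layout behind these snippets
from benderopt.base import Observation, OptimizationProblem
from benderopt.optimizer import optimizers

def f(x):
    return np.sin(x)

optimization_problem = OptimizationProblem.from_list(
    [{"name": "x", "category": "uniform", "search_space": {"low": 0, "high": 2 * np.pi}}]
)
optimizer = optimizers["random"](optimization_problem)

for _ in range(30):
    sample = optimizer.suggest()  # ask the optimizer for one candidate sample
    loss = f(**sample)            # evaluate the objective on that candidate
    observation = Observation.from_dict({"loss": loss, "sample": sample})
    optimization_problem.add_observation(observation)  # record the result

print(optimization_problem.best_sample)  # sample with the lowest observed loss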
Code example #7
def minimize(f,
             optimization_problem_parameters,
             optimizer_type="parzen_estimator",
             number_of_evaluation=100,
             seed=None,
             debug=False,
             ):

    np.random.seed(seed=seed)
    if isinstance(optimization_problem_parameters, list):
        optimization_problem = OptimizationProblem.from_list(optimization_problem_parameters)
    else:
        # assume an already-built OptimizationProblem instance was passed in
        optimization_problem = optimization_problem_parameters

    samples = []
    optimizer = optimizers[optimizer_type](optimization_problem)
    for _ in range(number_of_evaluation):
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
Code example #8
File: minimizer.py  Project: vthorey/benderopt
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    logger = logging.getLogger("benderopt")

    RNG.seed(seed)

    samples = []
    optimization_problem = OptimizationProblem.from_list(optimization_problem_parameters)

    if isinstance(optimizer_type, str):
        optimizer_type = optimizers[optimizer_type]
    if not issubclass(optimizer_type, BaseOptimizer):
        raise ValueError(
            "optimizer_type should either be a string or a subclass of BaseOptimizer, got {}".format(
                optimizer_type
            )
        )
    optimizer = optimizer_type(optimization_problem)

    for i in range(number_of_evaluation):
        logger.info("Evaluating {0}/{1}...".format(i + 1, number_of_evaluation))
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        logger.debug("f={0} for optimizer suggestion: {1}.".format(loss, sample))
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
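
A minimal usage sketch for the minimize function above, assuming it is already in scope as defined in this snippet; the quadratic objective and the single uniform parameter are illustrative, not part of the project.

def objective(x):
    # illustrative loss: a parabola whose minimum sits at x = 2
    return (x - 2) ** 2

# one uniform parameter searched over [0, 10]
optimization_problem_parameters = [
    {"name": "x", "category": "uniform", "search_space": {"low": 0, "high": 10}},
]

best_sample = minimize(
    objective,
    optimization_problem_parameters,
    number_of_evaluation=50,
    seed=0,
)
print(best_sample["x"])  # expected to land close to 2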
Code example #9
File: minimizer.py  Project: Asherkab/benderopt
        # (excerpt: the tail of minimize(); see code example #8 for the full definition)
        samples.append(sample)
        loss = f(**sample)
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample


if __name__ == "__main__":

    def f(x):
        return np.sin(x)

    optimization_problem_parameters = [{
        "name": "x",
        "category": "uniform",
        "search_space": {
            "low": 0,
            "high": 2 * np.pi,
        }
    }]

    # minimize builds the OptimizationProblem itself, so pass the parameter list directly
    best_sample = minimize(f,
                           optimization_problem_parameters,
                           number_of_evaluation=100)

    print(best_sample["x"], 3 * np.pi / 2)