Example #1
0
def test_optimization_problem():
    """Exercise OptimizationProblem bookkeeping across three observations."""
    params = [
        Parameter(
            name="param1", category="categorical", search_space={"values": ["a", "b"]}
        ),
        Parameter(name="param2", category="uniform", search_space={"low": 1, "high": 2}),
    ]
    problem = OptimizationProblem(params)

    first = Observation(sample={"param1": "a", "param2": 1.5}, loss=1.5)
    second = Observation(sample={"param1": "b", "param2": 1.8}, loss=1.8)
    third = Observation(sample={"param1": "b", "param2": 1.05}, loss=0.1)
    for obs in (first, second, third):
        problem.add_observation(obs)

    # The lowest-loss observation (loss=0.1) should win everywhere below.
    best = {"param1": "b", "param2": 1.05}
    assert type(problem.parameters) == list
    assert len(problem.observations) == 3
    assert problem.parameters_name == {"param1", "param2"}
    assert first.sample in problem.samples
    assert len(problem.samples) == 3
    assert problem.best_sample == best
    assert problem.sorted_observations[0].sample == best
    assert problem.finite is False
    assert len(problem.find_observations(best)) == 1
    below, above = problem.observations_quantile(0.5)
    assert len(below) == 1
    assert len(above) == 2
    assert problem.get_best_k_samples(1)[0].sample == best
Example #2
0
def test_parameter_from_dict_bad_sample():
    """A list-typed sample (must be a dict) is rejected by from_dict."""
    payload = {"sample": [2], "loss": 0.8}
    with pytest.raises(ValidationError):
        Observation.from_dict(payload)
Example #3
0
def test_parameter_from_dict_missing_loss():
    """Omitting the required 'loss' key makes from_dict raise."""
    payload = {"sample": {"alpha": 2}}
    with pytest.raises(ValidationError):
        Observation.from_dict(payload)
Example #4
0
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    """Minimize ``f`` over the described search space.

    Parameters
    ----------
    f : callable
        Objective evaluated as ``f(**sample)``; must return the loss.
    optimization_problem_parameters : list
        Parameter dicts accepted by ``OptimizationProblem.from_list``.
    optimizer_type : str
        Key into the ``optimizers`` registry.
    number_of_evaluation : int
        Number of samples to suggest and evaluate.
    seed : int, optional
        Forwarded to ``np.random.seed`` for reproducibility.
    debug : bool
        If True, return every evaluated sample instead of the best one.

    Returns
    -------
    dict or list
        The best sample found, or all samples when ``debug`` is True.
    """
    logger = logging.getLogger("benderopt")

    np.random.seed(seed=seed)

    samples = []
    optimization_problem = OptimizationProblem.from_list(
        optimization_problem_parameters)
    optimizer = optimizers[optimizer_type](optimization_problem)
    for i in range(number_of_evaluation):
        # Lazy %-style args: the message is only formatted when the
        # corresponding log level is actually enabled.
        logger.info("Evaluating %d/%d...", i + 1, number_of_evaluation)
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        logger.debug("f=%s for optimizer suggestion: %s.", loss, sample)
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
def test_parameter_from_dict():
    """A well-formed payload round-trips into an Observation."""
    payload = {"loss": 0.8, "sample": {"alpha": 2}}
    obs = Observation.from_dict(payload)
    assert obs.parameters_name == {"alpha"}
Example #6
0
def test_optimization_problem_add_bad_observation():
    """An observation whose sample keys don't match the parameters is rejected."""
    specs = [
        {"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
        {"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
    ]
    problem = OptimizationProblem.from_list(specs)
    # 'lol' is not a declared parameter name.
    rogue = Observation(sample={"lol": "b", "param2": 1.8}, loss=1.8)
    with pytest.raises(ValidationError):
        problem.add_observation(rogue)
Example #7
0
def minimize(
    f,
    optimization_problem,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    """Run the selected optimizer on ``optimization_problem``.

    ``f`` is called as ``f(**sample)`` for each suggested sample and must
    return the loss.  Returns the best sample found, or the full list of
    evaluated samples when ``debug`` is True.
    """

    np.random.seed(seed=seed)

    history = []
    optimizer = optimizers[optimizer_type](optimization_problem)
    for _ in range(number_of_evaluation):
        candidate = optimizer.suggest()
        history.append(candidate)
        loss = f(**candidate)
        optimization_problem.add_observation(
            Observation.from_dict({"loss": loss, "sample": candidate})
        )
    return history if debug is True else optimization_problem.best_sample
Example #8
0
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    """Minimize ``f`` over the described search space.

    Parameters
    ----------
    f : callable
        Objective evaluated as ``f(**sample)``; must return the loss.
    optimization_problem_parameters : list
        Parameter dicts accepted by ``OptimizationProblem.from_list``.
    optimizer_type : str or type
        Either a key into the ``optimizers`` registry or a
        ``BaseOptimizer`` subclass.
    number_of_evaluation : int
        Number of samples to suggest and evaluate.
    seed : int, optional
        Forwarded to ``RNG.seed`` for reproducibility.
    debug : bool
        If True, return every evaluated sample instead of the best one.

    Returns
    -------
    dict or list
        The best sample found, or all samples when ``debug`` is True.

    Raises
    ------
    ValueError
        If ``optimizer_type`` is neither a registry key nor a
        ``BaseOptimizer`` subclass.
    """
    logger = logging.getLogger("benderopt")

    RNG.seed(seed)

    samples = []
    optimization_problem = OptimizationProblem.from_list(optimization_problem_parameters)

    # Accept either a registry key or an optimizer class directly.
    if isinstance(optimizer_type, str):
        optimizer_type = optimizers[optimizer_type]
    if not issubclass(optimizer_type, BaseOptimizer):
        raise ValueError(
            "optimizer_type should either be a string or a subclass of BaseOptimizer, got {}".format(
                optimizer_type
            )
        )
    optimizer = optimizer_type(optimization_problem)

    for i in range(number_of_evaluation):
        # Lazy %-style args: formatting is deferred until the record is
        # actually emitted at an enabled level.
        logger.info("Evaluating %d/%d...", i + 1, number_of_evaluation)
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        logger.debug("f=%s for optimizer suggestion: %s.", loss, sample)
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
Example #9
0
def test_parameter_init():
    """Direct construction exposes the sample's keys as parameters_name."""
    obs = Observation(sample={"alpha": 2}, loss=0.8)
    assert obs.parameters_name == {"alpha"}
Example #10
0
def test_parameter_from_dict_missing_sample():
    """Omitting the required 'sample' key makes from_dict raise."""
    with pytest.raises(ValidationError):
        Observation.from_dict({"loss": 0.8})
Example #11
0
def test_parameter_bad_sample_format():
    """Constructing an Observation with a non-dict sample raises."""
    with pytest.raises(ValidationError):
        Observation(sample=[2], loss=0.8)