Example #1
def test_parameter_from_dict_bad_sample():
    data = {
        "loss": 0.8,
        "sample": [2],
    }
    with pytest.raises(ValidationError):
        Observation.from_dict(data)
Example #2
def test_parameter_from_dict_missing_loss():
    data = {
        "sample": {
            "alpha": 2
        },
    }
    with pytest.raises(ValidationError):
        Observation.from_dict(data)
Example #3
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    logger = logging.getLogger("benderopt")

    np.random.seed(seed=seed)

    samples = []
    optimization_problem = OptimizationProblem.from_list(optimization_problem_parameters)
    optimizer = optimizers[optimizer_type](optimization_problem)
    for i in range(number_of_evaluation):
        logger.info("Evaluating {0}/{1}...".format(i + 1,
                                                   number_of_evaluation))
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        logger.debug("f={0} for optimizer suggestion: {1}.".format(
            loss, sample))
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
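This variant reports progress through the "benderopt" logger instead of printing, so nothing appears unless the caller configures logging. A minimal sketch of how one might surface those messages (the level choice is illustrative):

import logging

# DEBUG shows both the "Evaluating i/n..." progress lines and the per-sample
# loss messages; logging.INFO would show only the progress lines.
logging.basicConfig(level=logging.DEBUG)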
Example #4
def test_parameter_from_dict():
    data = {
        "sample": {"alpha": 2},
        "loss": 0.8
    }
    observation = Observation.from_dict(data)
    assert observation.parameters_name == set(["alpha"])
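The passing case above pins down the contract: the payload needs a numeric "loss" and a "sample" dictionary keyed by parameter name. A minimal sketch of that happy path, assuming benderopt.base as the import path and that the resulting Observation keeps the values it was built from (the loss and sample attribute names are assumptions; only parameters_name appears in the test):

from benderopt.base import Observation  # import path is an assumption

observation = Observation.from_dict({"loss": 0.8, "sample": {"alpha": 2}})
print(observation.parameters_name)  # {'alpha'}
# Assumed attributes mirroring the input payload:
#   observation.loss   -> 0.8
#   observation.sample -> {"alpha": 2}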
Example #5
def minimize(
    f,
    optimization_problem,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):

    np.random.seed(seed=seed)

    samples = []
    optimizer = optimizers[optimizer_type](optimization_problem)
    for _ in range(number_of_evaluation):
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
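Unlike Example #3, this variant expects an already-built OptimizationProblem, so constructing it from the parameter list is the caller's job. A minimal sketch of a call, assuming OptimizationProblem.from_list (used in Examples #3 and #6), the benderopt.base import path, and the parameter schema from the benderopt README; the sine objective and its bounds are illustrative only:

import numpy as np
from benderopt.base import OptimizationProblem  # import path is an assumption

def f(x):
    return np.sin(x)

# The caller builds the problem up front and hands it to this minimize variant.
optimization_problem = OptimizationProblem.from_list(
    [{"name": "x", "category": "uniform", "search_space": {"low": 0, "high": 2 * np.pi}}]
)
best_sample = minimize(f, optimization_problem, number_of_evaluation=50)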
Example #6
def minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=None,
    debug=False,
):
    logger = logging.getLogger("benderopt")

    RNG.seed(seed)

    samples = []
    optimization_problem = OptimizationProblem.from_list(optimization_problem_parameters)

    if isinstance(optimizer_type, str):
        optimizer_type = optimizers[optimizer_type]
    if not issubclass(optimizer_type, BaseOptimizer):
        raise ValueError(
            "optimizer_type should either be a string or a subclass of BaseOptimizer, got {}".format(
                optimizer_type
            )
        )
    optimizer = optimizer_type(optimization_problem)

    for i in range(number_of_evaluation):
        logger.info("Evaluating {0}/{1}...".format(i + 1, number_of_evaluation))
        sample = optimizer.suggest()
        samples.append(sample)
        loss = f(**sample)
        logger.debug("f={0} for optimizer suggestion: {1}.".format(loss, sample))
        observation = Observation.from_dict({"loss": loss, "sample": sample})
        optimization_problem.add_observation(observation)
    if debug is True:
        return samples
    return optimization_problem.best_sample
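For the variants that take the raw parameter list (Examples #3 and #6), a call looks like the sketch below. The top-level import and the parameter schema follow the benderopt README but are assumptions here, and the sine objective is illustrative; Example #6 would also accept an optimizer class in place of the "parzen_estimator" string.

import numpy as np
from benderopt import minimize  # top-level import is an assumption

def f(x):
    return np.sin(x)  # minimum of sin on [0, 2*pi] is at x = 3*pi/2

optimization_problem_parameters = [
    {"name": "x", "category": "uniform", "search_space": {"low": 0, "high": 2 * np.pi}},
]

best_sample = minimize(
    f,
    optimization_problem_parameters,
    optimizer_type="parzen_estimator",
    number_of_evaluation=100,
    seed=42,
)
# best_sample is assumed to be a dict keyed by parameter name, as suggested by
# f(**sample) in the evaluation loop.
print(best_sample["x"])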
Example #7
def test_parameter_from_dict_missing_sample():
    data = {"loss": 0.8}
    with pytest.raises(ValidationError):
        Observation.from_dict(data)