Example 1
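All of these snippets appear to come from nevergrad's optimizer test suite (test_optimizerlib.py) and rely on module-level names that are not shown. A plausible import header, assuming nevergrad's own module layout, would be:

import random
import platform
from unittest import SkipTest
import numpy as np
import nevergrad as ng
import nevergrad.common.typing as tp
from nevergrad.common import testing
from nevergrad.optimization import base
from nevergrad.optimization import optimizerlib as optlib
from nevergrad.optimization.optimizerlib import registry

# Fitness, RecommendationKeeper and UNSEEDABLE are helpers defined in the
# test module itself, not part of nevergrad's public API.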
def test_tell_not_asked(name: str) -> None:
    param = ng.p.Scalar()
    with testing.suppress_nevergrad_warnings():
        opt = optlib.registry[name](parametrization=param, budget=2, num_workers=2)
    opt.llambda = 2  # type: ignore
    t_10 = opt.parametrization.spawn_child(new_value=10)
    t_100 = opt.parametrization.spawn_child(new_value=100)
    assert not opt.population  # type: ignore
    opt.tell(t_10, 90)  # not asked
    assert len(opt.population) == 1  # type: ignore
    asked = opt.ask()
    opt.tell(asked, 88)
    assert len(opt.population) == 2  # type: ignore
    opt.tell(t_100, 0)  # not asked
    asked = opt.ask()
    opt.tell(asked, 89)
    assert len(opt.population) == 2  # type: ignore
    assert opt.num_tell == 4, opt.num_tell
    assert opt.num_ask == 2
    assert len(opt.population) == 2  # type: ignore
    assert int(opt.recommend().value) == 100
    if isinstance(opt.population, dict):  # type: ignore
        assert t_100.uid in opt.population  # type: ignore
    for point, value in opt.archive.items_as_arrays():
        assert value.count == 1, f"Error for point {point}"
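The tell-not-asked flow exercised above can be reproduced outside the test harness. A minimal sketch, assuming TBPSA (a population-based optimizer that supports telling non-asked points):

import nevergrad as ng

opt = ng.optimizers.TBPSA(parametrization=ng.p.Scalar(), budget=100)
# Build a candidate by hand instead of calling ask() ...
external = opt.parametrization.spawn_child(new_value=3.0)
opt.tell(external, 42.0)  # ... and inject it: a "tell not asked"
candidate = opt.ask()  # the regular ask/tell loop still works afterwards
opt.tell(candidate, 7.0)
print(opt.recommend().value)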
Example 2
def check_optimizer(
    optimizer_cls: tp.Union[base.ConfiguredOptimizer, tp.Type[base.Optimizer]],
    budget: int = 300,
    verify_value: bool = True,
) -> None:
    # recast optimizers do not support num_workers > 1, and no_parallelization must be respected.
    num_workers = 1 if optimizer_cls.recast or optimizer_cls.no_parallelization else 2
    num_attempts = 1 if not verify_value else 3  # allow 3 attempts to reach the optimum (stochastic failures happen)
    optimum = [0.5, -0.8]
    fitness = Fitness(optimum)
    for k in range(1, num_attempts + 1):
        fitness = Fitness(optimum)
        optimizer = optimizer_cls(parametrization=len(optimum),
                                  budget=budget,
                                  num_workers=num_workers)
        assert isinstance(
            optimizer.provide_recommendation(), ng.p.Parameter
        ), "Recommendation should be available from the start"
        with testing.suppress_nevergrad_warnings():
            candidate = optimizer.minimize(fitness)
        raised = False
        if verify_value:
            try:
                np.testing.assert_array_almost_equal(candidate.args[0],
                                                     optimum,
                                                     decimal=1)
            except AssertionError as e:
                raised = True
                print(
                    f"Attempt #{k}: failed with best point {tuple(candidate.args[0])}"
                )
                if k == num_attempts:
                    raise e
        if not raised:
            break
    if budget > 100:
        slope, intercept = fitness.get_factors()
        print(f"For your information: slope={slope} and intercept={intercept}")
    # make sure we are correctly tracking the best values
    archive = optimizer.archive
    assert optimizer.current_bests["pessimistic"].pessimistic_confidence_bound == min(
        v.pessimistic_confidence_bound for v in archive.values()
    )
    # add a random point to test tell_not_asked
    assert not optimizer._asked, "All `ask`s should have been followed by a `tell`"
    try:
        data = np.random.normal(0, 1, size=optimizer.dimension)
        candidate = optimizer.parametrization.spawn_child().set_standardized_data(data)
        optimizer.tell(candidate, 12.0)
    except Exception as e:  # pylint: disable=broad-except
        if not isinstance(e, base.errors.TellNotAskedNotSupportedError):
            raise AssertionError(
                "Optimizers should raise base.errors.TellNotAskedNotSupportedError "
                "when telling non-asked points if they do not support it"
            ) from e
    else:
        assert optimizer.num_tell == budget + 1
        assert optimizer.num_tell_not_asked == 1
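For context, the happy path that check_optimizer wraps in assertions is nevergrad's standard minimize loop. A minimal sketch with an arbitrarily chosen registered optimizer (OnePlusOne) and a toy objective mirroring the test's Fitness helper:

import numpy as np
import nevergrad as ng

def sphere(x: np.ndarray) -> float:
    # optimum at [0.5, -0.8], as in the test above
    return float(np.sum((x - np.array([0.5, -0.8])) ** 2))

opt = ng.optimizers.OnePlusOne(parametrization=2, budget=300)
recommendation = opt.minimize(sphere)
print(recommendation.value)  # should land near [0.5, -0.8]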
Example 3
def test_optimizers_recommendation(name: str,
                                   recomkeeper: RecommendationKeeper) -> None:
    if name in UNSEEDABLE:
        raise SkipTest("Not playing nicely with the tests (unseedable)")
    if "BO" in name:
        raise SkipTest("BO differs from one computer to another")
    # set up environment
    optimizer_cls = registry[name]
    np.random.seed(None)
    if optimizer_cls.recast:
        np.random.seed(12)
        random.seed(12)  # may depend on a non-numpy generator
    # budget=6 by default, larger for special cases needing more
    budget = {
        "WidePSO": 100,
        "PSO": 200,
        "MEDA": 100,
        "EDA": 100,
        "MPCEDA": 100,
        "TBPSA": 100
    }.get(name, 6)
    if isinstance(optimizer_cls, (optlib.DifferentialEvolution, optlib.EvolutionStrategy)):
        budget = 80
    dimension = min(16, max(4, int(np.sqrt(budget))))
    # set up problem
    fitness = Fitness([0.5, -0.8, 0, 4] +
                      (5 * np.cos(np.arange(dimension - 4))).tolist())
    with testing.suppress_nevergrad_warnings():
        optim = optimizer_cls(parametrization=dimension,
                              budget=budget,
                              num_workers=1)
        optim.parametrization.random_state.seed(12)
        np.testing.assert_equal(optim.name, name)
        # the following context manager speeds up BO tests
        # BEWARE: BO tests are deterministic but can get different results from one computer to another.
        # Reducing the precision could help in this regard.
        # patched = partial(acq_max, n_warmup=10000, n_iter=2)
        # with patch("bayes_opt.bayesian_optimization.acq_max", patched):
        recom = optim.minimize(fitness)
    if name not in recomkeeper.recommendations.index:
        recomkeeper.recommendations.loc[name, :dimension] = tuple(recom.value)
        raise ValueError(
            f'Recorded the value {tuple(recom.value)} for optimizer "{name}", please rerun this test locally.'
        )
    # BO differs slightly from one computer to another
    decimal = 2 if isinstance(optimizer_cls, optlib.ParametrizedBO) or "BO" in name else 5
    np.testing.assert_array_almost_equal(
        recom.value,
        recomkeeper.recommendations.loc[name, :][:dimension],
        decimal=decimal,
        err_msg="Something has changed, if this is normal, delete the following "
        f"file and rerun to update the values:\n{recomkeeper.filepath}",
    )
    # check that by default the recommendation has been evaluated
    if isinstance(optimizer_cls, optlib.EvolutionStrategy):  # no noisy variants
        assert recom.loss is not None
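Reproducibility here hinges on seeding the parametrization's own random state rather than the global numpy generator. A minimal sketch, assuming a seedable (non-recast) optimizer such as TBPSA:

import numpy as np
import nevergrad as ng

def run() -> np.ndarray:
    opt = ng.optimizers.TBPSA(parametrization=4, budget=40)
    opt.parametrization.random_state.seed(12)  # the optimizer's RNG, not np.random
    return opt.minimize(lambda x: float(np.sum(x**2))).value

assert np.array_equal(run(), run())  # identical recommendations across runs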
Example 4
def test_bo_ordering() -> None:
    with testing.suppress_nevergrad_warnings():  # tests do not need to be efficient
        optim = ng.optimizers.ParametrizedBO(initialization="Hammersley")(
            parametrization=ng.p.Choice(range(12)), budget=10
        )
    cand = optim.ask()
    optim.tell(cand, 12)
    optim.provide_recommendation()
Example 5
def test_parametrization_offset(name: str) -> None:
    if "PSO" in name or "BO" in name:
        raise SkipTest("PSO and BO have large initial variance")
    if "Cobyla" in name and platform.system() == "Windows":
        raise SkipTest("Cobyla is flaky on Windows for unknown reasons")
    parametrization = ng.p.Instrumentation(ng.p.Array(init=[1e12, 1e12]))
    with testing.suppress_nevergrad_warnings():
        optimizer = registry[name](parametrization, budget=100, num_workers=1)
    for k in range(10 if "BO" not in name else 2):
        candidate = optimizer.ask()
        assert (
            candidate.args[0][0] > 100
        ), f"Candidate value[0] at iteration #{k} is below 100: {candidate.value}"
        optimizer.tell(candidate, 0)
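The behaviour under test comes from ng.p.Array(init=...): candidates are sampled around the init point, so early asks stay near it even for a huge offset. A minimal sketch (OnePlusOne chosen arbitrarily):

import nevergrad as ng

param = ng.p.Instrumentation(ng.p.Array(init=[1e12, 1e12]))
opt = ng.optimizers.OnePlusOne(parametrization=param, budget=100)
candidate = opt.ask()
print(candidate.args[0])  # expected to remain close to [1e12, 1e12] early on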