Example no. 1
0
def test_purely_categorical_space():
    """Regression test for #908: `tell` must work on an all-categorical space."""
    # TODO: Refactor - Use PyTest
    space = [Categorical(["a", "b", "c"]), Categorical(["A", "B", "C"])]
    opt = Optimizer(space, n_initial_points=1, random_state=3)

    suggestion = opt.ask()
    # Prior to the fix for #908, this `tell` call raised an exception
    opt.tell(suggestion, 1.0)
Example no. 2
0
def test_returns_result_object():
    """`Optimizer.tell` should return an `OptimizeResult` with consistent fields."""
    # TODO: Refactor - Use PyTest
    regressor = ExtraTreesRegressor(random_state=2)
    optimizer = Optimizer([(-2.0, 2.0)], regressor, n_initial_points=1, acq_optimizer="sampling")
    res = optimizer.tell([1.5], 2.0)

    assert isinstance(res, OptimizeResult)
    # One objective value per evaluated point, and `fun` is the best seen
    assert len(res.x_iters) == len(res.func_vals)
    assert np.min(res.func_vals) == res.fun
Example no. 3
0
def test_acq_optimizer_with_time_api(base_estimator, acq_func):
    """Exercise the `(objective, time)` tuple form of `tell` for time-aware acq funcs."""
    # TODO: Refactor - Use PyTest
    optimizer = Optimizer(
        [(-2.0, 2.0)],
        base_estimator=base_estimator,
        acq_func=acq_func,
        acq_optimizer="sampling",
        n_initial_points=2,
    )
    first = optimizer.ask()
    optimizer.tell(first, (bench1(first), 1.0))
    second = optimizer.ask()
    result = optimizer.tell(second, (bench1(second), 2.0))

    # Both suggestions are random (initial points), so they must differ.
    assert first != second

    # One fitted model, and one objective value plus one timing per point
    assert len(result.models) == 1
    assert_array_equal(result.func_vals.shape, (2,))
    assert_array_equal(result.log_time.shape, (2,))

    # TODO: Refactor - Split into separate error test
    # A bare objective (no timing) is invalid once the time API is in use
    with pytest.raises(TypeError):
        optimizer.tell(second, bench1(second))
Example no. 4
0
def test_optimizer_base_estimator_string_invalid(base_estimator):
    """Check that error is raised when `Optimizer` is given an invalid `base_estimator` string"""
    expected = r"Expected `base_estimator` in \['GP', 'ET', 'RF', 'GBRT', 'DUMMY'\]\. Got {}".format(
        base_estimator
    )
    with pytest.raises(ValueError, match=expected):
        Optimizer([(-2.0, 2.0)], base_estimator=base_estimator, n_initial_points=1)
Example no. 5
0
def test_acq_optimizer(base_estimator):
    """Tree-based regressors must be rejected with any `acq_optimizer` other than "sampling"."""
    expected = f"Regressor {type(base_estimator)} requires `acq_optimizer`='sampling'"
    with pytest.raises(ValueError, match=expected):
        Optimizer(
            [(-2.0, 2.0)],
            base_estimator=base_estimator,
            n_initial_points=1,
            acq_optimizer="lbfgs",
        )
Example no. 6
0
def et_optimizer(request):
    """Build an `Optimizer` backed by an `ExtraTreesRegressor` over the parametrized space."""
    regressor = ExtraTreesRegressor(random_state=2)
    return Optimizer(
        request.param,
        regressor,
        n_initial_points=1,
        acq_optimizer="sampling",
    )
Example no. 7
0
def test_optimizer_copy(acq_func):
    """Check that base estimator, objective and target values are copied correctly"""
    # TODO: Refactor - Use PyTest
    regressor = ExtraTreesRegressor(random_state=2)
    original = Optimizer(
        [(-2.0, 2.0)],
        regressor,
        acq_func=acq_func,
        n_initial_points=1,
        acq_optimizer="sampling",
    )

    # Gather a few observations so there is state worth copying; time-aware
    # ("ps") acquisition functions need an objective that also reports duration
    objective = bench1_with_time if "ps" in acq_func else bench1
    original.run(objective, n_iter=3)

    clone = original.copy()
    cloned_estimator = clone.base_estimator

    if "ps" in acq_func:
        assert isinstance(cloned_estimator, MultiOutputRegressor)
        # Check that `base_estimator` is not wrapped multiple times
        assert not isinstance(cloned_estimator.estimator, MultiOutputRegressor)
    else:
        assert not isinstance(cloned_estimator, MultiOutputRegressor)

    # Observed points and objective values carry over to the copy
    assert_array_equal(clone.Xi, original.Xi)
    assert_array_equal(clone.yi, original.yi)
Example no. 8
0
def test_exhaust_initial_calls(base_estimator):
    """Check that a model is fitted and used to make suggestions after adding at least
    `n_initial_points` via `tell`"""
    # TODO: Refactor - Use PyTest
    opt = Optimizer(
        [(-2.0, 2.0)],
        base_estimator,
        n_initial_points=2,
        acq_optimizer="sampling",
        random_state=1,
    )
    # The "dummy" estimator never fits a surrogate and always suggests randomly
    is_dummy = base_estimator.lower() == "dummy"

    # Until the surrogate has been fitted (after `tell`-ing `n_initial_points`),
    #   `ask` returns random points, so consecutive suggestions must differ
    x0 = opt.ask()
    x1 = opt.ask()
    assert x0 != x1

    #################### First `tell` Call ####################
    result_1 = opt.tell(x1, 3.0)
    # Only 1 of `n_initial_points` told, so no surrogate model exists yet
    assert len(result_1.models) == 0
    x2 = opt.ask()  # Random point
    assert x1 != x2

    #################### Second `tell` Call ####################
    result_2 = opt.tell(x2, 4.0)
    # The second point completes the initial set, so a surrogate is fitted now
    #   (unless using the "dummy" estimator)
    assert len(result_2.models) == (0 if is_dummy else 1)

    #################### First Non-Random Point ####################
    x3 = opt.ask()
    assert x2 != x3
    x4 = opt.ask()
    result_3 = opt.tell(x3, 1.0)

    # No new information arrived between the two asks, so the suggestion
    #   repeats - except for "dummy", which keeps returning random points
    if is_dummy:
        assert x3 != x4
        assert len(result_3.models) == 0
    else:
        assert x3 == x4
        assert len(result_3.models) == 2
Example no. 9
0
def test_warn_on_re_ask(base_estimator, next_x):
    """Test that `Optimizer.warn_on_re_ask` logs warning when `Optimizer._ask` suggests a point
    that has already been `tell`-ed to `Optimizer`

    Notes
    -----
    "DUMMY"/"dummy" is invalid for `base_estimator` here because it always suggests random points"""
    optimizer = Optimizer(
        [(-2.0, 2.0)], base_estimator, n_initial_points=1, random_state=1, warn_on_re_ask=True
    )
    # Record `next_x` as an already-evaluated point
    optimizer.tell(next_x, 1.0)

    # Force the cached suggestion (set by `Optimizer._tell`) to the point just told
    optimizer._next_x = next_x

    # Re-suggesting an already-told point must emit the warning
    with pytest.warns(UserWarning, match="Repeated suggestion: .*"):
        optimizer.ask()
Example no. 10
0
def test_optimizer_base_estimator_string_smoke(base_estimator):
    """Smoke test: each named `base_estimator` runs end-to-end with `acq_func="EI"`."""
    optimizer = Optimizer(
        [(-2.0, 2.0)], base_estimator=base_estimator, n_initial_points=1, acq_func="EI"
    )
    optimizer.run(func=lambda x: x[0] ** 2, n_iter=3)