Code example #1
def _fit_svc(n_jobs=1, n_points=1, cv=None):
    """
    Utility function to fit a larger classification task with SVC
    """

    X, y = make_classification(n_samples=1000,
                               n_features=20,
                               n_redundant=0,
                               n_informative=18,
                               random_state=1,
                               n_clusters_per_class=1)

    opt = BayesSearchCV(
        SVC(),
        {
            'C': Real(1e-3, 1e+3, prior='log-uniform'),
            'gamma': Real(1e-3, 1e+1, prior='log-uniform'),
            'degree': Integer(1, 3),
        },
        n_jobs=n_jobs,
        n_iter=11,
        n_points=n_points,
        cv=cv,
        random_state=42,
    )

    opt.fit(X, y)

    assert_greater(opt.score(X, y), 0.9)
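These snippets are test functions lifted out of their original modules, so imports are omitted throughout. For reference, here is a hedged reconstruction of the imports this first example relies on (module paths as in scikit-learn and scikit-optimize; assert_greater lived in sklearn's old testing utilities and has since been removed):

# Hedged reconstruction of the imports assumed above; exact module
# paths may differ between library versions.
from sklearn.datasets import make_classification
from sklearn.svm import SVC
from sklearn.utils.testing import assert_greater  # removed in newer sklearn

from skopt import BayesSearchCV
from skopt.space import Real, Integer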
Code example #2
def test_searchcv_reproducibility():
    """
    Test whether results of BayesSearchCV can be reproduced with a fixed
    random state.
    """

    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=0)

    random_state = 42

    opt = BayesSearchCV(SVC(random_state=random_state), {
        'C': Real(1e-6, 1e+6, prior='log-uniform'),
        'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
        'degree': Integer(1, 8),
        'kernel': Categorical(['linear', 'poly', 'rbf']),
    },
                        n_iter=11,
                        random_state=random_state)

    opt.fit(X_train, y_train)
    best_est = opt.best_estimator_

    opt2 = clone(opt).fit(X_train, y_train)
    best_est2 = opt2.best_estimator_

    assert getattr(best_est, 'C') == getattr(best_est2, 'C')
    assert getattr(best_est, 'gamma') == getattr(best_est2, 'gamma')
    assert getattr(best_est, 'degree') == getattr(best_est2, 'degree')
    assert getattr(best_est, 'kernel') == getattr(best_est2, 'kernel')
Code example #3
def test_all_points_different(strategy, surrogate):
    """
    Tests whether the parallel optimizer always generates
    different points to evaluate.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )

    tolerance = 1e-3  # distance below which two points are considered the same
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        optimizer.tell(x, [branin(v) for v in x])
        distances = pdist(x)
        assert all(distances > tolerance)
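The parallel-optimizer snippets (code examples #3 through #6) also reference names defined at module level in their original test file. A hedged reconstruction follows; the constant values are illustrative assumptions, not the originals:

# Hedged module-level context for the parallel-optimizer snippets;
# the constant values below are assumptions chosen for illustration.
from scipy.spatial.distance import pdist

from skopt import Optimizer
from skopt.benchmarks import branin
from skopt.space import Real

n_steps = 5   # number of ask/tell rounds per test (assumed value)
n_points = 2  # number of points requested in parallel (assumed value)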
Code example #4
def test_constant_liar_runs(strategy, surrogate, acq_func):
    """
    Tests whether the optimizer runs properly during the random
    initialization phase and beyond.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_func=acq_func,
        acq_optimizer='sampling',
        random_state=0
    )

    # check that invalid arguments passed to ask() raise ValueError
    assert_raises(ValueError, optimizer.ask, {"strategy": "cl_maen"})
    assert_raises(ValueError, optimizer.ask, {"n_points": "0"})
    assert_raises(ValueError, optimizer.ask, {"n_points": 0})

    for i in range(n_steps):
        x = optimizer.ask(n_points=n_points, strategy=strategy)
        # check that exactly n_points points were generated
        assert_equal(len(x), n_points)

        if "ps" in acq_func:
            optimizer.tell(x, [[branin(v), 1.1] for v in x])
        else:
            optimizer.tell(x, [branin(v) for v in x])
Code example #5
def test_reproducible_runs(strategy, surrogate):
    # two runs of the optimizer should yield exactly the same results

    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )

    points = []
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        points.append(x)
        optimizer.tell(x, [branin(v) for v in x])

    # the x's should be exactly the same as those recorded in `points`
    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)

        assert points[i] == x

        optimizer.tell(x, [branin(v) for v in x])
Code example #6
def test_same_set_of_points_ask(strategy, surrogate):
    """
    For n_points not None, tests whether two consecutive calls to ask
    return the same sets of points.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """

    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=2
    )

    for i in range(n_steps):
        xa = optimizer.ask(n_points, strategy)
        xb = optimizer.ask(n_points, strategy)
        optimizer.tell(xa, [branin(v) for v in xa])
        assert_equal(xa, xb)  # check if the sets of points generated are equal
Code example #7
def test_searchcv_sklearn_compatibility():
    """
    Test whether the BayesSearchCV is compatible with base sklearn methods
    such as clone, set_params, get_params.
    """

    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=0)

    # used to try different model classes
    pipe = Pipeline([('model', SVC())])

    # single categorical value of 'model' parameter sets the model class
    lin_search = {
        'model': Categorical([LinearSVC()]),
        'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
    }

    dtc_search = {
        'model': Categorical([DecisionTreeClassifier()]),
        'model__max_depth': Integer(1, 32),
        'model__min_samples_split': Real(1e-3, 1.0, prior='log-uniform'),
    }

    svc_search = {
        'model': Categorical([SVC()]),
        'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
        'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
        'model__degree': Integer(1, 8),
        'model__kernel': Categorical(['linear', 'poly', 'rbf']),
    }

    opt = BayesSearchCV(pipe, [(lin_search, 1), svc_search], n_iter=2)

    opt_clone = clone(opt)

    params, params_clone = opt.get_params(), opt_clone.get_params()
    assert params.keys() == params_clone.keys()

    for param, param_clone in zip(params.items(), params_clone.items()):
        assert param[0] == param_clone[0]
        assert isinstance(param[1], type(param_clone[1]))

    opt.set_params(search_spaces=[(dtc_search, 1)])

    opt.fit(X_train, y_train)
    opt_clone.fit(X_train, y_train)

    total_evaluations = len(opt.cv_results_['mean_test_score'])
    total_evaluations_clone = len(opt_clone.cv_results_['mean_test_score'])

    # test that the expected number of evaluations was performed per subspace
    assert total_evaluations == 1
    assert total_evaluations_clone == 1 + 2
Code example #8
def test_Constraints_init():
    space = Space([
        Real(1, 10),
        Real(1, 10),
        Real(1, 10),
        Integer(0, 10),
        Integer(0, 10),
        Integer(0, 10),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg'))
    ])
    
    cons_list = [
        Single(0, 5.0, 'real'),
        Inclusive(1, (3.0, 5.0), 'real'),
        Exclusive(2, (3.0, 5.0), 'real'),
        Single(3, 5, 'integer'),
        Inclusive(4, (3, 5), 'integer'),
        Exclusive(5, (3, 5), 'integer'),
        Single(6, 'b', 'categorical'),
        Inclusive(7, ('c', 'd', 'e'), 'categorical'),
        Exclusive(8, ('c', 'd', 'e'), 'categorical'),
        # Note that two constraints are added to dimensions 4 and 5
        Inclusive(4, (7, 9), 'integer'),
        Exclusive(5, (7, 9), 'integer'),
    ]

    cons = Constraints(cons_list, space)

    # Test that space and constraints_list are stored on the object
    assert_equal(cons.space, space)
    assert_equal(cons.constraints_list, cons_list)

    # Test that a correct list of single constraints has been built
    assert_equal(len(cons.single), space.n_dims)
    assert_equal(cons.single[1], None)
    assert_equal(cons.single[-1], None)
    assert_not_equal(cons.single[0], None)
    assert_not_equal(cons.single[6], None)

    # Test that a correct list of inclusive constraints has been built
    assert_equal(len(cons.inclusive), space.n_dims)
    assert_equal(cons.inclusive[0], [])
    assert_equal(cons.inclusive[2], [])
    assert_not_equal(cons.inclusive[1], [])
    assert_not_equal(cons.inclusive[7], [])
    assert_equal(len(cons.inclusive[4]), 2)

    # Test that a correct list of exclusive constraints has been built
    assert_equal(len(cons.exclusive), space.n_dims)
    assert_equal(cons.exclusive[3], [])
    assert_equal(cons.exclusive[7], [])
    assert_not_equal(cons.exclusive[2], [])
    assert_not_equal(cons.exclusive[5], [])
    assert_equal(len(cons.exclusive[5]), 2)
Code example #9
def test_real_log_sampling_in_bounds():
    dim = Real(low=1, high=32, prior='log-uniform', transform='normalize')

    # round-trip values that are within the bounds of the space, e.g.
    # x = dim.inverse_transform(dim.transform(31.999999999999999))
    for n in (32., 31.999999999999999):
        round_tripped = dim.inverse_transform(dim.transform([n]))
        assert np.allclose([n], round_tripped)
        assert n in dim
        assert round_tripped in dim
Code example #10
def test_use_named_args():
    """
    Test the function wrapper @use_named_args which is used
    for wrapping an objective function with named args so it
    can be called by the optimizers which only pass a single
    list as the arg.

    This test does not actually use the optimizers but merely
    simulates how they would call the function.
    """

    # Define the search-space dimensions. They must all have names!
    dim1 = Real(name='foo', low=0.0, high=1.0)
    dim2 = Real(name='bar', low=0.0, high=1.0)
    dim3 = Real(name='baz', low=0.0, high=1.0)

    # Gather the search-space dimensions in a list.
    dimensions = [dim1, dim2, dim3]

    # Parameters that will be passed to the objective function.
    default_parameters = [0.5, 0.6, 0.8]

    # Define the objective function with named arguments
    # and use this function-decorator to specify the search-space dimensions.
    @use_named_args(dimensions=dimensions)
    def func(foo, bar, baz):
        # Assert that all the named args are indeed correct.
        assert foo == default_parameters[0]
        assert bar == default_parameters[1]
        assert baz == default_parameters[2]

        # Return some objective value.
        return foo**2 + bar**4 + baz**8

    # Ensure the objective function can be called with a single
    # argument named x.
    res = func(x=default_parameters)
    assert isinstance(res, float)

    # Ensure the objective function can be called with a single
    # argument that is unnamed.
    res = func(default_parameters)
    assert isinstance(res, float)

    # Ensure the objective function can be called with a single
    # argument that is a numpy array named x.
    res = func(x=np.array(default_parameters))
    assert isinstance(res, float)

    # Ensure the objective function can be called with a single
    # argument that is an unnamed numpy array.
    res = func(np.array(default_parameters))
    assert isinstance(res, float)
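Outside the tests, a function decorated with @use_named_args can be handed directly to an optimizer, which calls it with a single list of values per evaluation. A minimal sketch (a fresh objective is defined here, since the test's own func asserts fixed inputs):

from skopt import gp_minimize
from skopt.space import Real
from skopt.utils import use_named_args

dims = [Real(0.0, 1.0, name='foo'), Real(0.0, 1.0, name='bar')]

@use_named_args(dimensions=dims)
def objective(foo, bar):
    # the optimizer passes [foo_value, bar_value] as one list;
    # the decorator unpacks it into named arguments
    return foo ** 2 + bar ** 2

result = gp_minimize(objective, dims, n_calls=12, random_state=0)
print(result.x, result.fun)  # best point and its objective value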
Code example #11
def test_searchcv_runs(surrogate, n_jobs, n_points, cv=None):
    """
    Test whether the cross validation search wrapper around sklearn
    models runs properly with available surrogates and with single
    or multiple workers and different number of parameter settings
    to ask from the optimizer in parallel.

    Parameters
    ----------

    * `surrogate` [scikit-optimize surrogate or None]:
        The scikit-optimize surrogate to use as the base estimator.
        None means use the default surrogate.

    * `n_jobs` [int]:
        Number of parallel processes to use for computations.

    * `n_points` [int]:
        Number of parameter settings to ask for in parallel at every step.

    * `cv` [int, cross-validation generator or None]:
        Cross-validation strategy, passed through to BayesSearchCV.
    """

    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=0)

    # pass the surrogate through as the base estimator; None means use the default
    if surrogate is not None:
        optimizer_kwargs = {'base_estimator': surrogate}
    else:
        optimizer_kwargs = None

    opt = BayesSearchCV(SVC(), {
        'C': Real(1e-6, 1e+6, prior='log-uniform'),
        'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
        'degree': Integer(1, 8),
        'kernel': Categorical(['linear', 'poly', 'rbf']),
    },
                        n_jobs=n_jobs,
                        n_iter=11,
                        n_points=n_points,
                        cv=cv,
                        optimizer_kwargs=optimizer_kwargs)

    opt.fit(X_train, y_train)

    # this normally holds unless something is wrong with the
    # optimization procedure itself
    assert_greater(opt.score(X_test, y_test), 0.9)
Code example #12
def test_lhs_arange():
    dim = Categorical(['a', 'b', 'c'])
    dim.lhs_arange(10)
    dim = Integer(1, 20)
    dim.lhs_arange(10)
    dim = Real(-10, 20)
    dim.lhs_arange(10)
Code example #13
def test_searchcv_callback():
    # Test whether the callback is used in BayesSearchCV and
    # whether it can be used to interrupt the search loop

    X, y = load_iris(True)
    opt = BayesSearchCV(
        DecisionTreeClassifier(),
        {
            'max_depth': [3],  # additional test for single dimension
            'min_samples_split': Real(0.1, 0.9),
        },
        n_iter=5)
    total_iterations = [0]

    def callback(opt_result):
        # this simply counts iterations
        total_iterations[0] += 1

        # break the optimization loop at some point
        if total_iterations[0] > 2:
            return True  # True == stop optimization

        return False

    opt.fit(X, y, callback=callback)

    assert total_iterations[0] == 3

    # test whether final model was fit
    opt.score(X, y)
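The same callback contract (return True to stop the search) supports simple budget policies. A minimal sketch of a stop-after-a-time-budget callback, reusing the opt, X, y from above (the TimeBudget class is hypothetical, not part of skopt):

import time

class TimeBudget:
    # hypothetical callback: stops the search once the wall-clock
    # budget is exhausted; any callable returning True halts the loop
    def __init__(self, seconds):
        self.deadline = time.time() + seconds

    def __call__(self, opt_result):
        return time.time() > self.deadline

opt.fit(X, y, callback=TimeBudget(seconds=60))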
Code example #14
def test_constraints_rvs():
    space = Space([
        Real(1, 10),
        Real(1, 10),
        Real(1, 10),
        Integer(0, 10),
        Integer(0, 10),
        Integer(0, 10),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg'))
    ])
    
    cons_list = [
        Single(0, 5.0, 'real'),
        Inclusive(1, (3.0, 5.0), 'real'),
        Exclusive(2, (3.0, 5.0), 'real'),
        Single(3, 5, 'integer'),
        Inclusive(4, (3, 5), 'integer'),
        Exclusive(5, (3, 5), 'integer'),
        Single(6, 'b', 'categorical'),
        Inclusive(7, ('c', 'd', 'e'), 'categorical'),
        Exclusive(8, ('c', 'd', 'e'), 'categorical'),
        # Note that two constraints are added to dimensions 4 and 5
        Inclusive(4, (7, 9), 'integer'),
        Exclusive(5, (7, 9), 'integer'),
    ]

    # Test length of samples
    constraints = Constraints(cons_list, space)
    samples = constraints.rvs(n_samples=100)
    assert_equal(len(samples), 100)
    assert_equal(len(samples[0]), space.n_dims)
    assert_equal(len(samples[-1]), space.n_dims)
    
    # Test random state
    samples_a = constraints.rvs(n_samples=100, random_state=1)
    samples_b = constraints.rvs(n_samples=100, random_state=1)
    samples_c = constraints.rvs(n_samples=100, random_state=2)
    assert_equal(samples_a, samples_b)
    assert_not_equal(samples_a, samples_c)

    # Test an infeasible constraint combination: the inclusive region
    # (0.5, 0.6) lies entirely inside the excluded region (0.3, 0.7),
    # so no sample can satisfy both constraints
    space = Space([Real(0, 1)])
    cons_list = [Exclusive(0, (0.3, 0.7), 'real'),
                 Inclusive(0, (0.5, 0.6), 'real')]
    constraints = Constraints(cons_list, space)
    with raises(RuntimeError):
        samples = constraints.rvs(n_samples=10)
Code example #15
def test_get_constraints():
    # Test that get_constraints() returns the constraints that were
    # set by set_constraints()
    space = Space([Real(1, 10)])
    cons_list = [Single(0, 5.0, 'real')]
    cons = Constraints(cons_list, space)
    opt = Optimizer(space, "ET")
    opt.set_constraints(cons)
    assert_equal(cons, opt.get_constraints())
Code example #16
def test_lhs():
    SPACE = Space([
        Integer(-20, 20),
        Real(-10.5, 100),
        Categorical(list('abc'))]
    )
    samples = SPACE.lhs(10)
    assert len(samples) == 10
    assert len(samples[0]) == 3
Code example #17
def test_real_bounds():
    # should give same answer as using check_limits() but this is easier
    # to read
    a = Real(1., 2.1)
    assert_false(0.99 in a)
    assert_true(1. in a)
    assert_true(2.09 in a)
    assert_true(2.1 in a)
    assert_false(np.nextafter(2.1, 3.) in a)
Code example #18
def test_searchcv_runs_multiple_subspaces():
    """
    Test whether the BayesSearchCV runs without exceptions when
    multiple subspaces are given.
    """

    X, y = load_iris(True)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.75,
                                                        random_state=0)

    # used to try different model classes
    pipe = Pipeline([('model', SVC())])

    # single categorical value of 'model' parameter sets the model class
    lin_search = {
        'model': Categorical([LinearSVC()]),
        'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
    }

    dtc_search = {
        'model': Categorical([DecisionTreeClassifier()]),
        'model__max_depth': Integer(1, 32),
        'model__min_samples_split': Real(1e-3, 1.0, prior='log-uniform'),
    }

    svc_search = {
        'model': Categorical([SVC()]),
        'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
        'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
        'model__degree': Integer(1, 8),
        'model__kernel': Categorical(['linear', 'poly', 'rbf']),
    }

    opt = BayesSearchCV(pipe, [(lin_search, 1), (dtc_search, 1), svc_search],
                        n_iter=2)

    opt.fit(X_train, y_train)

    # test if all subspaces are explored
    total_evaluations = len(opt.cv_results_['mean_test_score'])
    assert total_evaluations == 1 + 1 + 2, "Not all spaces were explored!"
Code example #19
def test_lhs_with_constraints():
    # Test that constraints cannot be set while LHS points are still
    # pending, and that they can be set once those points are exhausted.
    space = Space([Real(1, 10)])
    cons_list = [Single(0, 5.0, 'real')]
    cons = Constraints(cons_list, space)
    opt = Optimizer(space, "ET", lhs=True, n_initial_points=2)
    opt.tell([1], 0)  # use one initial point so that one is still left
    with raises(RuntimeError):  # setting constraints now must fail
        opt.set_constraints(cons)
    opt = Optimizer(space, "ET", lhs=True, n_initial_points=2)
    opt.tell([[1], [1]], [0, 0])  # use up both initial points
    opt.set_constraints(cons)  # now setting constraints should succeed
Code example #20
def test_searchcv_total_iterations():
    # Test the total iterations counting property of BayesSearchCV

    opt = BayesSearchCV(
        DecisionTreeClassifier(),
        [
            ({
                'max_depth': (1, 32)
            }, 10),  # 10 iterations here
            {
                'min_samples_split': Real(0.1, 0.9)
            }  # 5 (default) iters here
        ],
        n_iter=5)

    assert opt.total_iterations == 10 + 5
Code example #21
def test_real_distance():
    reals = Real(1, 10)
    for i in range(1, 10+1):
        assert_equal(reals.distance(4.1234, i), abs(4.1234 - i))
Code example #22

@pytest.mark.fast_test
@pytest.mark.parametrize("dimensions, normalizations", [
    (((1, 3), (1., 3.)), ('normalize', 'normalize')),
    (((1, 3), ('a', 'b', 'c')), ('normalize', 'onehot')),
])
def test_normalize_dimensions(dimensions, normalizations):
    space = normalize_dimensions(dimensions)
    for dimension, normalization in zip(space, normalizations):
        assert dimension.transform_ == normalization


@pytest.mark.fast_test
@pytest.mark.parametrize(
    "dimension, name", [(Real(1, 2, name="learning rate"), "learning rate"),
                        (Integer(1, 100, name="no of trees"), "no of trees"),
                        (Categorical(["red, blue"], name="colors"), "colors")])
def test_normalize_dimensions(dimension, name):
    space = normalize_dimensions([dimension])
    assert space.dimensions[0].name == name


Code example #23
def pow10map(x):
    return 10.0**x


def pow2intmap(x):
    return int(2.0**x)


def nop(x):
    return x


nnparams = {
    # up to 1024 neurons
    'hidden_layer_sizes': (Real(1.0, 10.0), pow2intmap),
    'activation': (Categorical(['identity', 'logistic', 'tanh', 'relu']), nop),
    'solver': (Categorical(['lbfgs', 'sgd', 'adam']), nop),
    'alpha': (Real(-5.0, -1), pow10map),
    'batch_size': (Real(5.0, 10.0), pow2intmap),
    'learning_rate': (Categorical(['constant', 'invscaling',
                                   'adaptive']), nop),
    'max_iter': (Real(5.0, 8.0), pow2intmap),
    'learning_rate_init': (Real(-5.0, -1), pow10map),
    'power_t': (Real(0.01, 0.99), nop),
    'momentum': (Real(0.1, 0.98), nop),
    'nesterovs_momentum': (Categorical([True, False]), nop),
    'beta_1': (Real(0.1, 0.98), nop),
    'beta_2': (Real(0.1, 0.9999999), nop),
}
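Each entry in nnparams pairs a skopt dimension with a post-transform, so a point sampled in the search space can be mapped into valid MLPClassifier keyword arguments. A minimal sketch of that mapping (point_to_params is a hypothetical helper, not part of the original code):

def point_to_params(point, params=nnparams):
    # hypothetical helper: apply each entry's mapping function to the
    # value sampled from its paired dimension, relying on dict order
    return {name: mapper(value)
            for (name, (_, mapper)), value in zip(params.items(), point)}

# e.g. a sampled 10.0 for 'hidden_layer_sizes' becomes pow2intmap(10.0) == 1024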
Code example #24
def test_real_distance_out_of_range():
    reals = Real(1, 10)
    assert_raises_regex(RuntimeError, "compute distance for values within",
                        reals.distance, 11, 10)
Code example #25
def test_optimizer_with_constraints(acq_optimizer):
    base_estimator = 'GP'
    space = Space([
        Real(1, 10),
        Real(1, 10),
        Real(1, 10),
        Integer(0, 10),
        Integer(0, 10),
        Integer(0, 10),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg'))
    ])

    cons_list = [Single(0, 5.0, 'real'), Single(3, 5, 'integer')]
    cons_list_2 = [Single(0, 4.0, 'real'), Single(3, 4, 'integer')]
    cons = Constraints(cons_list, space)
    cons_2 = Constraints(cons_list_2, space)

    # Test behavior when not adding constraints
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=5)

    # Test that _constraints is None when no constraint has been set so far
    assert_equal(opt._constraints, None)

    # Test that constraints are still None after an ask/tell loop
    for _ in range(6):
        next_x = opt.ask()
        f_val = np.random.random() * 100
        opt.tell(next_x, f_val)
    assert_equal(opt._constraints, None)
    opt.remove_constraints()
    assert_equal(opt._constraints, None)

    # Test behavior when adding constraints in an optimization setting
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=3)
    opt.set_constraints(cons)
    assert_equal(opt._constraints, cons)
    next_x = opt.ask()
    assert_equal(next_x[0], 5.0)
    assert_equal(next_x[3], 5)
    f_val = np.random.random() * 100
    opt.tell(next_x, f_val)
    assert_equal(opt._constraints, cons)
    opt.set_constraints(cons_2)
    next_x = opt.ask()
    assert_equal(opt._constraints, cons_2)
    assert_equal(next_x[0], 4.0)
    assert_equal(next_x[3], 4)
    f_val = np.random.random() * 100
    opt.tell(next_x, f_val)
    assert_equal(opt._constraints, cons_2)
    opt.remove_constraints()
    assert_equal(opt._constraints, None)
    next_x = opt.ask()
    assert_not_equal(next_x[0], 4.0)
    assert_not_equal(next_x[0], 5.0)
    f_val = np.random.random() * 100
    opt.tell(next_x, f_val)
    assert_equal(opt._constraints, None)

    # Test that next_x is changed when adding constraints
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=3)
    assert_false(hasattr(opt, '_next_x'))
    for _ in range(4):  # exhaust the initial points
        next_x = opt.ask()
        f_val = np.random.random() * 100
        opt.tell(next_x, f_val)
    assert_true(hasattr(opt, '_next_x'))  # now _next_x should be on the optimizer
    assert_not_equal(next_x[0], 4.0)
    assert_not_equal(next_x[0], 5.0)
    next_x = opt._next_x
    opt.set_constraints(cons)
    assert_not_equal(opt._next_x, next_x)  # check that next_x has been changed
    assert_equal(opt._next_x[0], 5.0)
    assert_equal(opt._next_x[3], 5)
    next_x = opt._next_x
    opt.set_constraints(cons_2)
    assert_not_equal(opt._next_x, next_x)
    assert_equal(opt._next_x[0], 4.0)
    assert_equal(opt._next_x[3], 4)

    # Test that passing a constraints list or a Constraints object
    # gives the same result
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=3)
    opt.set_constraints(cons_list)
    opt2 = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                     n_initial_points=3)
    opt2.set_constraints(cons)
    assert_equal(opt._constraints, opt2._constraints)

    # Test that constraints are satisfied
    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=2)
    opt.set_constraints(cons)
    next_x = opt.ask()
    assert_equal(next_x[0], 5.0)

    opt = Optimizer(space, base_estimator, acq_optimizer=acq_optimizer,
                    n_initial_points=2)
    next_x = opt.ask()
    assert_not_equal(next_x[0], 5.0)
    f_val = np.random.random() * 100
    opt.tell(next_x, f_val)
    opt.set_constraints(cons)
    next_x = opt.ask()
    assert_equal(next_x[0], 5.0)
    assert_equal(next_x[3], 5)
    opt.set_constraints(cons)
    next_x = opt.ask()
    f_val = np.random.random() * 100
    opt.tell(next_x, f_val)
    opt.set_constraints(cons_2)
    next_x = opt.ask()
    assert_equal(next_x[0], 4.0)
    assert_equal(next_x[3], 4)
    f_val = np.random.random() * 100
    opt.tell(next_x, f_val)
    assert_equal(next_x[0], 4.0)
    assert_equal(next_x[3], 4)
Code example #26
def test_normalize():
    a = Real(2.0, 30.0, transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 2, 30)

    rng = np.random.RandomState(0)
    X = rng.randn(100)
    X = 28 * (X - X.min()) / (X.max() - X.min()) + 2

    # Check transformed values are in [0, 1]
    assert_true(np.all(a.transform(X) <= np.ones_like(X)))
    assert_true(np.all(np.zeros_like(X) <= a.transform(X)))

    # Check inverse transform
    assert_array_almost_equal(a.inverse_transform(a.transform(X)), X)

    # log-uniform prior
    a = Real(10**2.0, 10**4.0, prior="log-uniform", transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 10**2, 10**4)

    rng = np.random.RandomState(0)
    X = np.clip(10**3 * rng.randn(100), 10**2.0, 10**4.0)

    # Check transform
    assert_true(np.all(a.transform(X) <= np.ones_like(X)))
    assert_true(np.all(np.zeros_like(X) <= a.transform(X)))

    # Check inverse transform
    assert_array_almost_equal(a.inverse_transform(a.transform(X)), X)

    a = Integer(2, 30, transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 2, 30)
    assert_array_equal(a.transformed_bounds, (0, 1))

    X = rng.randint(2, 31)
    # Check transformed values are in [0, 1]
    assert_true(np.all(a.transform(X) <= np.ones_like(X)))
    assert_true(np.all(np.zeros_like(X) <= a.transform(X)))

    # Check inverse transform
    X_orig = a.inverse_transform(a.transform(X))
    assert_equal(X_orig.dtype, "int64")
    assert_array_equal(X_orig, X)
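The assertions above follow from the affine form of the "normalize" transform for a uniform prior; for the log-uniform prior the same mapping is applied to log10(x). An illustrative sketch of that form (not skopt's actual implementation):

def normalize(x, low, high):
    # maps the interval [low, high] onto [0, 1]
    return (x - low) / (high - low)

def inverse_normalize(z, low, high):
    # exact inverse, up to floating-point round-off
    return low + z * (high - low)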
Code example #27
def test_Constraints_validate_sample():
    space = Space([
        Real(1, 10),
        Real(1, 10),
        Real(1, 10),
        Integer(0, 10),
        Integer(0, 10),
        Integer(0, 10),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg')),
        Categorical(list('abcdefg'))
    ])
    
    # Test validation of single constraints
    cons_list = [Single(0, 5.0, 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 5.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 5.00001
    assert_false(cons.validate_sample(sample))
    sample[0] = 4.99999
    assert_false(cons.validate_sample(sample))

    cons_list = [Single(3, 5, 'integer')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[3] = 5
    assert_true(cons.validate_sample(sample))
    sample[3] = 6
    assert_false(cons.validate_sample(sample))
    sample[3] = -5
    assert_false(cons.validate_sample(sample))
    sample[3] = 5.000001
    assert_false(cons.validate_sample(sample))

    cons_list = [Single(6, 'a', 'categorical')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[6] = 'a'
    assert_true(cons.validate_sample(sample))
    sample[6] = 'b'
    assert_false(cons.validate_sample(sample))
    sample[6] = -5
    assert_false(cons.validate_sample(sample))
    sample[6] = 5.000001
    assert_false(cons.validate_sample(sample))

    # Test validation of inclusive constraints
    cons_list = [Inclusive(0, (5.0, 7.0), 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 5.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 7.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 7.00001
    assert_false(cons.validate_sample(sample))
    sample[0] = 4.99999
    assert_false(cons.validate_sample(sample))
    sample[0] = -10
    assert_false(cons.validate_sample(sample))

    cons_list = [Inclusive(3, (5, 7), 'integer')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[3] = 5
    assert_true(cons.validate_sample(sample))
    sample[3] = 6
    assert_true(cons.validate_sample(sample))
    sample[3] = 7
    assert_true(cons.validate_sample(sample))
    sample[3] = 8
    assert_false(cons.validate_sample(sample))
    sample[3] = 4
    assert_false(cons.validate_sample(sample))
    sample[3] = -4
    assert_false(cons.validate_sample(sample))

    cons_list = [Inclusive(6, ('c', 'd', 'e'), 'categorical')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[6] = 'c'
    assert_true(cons.validate_sample(sample))
    sample[6] = 'e'
    assert_true(cons.validate_sample(sample))
    sample[6] = 'f'
    assert_false(cons.validate_sample(sample))
    sample[6] = -5
    assert_false(cons.validate_sample(sample))
    sample[6] = 3.3
    assert_false(cons.validate_sample(sample))
    sample[6] = 'a'
    assert_false(cons.validate_sample(sample))

    # Test validation of exclusive constraints
    cons_list = [Exclusive(0, (5.0, 7.0), 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 5.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 7.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 7.00001
    assert_true(cons.validate_sample(sample))
    sample[0] = 4.99999
    assert_true(cons.validate_sample(sample))
    sample[0] = -10
    assert_true(cons.validate_sample(sample))

    cons_list = [Exclusive(3, (5, 7), 'integer')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[3] = 5
    assert_false(cons.validate_sample(sample))
    sample[3] = 6
    assert_false(cons.validate_sample(sample))
    sample[3] = 7
    assert_false(cons.validate_sample(sample))
    sample[3] = 8
    assert_true(cons.validate_sample(sample))
    sample[3] = 4
    assert_true(cons.validate_sample(sample))
    sample[3] = -4
    assert_true(cons.validate_sample(sample))

    cons_list = [Exclusive(3, (5, 5), 'integer')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[3] = 5
    assert_false(cons.validate_sample(sample))

    cons_list = [Exclusive(6, ('c', 'd', 'e'), 'categorical')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[6] = 'c'
    assert_false(cons.validate_sample(sample))
    sample[6] = 'e'
    assert_false(cons.validate_sample(sample))
    sample[6] = 'f'
    assert_true(cons.validate_sample(sample))
    sample[6] = -5
    assert_true(cons.validate_sample(sample))
    sample[6] = 3.3
    assert_true(cons.validate_sample(sample))
    sample[6] = 'a'
    assert_true(cons.validate_sample(sample))

    # Test more than one constraint per dimension
    cons_list = [Inclusive(0, (1.0, 2.0), 'real'),
                 Inclusive(0, (3.0, 4.0), 'real'),
                 Inclusive(0, (5.0, 6.0), 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 1.3
    assert_true(cons.validate_sample(sample))
    sample[0] = 6.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 5.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 3.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 4.0
    assert_true(cons.validate_sample(sample))
    sample[0] = 5.5
    assert_true(cons.validate_sample(sample))
    sample[0] = 2.1
    assert_false(cons.validate_sample(sample))
    sample[0] = 4.9
    assert_false(cons.validate_sample(sample))
    sample[0] = 7.0
    assert_false(cons.validate_sample(sample))

    cons_list = [Exclusive(0, (1.0, 2.0), 'real'),
                 Exclusive(0, (3.0, 4.0), 'real'),
                 Exclusive(0, (5.0, 6.0), 'real')]
    cons = Constraints(cons_list, space)
    sample = [0]*space.n_dims
    sample[0] = 1.3
    assert_false(cons.validate_sample(sample))
    sample[0] = 6.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 5.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 3.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 4.0
    assert_false(cons.validate_sample(sample))
    sample[0] = 5.5
    assert_false(cons.validate_sample(sample))
    sample[0] = 2.1
    assert_true(cons.validate_sample(sample))
    sample[0] = 4.9
    assert_true(cons.validate_sample(sample))
    sample[0] = 7.0
    assert_true(cons.validate_sample(sample))
Code example #28
def test_real():
    a = Real(1, 25)
    for i in range(50):
        r = a.rvs(random_state=i)
        check_limits(r, 1, 25)
        assert_true(r in a)

    random_values = a.rvs(random_state=0, n_samples=10)
    assert_array_equal(random_values.shape, (10,))
    assert_array_equal(a.transform(random_values), random_values)
    assert_array_equal(a.inverse_transform(random_values), random_values)

    log_uniform = Real(10**-5, 10**5, prior="log-uniform")
    assert_not_equal(log_uniform, Real(10**-5, 10**5))
    for i in range(50):
        random_val = log_uniform.rvs(random_state=i)
        check_limits(random_val, 10**-5, 10**5)
    random_values = log_uniform.rvs(random_state=0, n_samples=10)
    assert_array_equal(random_values.shape, (10,))
    transformed_vals = log_uniform.transform(random_values)
    assert_array_equal(transformed_vals, np.log10(random_values))
    assert_array_equal(
        log_uniform.inverse_transform(transformed_vals), random_values)
Code example #29
def test_dimension_with_invalid_names(name):
    with pytest.raises(ValueError) as exc:
        Real(1, 2, name=name)
    assert("Dimension's name must be either string or None." ==
           exc.value.args[0])
Code example #30
def test_space_consistency():
    # Reals (uniform)

    s1 = Space([Real(0.0, 1.0)])
    s2 = Space([Real(0.0, 1.0)])
    s3 = Space([Real(0, 1)])
    s4 = Space([(0.0, 1.0)])
    s5 = Space([(0.0, 1.0, "uniform")])
    s6 = Space([(0, 1.0)])
    s7 = Space([(np.float64(0.0), 1.0)])
    s8 = Space([(0, np.float64(1.0))])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    a4 = s4.rvs(n_samples=10, random_state=0)
    a5 = s5.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_equal(s1, s6)
    assert_equal(s1, s7)
    assert_equal(s1, s8)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)
    assert_array_equal(a1, a4)
    assert_array_equal(a1, a5)

    # Reals (log-uniform)
    s1 = Space([Real(10**-3.0, 10**3.0, prior="log-uniform")])
    s2 = Space([Real(10**-3.0, 10**3.0, prior="log-uniform")])
    s3 = Space([Real(10**-3, 10**3, prior="log-uniform")])
    s4 = Space([(10**-3.0, 10**3.0, "log-uniform")])
    s5 = Space([(np.float64(10**-3.0), 10**3.0, "log-uniform")])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    a4 = s4.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)
    assert_array_equal(a1, a4)

    # Integers
    s1 = Space([Integer(1, 5)])
    s2 = Space([Integer(1.0, 5.0)])
    s3 = Space([(1, 5)])
    s4 = Space([(np.int64(1.0), 5)])
    s5 = Space([(1, np.int64(5.0))])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)

    # Categoricals
    s1 = Space([Categorical(["a", "b", "c"])])
    s2 = Space([Categorical(["a", "b", "c"])])
    s3 = Space([["a", "b", "c"]])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_array_equal(a1, a2)
    assert_equal(s1, s3)
    assert_array_equal(a1, a3)

    s1 = Space([(True, False)])
    s2 = Space([Categorical([True, False])])
    s3 = Space([np.array([True, False])])
    assert s1 == s2 == s3