def test_local_search_acquisition_optimizer_neighbours():
    np.random.seed(0)
    space = ParameterSpace([
        CategoricalParameter('a', OneHotEncoding([1, 2, 3])),
        CategoricalParameter('b', OrdinalEncoding([0.1, 1, 2])),
        CategoricalParameter('c', OrdinalEncoding([0.1, 1, 2])),
        DiscreteParameter('d', [0.1, 1.2, 2.3]),
        ContinuousParameter('e', 0, 100),
        DiscreteParameter('no_neighbours', [1]),
        DiscreteParameter('f', [0.1, 1.2, 2.3]),
    ])
    x = np.array([1, 0, 0, 1.6, 2.9, 0.1, 50, 1.2, 1.])
    optimizer = LocalSearchAcquisitionOptimizer(space, 1000, 3, num_continuous=1)

    neighbourhood = optimizer._neighbours_per_parameter(x, space.parameters)
    assert_equal(np.array([[0, 1, 0], [0, 0, 1]]), neighbourhood[0])
    assert_equal(np.array([[1], [3]]), neighbourhood[1])
    assert_equal(np.array([[2]]), neighbourhood[2])
    assert_equal(np.array([[1.2]]), neighbourhood[3])
    assert_almost_equal(np.array([[53.5281047]]), neighbourhood[4])
    assert_equal(np.empty((0, 1)), neighbourhood[5])
    assert_equal(np.array([[0.1], [2.3]]), neighbourhood[6])

    neighbours = optimizer._neighbours(x, space.parameters)
    assert_almost_equal(np.array([
        [0, 1, 0, 2., 3., 0.1, 50., 1., 1.2],
        [0, 0, 1, 2., 3., 0.1, 50., 1., 1.2],
        [1, 0, 0, 1., 3., 0.1, 50., 1., 1.2],
        [1, 0, 0, 3., 3., 0.1, 50., 1., 1.2],
        [1, 0, 0, 2., 2., 0.1, 50., 1., 1.2],
        [1, 0, 0, 2., 3., 1.2, 50., 1., 1.2],
        [1, 0, 0, 2., 3., 0.1, 50.80031442, 1., 1.2],
        [1, 0, 0, 2., 3., 0.1, 50., 1., 0.1],
        [1, 0, 0, 2., 3., 0.1, 50., 1., 2.3],
    ]), space.round(neighbours))
Example #2
def test_categorical_variables():
    np.random.seed(123)

    def objective(x):
        return np.array(np.sum(x, axis=1).reshape(-1, 1))

    carol_spirits = ["past", "present", "yet to come"]
    encoding = OneHotEncoding(carol_spirits)
    parameter_space = ParameterSpace([
        ContinuousParameter("real_param", 0.0, 1.0),
        CategoricalParameter("categorical_param", encoding)
    ])

    x_init = parameter_space.sample_uniform(10)

    assert x_init.shape == (10, 4)
    # all three one-hot columns (indices 1-3) must be binary
    assert np.all(np.logical_or(x_init[:, 1:] == 0.0, x_init[:, 1:] == 1.0))

    y_init = objective(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    acquisition = ExpectedImprovement(model)

    loop = BayesianOptimizationLoop(parameter_space, model, acquisition)
    loop.run_loop(objective, 5)

    assert len(loop.loop_state.Y) == 15
    assert np.all(
        np.logical_or(loop.loop_state.X[:, 1:] == 0.0,
                      loop.loop_state.X[:, 1:] == 1.0))
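The 0/1 assertions above follow from how the one-hot encoding expands the categorical parameter into separate columns. A minimal sketch, assuming emukit's Encoding exposes its encoding matrix as .encodings (the UnknownEncoding stub in a later example passes exactly these two arguments to Encoding.__init__):

spirits = OneHotEncoding(['past', 'present', 'yet to come'])
print(spirits.encodings)  # one row per category: [1, 0, 0], [0, 1, 0], [0, 0, 1]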
Example #3
def test_check_in_domain_with_bandit_parameter():
    mixed_space_with_bandit = ParameterSpace([
        ContinuousParameter("c", 1.0, 5.0),
        DiscreteParameter("d", [0, 1, 2]),
        CategoricalParameter("cat", OneHotEncoding(["blue", "red"])),
        BanditParameter("bandit", np.array([[0, 1], [1, 1], [1.0, 0]])),
    ])
    # columns: c, d, cat one-hot (2), bandit arm (2); [0., 0.] is not a valid
    # bandit arm, so the second point falls outside the domain
    x_test = np.array([[1.5, 0, 1.0, 0.0, 0, 1], [1.5, 0, 1.0, 0.0, 0.0, 0.0]])
    in_domain = mixed_space_with_bandit.check_points_in_domain(x_test)
    assert np.array_equal(in_domain, np.array([True, False]))
Example #5
def catg_space():
    return ParameterSpace([
        ContinuousParameter("x1", 0, 15),
        CategoricalParameter("x2", OneHotEncoding(["A", "B", "C", "D"])),
        CategoricalParameter("x3", OneHotEncoding([1, 2, 3, 4, 5])),
        ContinuousParameter("x4", -2, 3),
    ])
Example #6
def space():
    space = ParameterSpace([
        ContinuousParameter("x1", 0, 1),
        ContinuousParameter("x2", 0, 1),
        ContinuousParameter("x3", 0, 1)
    ])
    return space
def test_local_search_acquisition_optimizer(simple_square_acquisition):
    space = ParameterSpace(
        [CategoricalParameter("x", OrdinalEncoding(np.arange(0, 100)))])
    optimizer = LocalSearchAcquisitionOptimizer(space, 1000, 3)

    opt_x, opt_val = optimizer.optimize(simple_square_acquisition)
    # ordinal encoding is as integers 1, 2, ...
    np.testing.assert_array_equal(opt_x, np.array([[1.0]]))
    np.testing.assert_array_equal(opt_val, np.array([[0.0]]))

    class UnknownParameter(Parameter):
        def __init__(self, name: str):
            self.name = name

        def sample_uniform(self, num_points):
            return np.random.randint(0, 1, (num_points, 1))

    space.parameters.append(UnknownParameter("y"))
    with pytest.raises(TypeError):
        optimizer.optimize(simple_square_acquisition)
    space.parameters.pop()

    class UnknownEncoding(Encoding):
        def __init__(self):
            super().__init__([1], [[1]])

    space.parameters.append(CategoricalParameter("y", UnknownEncoding()))
    with pytest.raises(TypeError):
        optimizer.optimize(simple_square_acquisition)
    space.parameters.pop()
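As the inline comment above notes, ordinal encoding represents the i-th category as the integer i + 1. A quick sketch of that convention, under the same .encodings assumption as before:

enc = OrdinalEncoding(np.arange(0, 100))
print(enc.encodings[:3])  # the first three categories encode as 1, 2, 3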
def multi_source_optimizer():
    mock_acquisition_optimizer = mock.create_autospec(AcquisitionOptimizer)
    mock_acquisition_optimizer.optimize.return_value = (np.array([[0.]]), None)
    space = ParameterSpace(
        [ContinuousParameter('x', 0, 1),
         InformationSourceParameter(2)])
    return MultiSourceAcquisitionOptimizer(mock_acquisition_optimizer, space)
Example #9
def branin_function():
    """
    Two-dimensional Branin, often used as an optimization benchmark.

    Based on: https://www.sfu.ca/~ssurjano/branin.html

    .. math::
        f(\mathbf{x}) = (x_2 - b x_1 ^ 2 + c x_1 - r) ^ 2 + s(1 - t) \cos(x_1) + s

    where:

    .. math::
        b = 5.1 / (4 \pi ^ 2)

        c = 5 / \pi

        r = 6

        s = 10

        t = 1 / (8\pi)
    """

    parameter_space = ParameterSpace(
        [ContinuousParameter("x1", -5, 10),
         ContinuousParameter("x2", 0, 15)])
    return _branin, parameter_space
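A usage sketch for the factory pattern above; the underlying _branin callable is defined elsewhere in the module, and the shapes follow the emukit convention of 2-D inputs and (n, 1) outputs:

fn, space = branin_function()
x = space.sample_uniform(3)  # shape (3, 2), drawn from [-5, 10] x [0, 15]
y = fn(x)                    # shape (3, 1)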
Example #11
def get_next_memory_windows(trace_file, cache_type, cache_size, n_early_stop,
                            seq_start, parameters, args_global):
    # use 80% warmup, manually check for unimodal behavior
    n_warmup = int(0.8 * n_early_stop)
    # per-cache-size parameters override the other parameters
    df = database.load_reports(
        trace_file=trace_file,
        cache_type=cache_type,
        cache_size=str(cache_size),
        seq_start=str(seq_start),
        n_early_stop=str(n_early_stop),
        n_warmup=n_warmup,
        version=parameters['version'],  # use version as a strong key
        dburi=parameters["dburi"],
        dbcollection=parameters["dbcollection"],
    )
    if len(df) == 0:
        next_windows = np.linspace(1,
                                   int(0.4 * n_early_stop),
                                   args_global['n_beam'] + 1,
                                   dtype=int)[1:]
    else:
        # window at most 40% of length
        parameter_space = ParameterSpace(
            [ContinuousParameter('x', 1, int(0.4 * n_early_stop))])
        bo = GPBayesianOptimization(
            variables_list=parameter_space.parameters,
            X=df['memory_window'].values.reshape(-1, 1),
            Y=df['byte_miss_ratio'].values.reshape(-1, 1),
            batch_size=args_global['n_beam'])
        next_windows = bo.suggest_new_locations().reshape(-1).astype(int)
    return next_windows
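The same suggest-the-next-windows step, reduced to its emukit core with made-up numbers; GPBayesianOptimization and suggest_new_locations are called exactly as in the function above:

space = ParameterSpace([ContinuousParameter('x', 1, 400)])
X = np.array([[10.], [100.], [300.]])   # memory windows tried so far
Y = np.array([[0.31], [0.22], [0.27]])  # observed byte miss ratios
bo = GPBayesianOptimization(variables_list=space.parameters, X=X, Y=Y, batch_size=3)
print(bo.suggest_new_locations().reshape(-1).astype(int))  # three new windows to try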
Example #12
def test_montecarlo_sensitivity():

    ishigami = Ishigami(a=5, b=0.1)
    space = ParameterSpace([
        ContinuousParameter('x1', -np.pi, np.pi),
        ContinuousParameter('x2', -np.pi, np.pi),
        ContinuousParameter('x3', -np.pi, np.pi)
    ])

    num_mc = 1000
    np.random.seed(0)
    sensitivity_ishigami = MonteCarloSensitivity(ishigami.fidelity1, space)
    sensitivity_ishigami.generate_samples(1)

    main_sample = np.array([[3.10732573, -1.25504469, -2.66820221],
                            [-1.32105416, -1.45224686, -2.06642419],
                            [2.41144646, -1.98949844, 1.66646315]])
    fixing_sample = np.array([[-2.93034587, -2.62148462, 1.71694805],
                              [-2.70120457, 2.60061313, 3.00826754],
                              [-2.56283615, 2.53817347, -1.00496868]])

    main_effects, total_effects, total_variance = sensitivity_ishigami.compute_effects(
        main_sample=main_sample,
        fixing_sample=fixing_sample,
        num_monte_carlo_points=num_mc)

    keys = space.parameter_names

    assert all(k in main_effects for k in keys)
    assert all(k in total_effects for k in keys)
Example #13
def get_validation_tasks_per_cache_size(trace_file, cache_type, cache_size, parameters, args_global, df):
    n_req = parameters['n_req']
    n_validation = int(n_req * args_global['ratio_validation'])
    n_iteration = args_global['n_iteration']
    n_beam = args_global['n_beam']
    if len(df) == 0 or len(df[df['cache_size'] == cache_size]) == 0:
        # init value
        next_windows = np.linspace(1, int(0.4 * n_validation), args_global['n_beam'] + 1, dtype=int)[1:]
        tasks = []
        for memory_window in next_windows:
            # override n_early_stop
            task = _get_task(trace_file, cache_type, cache_size, parameters, n_validation, memory_window)
            tasks.append(task)
        return tasks

    # emukit's output is random, so don't assume the same points each time; only check that the number of points is sufficient
    df1 = df[df['cache_size'] == cache_size]
    if len(df1) >= n_iteration * n_beam:
        return []
    # next round
    # window at most 40% of length
    # add xs, ys in a consistent order, otherwise the results will differ
    parameter_space = ParameterSpace([ContinuousParameter('x', 1, int(0.4 * n_validation))])
    bo = GPBayesianOptimization(variables_list=parameter_space.parameters,
                                X=df1['memory_window'].values.reshape(-1, 1),
                                Y=df1['byte_miss_ratio'].values.reshape(-1, 1),
                                batch_size=args_global['n_beam'])
    next_windows = bo.suggest_new_locations().reshape(-1).astype(int)
    tasks = []
    for memory_window in next_windows:
        task = _get_task(trace_file, cache_type, cache_size, parameters, n_validation, memory_window)
        tasks.append(task)
    return tasks
Example #14
def Intervention_function(*interventions, model, target_variable,
                          min_intervention, max_intervention):
    num_samples = 100000

    assert len(min_intervention) == len(interventions[0])
    assert len(max_intervention) == len(interventions[0])

    def compute_target_function_fcn(value):
        # write the candidate intervention values into the interventions dict
        num_interventions = len(interventions[0])
        for i in range(num_interventions):
            interventions[0][list(interventions[0].keys())[i]] = value[0, i]

        # apply the interventions to the model and estimate E[Y] by Monte Carlo
        mutilated_model = intervene_dict(model, **interventions[0])
        np.random.seed(1)
        samples = [
            sample_from_model(mutilated_model) for _ in range(num_samples)
        ]
        samples = pd.DataFrame(samples)
        return np.asarray(np.mean(samples['Y']))[np.newaxis, np.newaxis]

    ## Define parameter space
    list_parameter = [None] * len(interventions[0])
    for i in range(len(interventions[0])):
        list_parameter[i] = ContinuousParameter(
            list(interventions[0].keys())[i], min_intervention[i],
            max_intervention[i])

    return (compute_target_function_fcn, ParameterSpace(list_parameter))
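A hypothetical usage sketch; model, intervene_dict and sample_from_model come from the surrounding project, so everything here beyond the signature shown above is an assumption:

target_fn, space = Intervention_function(
    {'X': 0.0},  # intervention dict; its values are overwritten on each evaluation
    model=model, target_variable='Y',
    min_intervention=[-2.0], max_intervention=[2.0])
print(target_fn(np.array([[1.5]])))  # estimated E[Y] under do(X=1.5), shape (1, 1)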
Example #15
def catg_space():
    return ParameterSpace([
        ContinuousParameter('x1', 0, 15),
        CategoricalParameter('x2', OneHotEncoding([0, 1, 2, 3])),
        CategoricalParameter('x3', OneHotEncoding([1, 2, 3, 4, 5])),
        ContinuousParameter('x4', -2, 3)
    ])
Example #16
def bayesian_opt():

    # 2. ranges of the synth parameters
    syn1 = syn2 = syn3 = syn4 = syn5 = np.arange(158)
    syn6 = np.arange(6000)
    syn7 = np.arange(1000)
    syn8 = np.arange(700)

    # 2b. combine the synth parameter ranges into an 8D parameter space
    # parameter_space = ParameterSpace(
    #     [ContinuousParameter('x1', 0., 157.)])

    # parameter_space = ParameterSpace(
    #     [DiscreteParameter('x8', syn8)])

    parameter_space = ParameterSpace(
        [ContinuousParameter('x1', 0., 157.), ContinuousParameter('x2', 0., 157.), ContinuousParameter('x3', 0., 157.),
         ContinuousParameter('x4', 0., 157.), ContinuousParameter('x5', 0., 157.), ContinuousParameter('x6', 0., 5999.),
         ContinuousParameter('x7', 0., 999.), ContinuousParameter('x8', 0., 699.)])

    # parameter_space = ParameterSpace(
    #     [DiscreteParameter('x1', syn1), DiscreteParameter('x2', syn2), DiscreteParameter('x3', syn3),
    #      DiscreteParameter('x4', syn4), DiscreteParameter('x5', syn5), DiscreteParameter('x6', syn6),
    #      DiscreteParameter('x7', syn1), DiscreteParameter('x8', syn8)])

    # 3. collect random points
    design = RandomDesign(parameter_space)

    X = design.get_samples(num_data_points)  # X is a numpy array
    print("X=", X)

    # [is the below needed?]
    # UserFunction.evaluate(training_function, X)
    # I put UserFunctionWrapper in line 94

    # 4. define training_function as Y
    Y = training_function(X)

    # [is this needed?]
    # loop_state = create_loop_state(X, Y)

    # 5. train and wrap the model in Emukit
    model_gpy = GPRegression(X, Y, normalizer=True)

    model_emukit = GPyModelWrapper(model_gpy)
    expected_improvement = ExpectedImprovement(model=model_emukit)
    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=expected_improvement,
                                             batch_size=5)

    max_iterations = 15
    bayesopt_loop.run_loop(training_function, max_iterations)
    model_gpy.plot()
    plt.show()
    results = bayesopt_loop.get_results()
    # bayesopt_loop.loop_state.X
    print("X: ", bayesopt_loop.loop_state.X)
    print("Y: ", bayesopt_loop.loop_state.Y)
    print("cost: ", bayesopt_loop.loop_state.cost)
Example #17
def multi_fidelity_borehole_function(high_noise_std_deviation=0, low_noise_std_deviation=0):
    """
    Two level borehole function.

    The Borehole function models water flow through a borehole. Its simplicity and quick evaluation make it a commonly
    used function for testing a wide variety of methods in computer experiments.

    See reference for equations:
    https://www.sfu.ca/~ssurjano/borehole.html

    :param high_noise_std_deviation: Standard deviation of Gaussian observation noise on high fidelity observations.
                                     Defaults to zero.
    :param low_noise_std_deviation: Standard deviation of Gaussian observation noise on low fidelity observations.
                                    Defaults to zero.
    :return: Tuple of user function object and parameter space
    """
    parameter_space = ParameterSpace([
        ContinuousParameter('borehole_radius', 0.05, 0.15),
        ContinuousParameter('radius_of_influence', 100, 50000),
        ContinuousParameter('upper_aquifer_transmissivity', 63070, 115600),
        ContinuousParameter('upper_aquifer_head', 990, 1110),
        ContinuousParameter('lower_aquifer_transmissivity', 63.1, 116),
        ContinuousParameter('lower_aquifer_head', 700, 820),
        ContinuousParameter('borehole_length', 1120, 1680),
        ContinuousParameter('hydraulic_conductivity', 9855, 12045),
        InformationSourceParameter(2)])

    user_function = MultiSourceFunctionWrapper([
        lambda x: borehole_low(x, low_noise_std_deviation),
        lambda x: borehole_high(x, high_noise_std_deviation)])

    return user_function, parameter_space
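A usage sketch: the last column of each sample is the InformationSourceParameter index (0 selects borehole_low, 1 borehole_high, matching the wrapper's list order), which MultiSourceFunctionWrapper dispatches on by default:

fn, space = multi_fidelity_borehole_function()
x = space.sample_uniform(5)  # 8 physical inputs plus 1 fidelity-index column
results = fn.evaluate(x)     # list of UserFunctionResult, one per row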
Example #18
def multi_fidelity_forrester_function(high_fidelity_noise_std_deviation=0, low_fidelity_noise_std_deviation=0):
    """
    Two-level multi-fidelity forrester function where the high fidelity is given by:

    .. math::
        f(x) = (6x - 2)^2 \sin(12x - 4)

    and the low fidelity approximation given by:

    .. math::
        f_{low}(x) = 0.5 f_{high}(x) + 10 (x - 0.5) + 5

    :param high_fidelity_noise_std_deviation: Standard deviation of observation noise on high fidelity observations.
                                              Defaults to zero.
    :param low_fidelity_noise_std_deviation: Standard deviation of observation noise on low fidelity observations.
                                             Defaults to zero.
    :return: Tuple of user function object and parameter space object
    """
    parameter_space = ParameterSpace([ContinuousParameter("x", 0, 1), InformationSourceParameter(2)])
    user_function = MultiSourceFunctionWrapper(
        [
            lambda x: forrester_low(x, low_fidelity_noise_std_deviation),
            lambda x: forrester(x, high_fidelity_noise_std_deviation),
        ]
    )
    return user_function, parameter_space
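A quick numeric check of the two formulas in the docstring, evaluated directly rather than via forrester/forrester_low, so no observation noise enters:

x = np.array([[0.3]])
f_high = (6 * x - 2) ** 2 * np.sin(12 * x - 4)
f_low = 0.5 * f_high + 10 * (x - 0.5) + 5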
Example #19
def multi_source_entropy_search_acquisition(gpy_model):
    space = ParameterSpace(
        [ContinuousParameter("x1", 0, 1),
         InformationSourceParameter(2)])
    return MultiInformationSourceEntropySearch(gpy_model,
                                               space,
                                               num_representer_points=10)
Example #20
def test_loop():
    n_iterations = 5

    x_init = np.random.rand(5, 1)
    y_init, y_constraint_init = f(x_init)

    # Make GPy objective model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    # Make GPy constraint model
    gpy_constraint_model = GPy.models.GPRegression(x_init, y_constraint_init)
    constraint_model = GPyModelWrapper(gpy_constraint_model)

    space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition = ExpectedImprovement(model)

    # Make loop and collect points
    bo = UnknownConstraintBayesianOptimizationLoop(
        model_objective=model, space=space, acquisition=acquisition, model_constraint=constraint_model
    )
    bo.run_loop(
        UserFunctionWrapper(f, extra_output_names=["Y_constraint"]), FixedIterationsStoppingCondition(n_iterations)
    )

    # Check we got the correct number of points
    assert bo.loop_state.X.shape[0] == n_iterations + 5
Example #21
def Hartmann3():
    parameter_space = ParameterSpace([
        ContinuousParameter('x1', 0, 1),
        ContinuousParameter('x2', 0, 1),
        ContinuousParameter('x3', 0, 1)
    ])
    return _Hartmann3, parameter_space
Example #22
def test_iteration_end_event():
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    def user_function(x):
        return x

    x_test = np.linspace(0, 1)[:, None]
    y_test = user_function(x_test)

    x_init = np.linspace(0, 1, 5)[:, None]
    y_init = user_function(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    mse = []

    def compute_mse(loop, loop_state):  # iteration-end handlers receive (loop, loop_state)
        mse.append(np.mean(np.square(model.predict(x_test)[0] - y_test)))

    loop_state = create_loop_state(x_init, y_init)

    acquisition = ModelVariance(model)
    acquisition_optimizer = AcquisitionOptimizer(space)
    candidate_point_calculator = SequentialPointCalculator(
        acquisition, acquisition_optimizer)
    model_updater = FixedIntervalUpdater(model)

    loop = OuterLoop(candidate_point_calculator, model_updater, loop_state)
    loop.iteration_end_event.append(compute_mse)
    loop.run_loop(user_function, 5)

    assert len(mse) == 5
Example #23
    def initialize(self):
        parameter_space = ParameterSpace([
            ContinuousParameter("x%d" % index, bounds[0], bounds[1])
            for index, bounds in enumerate(self.problem.bounds)
        ] + [InformationSourceParameter(len(self.problem.fidelities))])

        # Obtain initial sample
        design = LatinDesign(parameter_space)
        initial_parameters = design.get_samples(self.initial_sample_count)
        initial_response = self._evaluate_batch(initial_parameters)

        # one independent kernel per fidelity; [kernel] * n would alias a single object
        kernels = [GPy.kern.RBF(1) for _ in range(len(self.problem.fidelities))]
        kernel = emukit.multi_fidelity.kernels.LinearMultiFidelityKernel(kernels)

        model = GPyLinearMultiFidelityModel(
            initial_parameters, initial_response,
            kernel, n_fidelities=len(self.problem.fidelities)
        )

        model = GPyMultiOutputWrapper(model, len(self.problem.fidelities))
        acquisition = NegativeLowerConfidenceBound(model)

        self.loop = BayesianOptimizationLoop(
            model=model, space=parameter_space,
            acquisition=acquisition, batch_size=self.batch_size
        )
def test_categorical_variables():
    np.random.seed(123)

    def objective(x):
        return np.array(np.sum(x, axis=1).reshape(-1, 1))

    carol_spirits = ['past', 'present', 'yet to come']
    encoding = OneHotEncoding(carol_spirits)
    parameter_space = ParameterSpace([
        ContinuousParameter('real_param', 0.0, 1.0),
        CategoricalParameter('categorical_param', encoding)
    ])

    random_design = LatinDesign(parameter_space)
    x_init = random_design.get_samples(10)

    assert x_init.shape == (10, 4)
    assert np.all(np.logical_or(x_init[:, 1:] == 0.0, x_init[:, 1:] == 1.0))

    y_init = objective(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    loop = ExperimentalDesignLoop(parameter_space, model)
    loop.run_loop(objective, 5)

    assert len(loop.loop_state.Y) == 15
    assert np.all(np.logical_or(loop.loop_state.X[:, 1:] == 0.0, loop.loop_state.X[:, 1:] == 1.0))
Example #25
def multi_fidelity_non_linear_sin(high_fidelity_noise_std_deviation=0,
                                  low_fidelity_noise_std_deviation=0):
    """
    Two level non-linear sin function where high fidelity is given by:

    .. math::
        f_{high}(x) = (x - \sqrt{2}) f_{low}(x)^2

    and the low fidelity is:

    .. math::
        f_{low}(x) = \sin(8 \pi x)

    Reference:
    Nonlinear information fusion algorithms for data-efficient multi-fidelity modelling.
    P. Perdikaris, M. Raissi, A. Damianou, N. D. Lawrence and G. E. Karniadakis (2017)
    http://web.mit.edu/parisp/www/assets/20160751.full.pdf
    """

    parameter_space = ParameterSpace(
        [ContinuousParameter("x1", -5, 10),
         InformationSourceParameter(2)])
    user_function = MultiSourceFunctionWrapper([
        lambda x: nonlinear_sin_low(x, low_fidelity_noise_std_deviation),
        lambda x: nonlinear_sin_high(x, high_fidelity_noise_std_deviation),
    ])
    return user_function, parameter_space
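The documented relation between the fidelities, again evaluated directly from the docstring formulas rather than via nonlinear_sin_low/high:

x = np.array([[0.4]])
f_low = np.sin(8 * np.pi * x)
f_high = (x - np.sqrt(2)) * f_low ** 2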
def test_continuous_entropy_search():
    rng = np.random.RandomState(42)
    x_init = rng.rand(5, 1)
    s_min = 10
    s_max = 10000
    s = rng.uniform(s_min, s_max, x_init.shape[0])  # keep using the seeded rng
    x_init = np.concatenate((x_init, s[:, None]), axis=1)
    y_init = rng.rand(5, 1)

    model = FabolasModel(X_init=x_init,
                         Y_init=y_init,
                         s_min=s_min,
                         s_max=s_max)

    space = ParameterSpace([
        ContinuousParameter("x", 0, 1),
        ContinuousParameter("s", np.log(s_min), np.log(s_max))
    ])

    es = ContinuousFidelityEntropySearch(model,
                                         space,
                                         num_representer_points=10)
    es.update_pmin()

    assert np.all(es.representer_points[:, -1] == np.log(s_max))
def test_random_design_returns_correct_number_of_points():
    p = ContinuousParameter('c', 1.0, 5.0)
    space = ParameterSpace([p])
    points_count = 5

    points = RandomDesign(space).get_samples(points_count)

    assert points_count == len(points)
Example #28
def create_bayesian_optimization_loop(
        gpy_model: ComparisonGP, lims: np.ndarray, batch_size: int,
        acquisition: AcquisitionFunction) -> BayesianOptimizationLoop:
    """
    Creates a Bayesian optimization loop for Bayesian neural network or random forest models.
    :param gpy_model: the GPy model used in optimization
    :param lims: optimization limits for the inputs
    :param batch_size: number of observations used in a batch
    :param acquisition: acquisition function used in the Bayesian optimization
    :return: emukit BO loop
    """

    # Create model
    model = ComparisonGPEmukitWrapper(gpy_model, batch_size)

    # Create acquisition
    emukit_acquisition = EmukitAcquisitionFunctionWrapper(model, acquisition)

    if type(emukit_acquisition.acquisitionFunction) is ThompsonSampling:
        parameter_space = []
        for j in range(len(lims)):
            parameter_space += [
                ContinuousParameter("x{}".format(j), lims[j][0], lims[j][1])
            ]
        parameter_space = ParameterSpace(parameter_space)
        acquisition_optimizer = SequentialGradientAcquisitionOptimizer(
            parameter_space, batch_size)
    else:
        parameter_space = []
        for k in range(batch_size):
            for j in range(len(lims)):
                parameter_space += [
                    ContinuousParameter("x{}{}".format(k, j), lims[j][0],
                                        lims[j][1])
                ]
        parameter_space = ParameterSpace(parameter_space)
        acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)

    return BayesianOptimizationLoop(
        model=model,
        space=parameter_space,
        acquisition=emukit_acquisition,
        acquisition_optimizer=acquisition_optimizer)
Example #29
def test_random_sampling_with_context():
    space = ParameterSpace([ContinuousParameter("x", 0, 1), ContinuousParameter("y", 0, 1)])
    rs = RandomSampling(space)
    loop_state_mock = mock.create_autospec(LoopState)
    next_points = rs.compute_next_points(loop_state_mock, context={"x": 0.25})
    assert len(next_points) == 1
    # Context value should be what we set
    assert np.isclose(next_points[0, 0], 0.25)
Example #30
def test_random_sampling_without_context():
    space = ParameterSpace(
        [ContinuousParameter('x', 0, 1),
         ContinuousParameter('y', 0, 1)])
    rs = RandomSampling(space)
    loop_state_mock = mock.create_autospec(LoopState)
    next_points = rs.compute_next_points(loop_state_mock)
    assert len(next_points) == 1