Example #1
def test_loop():
    n_iterations = 5

    x_init = np.random.rand(5, 1)
    y_init, y_constraint_init = f(x_init)

    # Make GPy objective model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    # Make GPy constraint model
    gpy_constraint_model = GPy.models.GPRegression(x_init, y_constraint_init)
    constraint_model = GPyModelWrapper(gpy_constraint_model)

    space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition = ExpectedImprovement(model)

    # Make loop and collect points
    bo = UnknownConstraintBayesianOptimizationLoop(
        model_objective=model, space=space, acquisition=acquisition, model_constraint=constraint_model
    )
    bo.run_loop(
        UserFunctionWrapper(f, extra_output_names=["Y_constraint"]), FixedIterationsStoppingCondition(n_iterations)
    )

    # Check we got the correct number of points
    assert bo.loop_state.X.shape[0] == n_iterations + 5
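
This test assumes a module-level user function f that returns both objective and constraint values. A minimal stand-in (hypothetical; any function returning a pair of (n, 1) arrays works with the extra_output_names wrapper):

import numpy as np

# Hypothetical user function: returns (objective, constraint),
# each of shape (n, 1); the second output is exposed to the loop
# as "Y_constraint" via UserFunctionWrapper's extra_output_names.
def f(x):
    return np.sin(x), np.cos(x) - 0.5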
Example #2
def test_categorical_variables():
    np.random.seed(123)

    def objective(x):
        return np.array(np.sum(x, axis=1).reshape(-1, 1))

    carol_spirits = ['past', 'present', 'yet to come']
    encoding = OneHotEncoding(carol_spirits)
    parameter_space = ParameterSpace([
        ContinuousParameter('real_param', 0.0, 1.0),
        CategoricalParameter('categorical_param', encoding)
    ])

    random_design = RandomDesign(parameter_space)
    x_init = random_design.get_samples(10)

    assert x_init.shape == (10, 4)
    assert np.all(np.logical_or(x_init[:, 1:3] == 0.0, x_init[:, 1:3] == 1.0))

    y_init = objective(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    acquisition = ExpectedImprovement(model)

    loop = BayesianOptimizationLoop(parameter_space, model, acquisition)
    loop.run_loop(objective, 5)

    assert len(loop.loop_state.Y) == 15
    assert np.all(
        np.logical_or(loop.loop_state.X[:, 1:3] == 0.0,
                      loop.loop_state.X[:, 1:3] == 1.0))
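
With this parameter space, sample matrices have four columns: real_param followed by the three one-hot columns encoding categorical_param. A minimal sketch (plain NumPy, assuming that column order) of decoding a row back to its category:

row = loop.loop_state.X[0]
category = carol_spirits[int(np.argmax(row[1:4]))]  # columns 1-3 hold the one-hot block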
Example #3
def test_multi_source_batch_experimental_design():
    objective, space = multi_fidelity_forrester_function()

    # Create initial data
    random_design = RandomDesign(space)
    x_init = random_design.get_samples(10)
    initial_results = objective.evaluate(x_init)
    y_init = np.array([res.Y for res in initial_results])

    # Create multi source acquisition optimizer
    acquisition_optimizer = GradientAcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(
        acquisition_optimizer, space)

    # Create GP model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    # Create acquisition
    acquisition = ModelVariance(model)

    # Create batch candidate point calculator
    batch_candidate_point_calculator = GreedyBatchPointCalculator(
        model, acquisition, multi_source_acquisition_optimizer, batch_size=5)

    initial_loop_state = LoopState(initial_results)
    loop = OuterLoop(batch_candidate_point_calculator,
                     FixedIntervalUpdater(model, 1), initial_loop_state)

    loop.run_loop(objective, 10)
    assert loop.loop_state.X.shape[0] == 60  # 10 initial points + 10 iterations x batch size 5
Example #4
def bayesian_opt():

    # 1. ranges of the synth parameters
    syn1 = syn2 = syn3 = syn4 = syn5 = np.arange(158)
    syn6 = np.arange(6000)
    syn7 = np.arange(1000)
    syn8 = np.arange(700)

    # 2. synth parameter ranges into an 8D parameter space
    # parameter_space = ParameterSpace(
    #     [ContinuousParameter('x1', 0., 157.)])

    # parameter_space = ParameterSpace(
    #     [DiscreteParameter('x8', syn8)])

    parameter_space = ParameterSpace(
        [ContinuousParameter('x1', 0., 157.), ContinuousParameter('x2', 0., 157.), ContinuousParameter('x3', 0., 157.),
         ContinuousParameter('x4', 0., 157.), ContinuousParameter('x5', 0., 157.), ContinuousParameter('x6', 0., 5999.),
         ContinuousParameter('x7', 0., 999.), ContinuousParameter('x8', 0., 699.)])

    # parameter_space = ParameterSpace(
    #     [DiscreteParameter('x1', syn1), DiscreteParameter('x2', syn2), DiscreteParameter('x3', syn3),
    #      DiscreteParameter('x4', syn4), DiscreteParameter('x5', syn5), DiscreteParameter('x6', syn6),
    #      DiscreteParameter('x7', syn1), DiscreteParameter('x8', syn8)])

    # 3. collect random points
    design = RandomDesign(parameter_space)

    X = design.get_samples(num_data_points)  # X is a numpy array
    print("X=", X)

    # [is the below needed?]
    # UserFunction.evaluate(training_function, X)
    # I put UserFunctionWrapper in line 94

    # 4. define training_function as Y
    Y = training_function(X)

    # [is this needed?]
    # loop_state = create_loop_state(X, Y)

    # 5. train and wrap the model in Emukit
    model_gpy = GPRegression(X, Y, normalizer=True)

    model_emukit = GPyModelWrapper(model_gpy)
    expected_improvement = ExpectedImprovement(model=model_emukit)
    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=expected_improvement,
                                             batch_size=5)

    max_iterations = 15
    bayesopt_loop.run_loop(training_function, max_iterations)
    model_gpy.plot()
    plt.show()
    results = bayesopt_loop.get_results()
    # bayesopt_loop.loop_state.X
    print("X: ", bayesopt_loop.loop_state.X)
    print("Y: ", bayesopt_loop.loop_state.Y)
    print("cost: ", bayesopt_loop.loop_state.cost)
def test_categorical_variables():
    np.random.seed(123)

    def objective(x):
        return np.array(np.sum(x, axis=1).reshape(-1, 1))

    carol_spirits = ["past", "present", "yet to come"]
    encoding = OneHotEncoding(carol_spirits)
    parameter_space = ParameterSpace(
        [ContinuousParameter("real_param", 0.0, 1.0), CategoricalParameter("categorical_param", encoding)]
    )

    random_design = LatinDesign(parameter_space)
    x_init = random_design.get_samples(10)

    assert x_init.shape == (10, 4)
    assert np.all(np.logical_or(x_init[:, 1:3] == 0.0, x_init[:, 1:3] == 1.0))

    y_init = objective(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    loop = ExperimentalDesignLoop(parameter_space, model)
    loop.run_loop(objective, 5)

    assert len(loop.loop_state.Y) == 15
    assert np.all(np.logical_or(loop.loop_state.X[:, 1:3] == 0.0, loop.loop_state.X[:, 1:3] == 1.0))
Example #6
def test_iteration_end_event():
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    def user_function(x):
        return x

    x_test = np.linspace(0, 1)[:, None]
    y_test = user_function(x_test)

    x_init = np.linspace(0, 1, 5)[:, None]
    y_init = user_function(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    mse = []

    def compute_mse(loop, loop_state):
        mse.append(np.mean(np.square(model.predict(x_test)[0] - y_test)))

    loop_state = create_loop_state(x_init, y_init)

    acquisition = ModelVariance(model)
    acquisition_optimizer = AcquisitionOptimizer(space)
    candidate_point_calculator = SequentialPointCalculator(
        acquisition, acquisition_optimizer)
    model_updater = FixedIntervalUpdater(model)

    loop = OuterLoop(candidate_point_calculator, model_updater, loop_state)
    loop.iteration_end_event.append(compute_mse)
    loop.run_loop(user_function, 5)

    assert len(mse) == 5
Example #7
def gpy_model(n_dims):
    rng = np.random.RandomState(42)
    x_init = rng.rand(5, n_dims)
    y_init = rng.rand(5, 1)
    gpy_model = GPy.models.GPRegression(x_init, y_init, GPy.kern.RBF(n_dims))
    np.random.seed(42)
    gpy_model.randomize()
    return GPyModelWrapper(gpy_model)
Example #8
def gpy_model_mcmc(n_dims):
    rng = np.random.RandomState(42)
    x_init = rng.rand(5, n_dims)
    y_init = rng.rand(5, 1)
    gpy_model = GPy.models.GPRegression(x_init, y_init, GPy.kern.RBF(n_dims))
    gpy_model.kern.set_prior(GPy.priors.Uniform(0, 5))
    np.random.seed(42)
    gpy_model.randomize()
    return GPyModelWrapper(gpy_model)
Example #9
def run_optimisation(current_range, freq_range, power_range):
    parameter_space = ParameterSpace([
        ContinuousParameter('current', current_range[0], current_range[1]),
        ContinuousParameter('freq', freq_range[0], freq_range[1]),
        ContinuousParameter('power', power_range[0], power_range[1])
    ])

    def function(X):
        current = X[:, 0]
        freq = X[:, 1]
        power = X[:, 2]
        out = np.zeros((len(current), 1))
        for g in range(len(current)):
            # Set JPA current, frequency & power here before measuring.
            out[g, 0] = -get_SNR(plot=False)[-1]  # negative because we want to maximise SNR
        return out

    num_data_points = 10

    design = RandomDesign(parameter_space)
    X = design.get_samples(num_data_points)
    Y = function(X)

    model_gpy = GPRegression(X, Y)
    model_gpy.optimize()
    model_emukit = GPyModelWrapper(model_gpy)

    exp_imprv = ExpectedImprovement(model=model_emukit)
    optimizer = GradientAcquisitionOptimizer(space=parameter_space)
    point_calc = SequentialPointCalculator(exp_imprv, optimizer)

    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=exp_imprv,
                                             batch_size=1)

    stopping_condition = FixedIterationsStoppingCondition(i_max=100)

    bayesopt_loop.run_loop(function, stopping_condition)

    results = bayesopt_loop.get_results()
    coord_results = results.minimum_location
    min_value = results.minimum_value
    step_results = results.best_found_value_per_iteration
    print(coord_results)
    print(min_value)

    return coord_results, abs(min_value)
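
A hypothetical invocation with placeholder ranges (the real bounds depend on the JPA hardware being tuned):

coords, best_snr = run_optimisation(current_range=(0.0, 1.0),
                                    freq_range=(4e9, 8e9),
                                    power_range=(-20.0, 0.0))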
Example #10
def test_loop_initial_state():
    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    exp_design = ExperimentalDesignLoop(space, model)

    # test loop state initialization
    assert_array_equal(exp_design.loop_state.X, x_init)
    assert_array_equal(exp_design.loop_state.Y, y_init)
Example #11
def test_cost_sensitive_bayesian_optimization_loop():
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    x_init = np.random.rand(10, 1)

    def function_with_cost(x):
        return np.sin(x), x

    user_fcn = UserFunctionWrapper(function_with_cost)

    y_init, cost_init = function_with_cost(x_init)

    gpy_model_objective = GPy.models.GPRegression(x_init, y_init)
    gpy_model_cost = GPy.models.GPRegression(x_init, cost_init)

    model_objective = GPyModelWrapper(gpy_model_objective)
    model_cost = GPyModelWrapper(gpy_model_cost)

    loop = CostSensitiveBayesianOptimizationLoop(space, model_objective, model_cost)
    loop.run_loop(user_fcn, 10)

    assert loop.loop_state.X.shape[0] == 20
    assert loop.loop_state.cost.shape[0] == 20
Example #12
def test_acquisition_gradient_multipoint_expected_improvement():
    """
    Check the q-EI acquisition function gradients with numeric differentiation
    """
    x_init = np.random.rand(3, 1)
    y_init = np.random.rand(3, 1)
    # Make GPy model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    x0 = np.array([0.45, 0.55])
    _check_grad(MultipointExpectedImprovement(model), TOL_GRAD, x0)
    _check_grad(
        MultipointExpectedImprovement(model, fast_compute=True, eps=1e-3),
        TOL_GRAD_FAST, x0)
Example #13
def test_local_penalization():
    parameter_space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)
    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)
    acquisition = ExpectedImprovement(model)
    batch_size = 5
    lp_calc = LocalPenalizationPointCalculator(acquisition,
                                               acquisition_optimizer, model,
                                               parameter_space, batch_size)

    loop_state = create_loop_state(x_init, y_init)
    new_points = lp_calc.compute_next_points(loop_state)
    assert new_points.shape == (batch_size, 1)
Example #14
def test_optimization_with_linear_constraint():
    branin_fcn, parameter_space = branin_function()
    x_init = parameter_space.sample_uniform(10)
    y_init = branin_fcn(x_init)

    A = np.array([[1.0, 1.0]])
    b_lower = np.array([-5])
    b_upper = np.array([5])
    parameter_space.constraints = [LinearInequalityConstraint(A, b_lower, b_upper)]

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    lp = BayesianOptimizationLoop(parameter_space, model)
    lp.run_loop(branin_fcn, 5)

    assert True  # smoke test: reaching this point means the loop ran without raising
Example #15
def test_loop():
    n_iterations = 5

    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    # Make GPy model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    acquisition = ModelVariance(model)

    # Make loop and collect points
    exp_design = ExperimentalDesignLoop(space, model, acquisition)
    exp_design.run_loop(UserFunctionWrapper(f), FixedIterationsStoppingCondition(n_iterations))

    # Check we got the correct number of points
    assert exp_design.loop_state.X.shape[0] == n_iterations + 5
Example #16
def test_local_penalization():
    np.random.seed(123)
    branin_fcn, parameter_space = branin_function()
    x_init = parameter_space.sample_uniform(10)

    y_init = branin_fcn(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    base_acquisition = ExpectedImprovement(model)

    batch_size = 10
    update_interval = 1

    lp = BayesianOptimizationLoop(parameter_space, model, base_acquisition,
                                  update_interval, batch_size)
    lp.run_loop(branin_fcn, 5)

    assert len(lp.loop_state.Y) == 60  # 10 initial points + 5 iterations x batch size 10
Example #17
# Run the simulation a number of times to get some datapoints for the emulator
from emukit.core.initial_designs import RandomDesign

design = RandomDesign(space)
x = design.get_samples(100)
# NB this takes a while to run
y = np.array([simulation(k, config_details)['Effective R']
              for k in x])[:, np.newaxis]

# Use GP regression as the emulator
from GPy.models import GPRegression
from emukit.model_wrappers import GPyModelWrapper
from emukit.sensitivity.monte_carlo import MonteCarloSensitivity

model_gpy = GPRegression(x, y)
model_emukit = GPyModelWrapper(model_gpy)
model_emukit.optimize()

# Run Monte Carlo estimation of Sobol indices on the emulator
num_monte_carlo = 10000
sens_gpbased = MonteCarloSensitivity(model=model_emukit, input_domain=space)
main_effects_gp, total_effects_gp, _ = sens_gpbased.compute_effects(
    num_monte_carlo_points=num_monte_carlo)
main_effects_gp = {ivar: main_effects_gp[ivar][0] for ivar in main_effects_gp}
total_effects_gp = {ivar: total_effects_gp[ivar][0] for ivar in total_effects_gp}

# First-order Sobol indices:
fig, ax = plt.subplots(figsize=(10, 5))
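
The snippet cuts off after creating the figure; a minimal sketch of how the bar chart might be completed (assumed, using the dictionaries built above):

ax.bar(range(len(main_effects_gp)), list(main_effects_gp.values()))
ax.set_xticks(range(len(main_effects_gp)))
ax.set_xticklabels(list(main_effects_gp.keys()))
ax.set_ylabel('First-order Sobol index')
plt.show()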
Example #18
parameter_space = ParameterSpace([ContinuousParameter('x1', 0., 157.)])

# parameter_space = ParameterSpace(
#     [ContinuousParameter('x1', 0., 157.), ContinuousParameter('x2', 0., 157.), ContinuousParameter('x3', 0., 157.),
#      ContinuousParameter('x4', 0., 157.), ContinuousParameter('x5', 0., 157.), ContinuousParameter('x6', 0., 5999.),
#      ContinuousParameter('x7', 0., 999.), ContinuousParameter('x8', 0., 699.)])

latin_design = LatinDesign(parameter_space=parameter_space)
X0 = latin_design.get_samples(n_samples)
Y0 = training_function(X0)
#D0 = ((Y0 - target)**2).sum(axis=1)
#plotter = BayesOptPlotter(h_noiseless, target, xmin, xmax, X0=X0, Y0=Y0)

model = GPRegression(X0, Y0)
model_wrapped = GPyModelWrapper(model)
target = user_sample_vector
acq = L2_LCB(model=model_wrapped, target=target)

fit_update = lambda a, b: model.optimize_restarts(verbose=False)
bayesopt_loop = BayesianOptimizationLoop(model=model_wrapped,
                                         space=parameter_space,
                                         acquisition=acq)
bayesopt_loop.iteration_end_event.append(fit_update)
bayesopt_loop.run_loop(training_function, 5)

# 5. train and wrap the model in Emukit
# model_gpy = GPRegression(X, Y, normalizer=True)
#
# model_emukit = GPyModelWrapper(model_gpy)
# expected_improvement = ExpectedImprovement(model=model_emukit)
Example #19
    def __init__(self, gpy_model, n_restarts: int = 1):
        GPyModelWrapper.__init__(self, gpy_model, n_restarts)
        self.time_count = 0
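
This __init__ is evidently a method of a GPyModelWrapper subclass that accumulates timing state. A minimal sketch of plausible surrounding code (the class name and the optimize override are assumptions):

import time

class TimedGPyModelWrapper(GPyModelWrapper):
    def __init__(self, gpy_model, n_restarts: int = 1):
        GPyModelWrapper.__init__(self, gpy_model, n_restarts)
        self.time_count = 0  # cumulative seconds spent optimizing hyperparameters

    def optimize(self):
        start = time.time()
        GPyModelWrapper.optimize(self)
        self.time_count += time.time() - start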
Example #20
def main():
    print("######################")
    global target, X0, Y0, values, frac_M, frac_X, bo_flag

    #target_params = np.array([[0.14,0.4],[1.4,0.03]])

    #target = LiX_wrapper(True,'LiF','Rocksalt','JC',
    #                     target_params,False,False,eng)

    target = np.array([[-764.5, 6.012 * 0.99, 6.012 * 0.99, 6.012 * 0.99]])

    if focus == 'energy':
        target_comp = target[0, 0].reshape(1, -1)
    elif focus == 'constant':
        target_comp = target[0, 1].reshape(1, -1)
    else:
        target_comp = target[0, :4].reshape(1, -1)

    print('Target initialized!')

    latin_design = LatinDesign(parameter_space=parameter_space)
    X0 = latin_design.get_samples(INIT_POINTS)
    Y0 = np.array([])
    for x in X0:
        x = np.array([x])
        Y0 = np.append(Y0, f.evaluate(x))
    values = []

    for y in Y0:
        values.append(y.Y)

    values = np.asarray(values, dtype=float)

    ### Redundancy check
    if (values[:, 7:-1] == values[0, 7]).all():
        values = values[:, :7]
        frac_X = False

    if (values[:, 4:7] == values[0, 4]).all():
        values = values[:, :4]
        frac_M = False

    values = values.reshape(-1, np.max(np.shape(target)))
    bo_flag = True

    if focus == 'energy':
        values = values[:, 0].reshape(-1, 1)
    elif focus == 'constant':
        values = values[:, 1:4].reshape(-1, 3)

    ### BO Loop
    kern = Matern52(X0.shape[1], variance=1)
    model = GPRegression(X0,
                         values,
                         kernel=kern,
                         normalizer=True,
                         noise_var=NOISE)  # Kernel = None: RBF default

    model.optimize(optimizer='lbfgsb')
    model.optimize_restarts(num_restarts=50, verbose=False)
    model_wrapped = GPyModelWrapper(model)

    acq = L2_LCB(model=model_wrapped, target=target_comp, beta=np.float64(1.))
    # beta is the exploration constant
    bayesopt_loop = BayesianOptimizationLoop(model=model_wrapped,
                                             space=parameter_space,
                                             acquisition=acq)
    bayesopt_loop.run_loop(f, BO_ITER)

    return save(bayesopt_loop)
Example #21
def make_loop(loop_state):
    gpy_model = GPy.models.GPRegression(loop_state.X, loop_state.Y)
    model = GPyModelWrapper(gpy_model)
    return BayesianOptimizationLoop(space, model)
Example #22
    EI_totalR = []
    PI_totalR = []
    for ep in range(30):

        fun = env.reset(upper_bound=1, lower_bound=0)
        ppo.ppoMax = 0
        ppo.ppoMin = float('inf')
        ppo.ep_r = 0
        ppo.funCurMax = env.maxVal
        ppo.curFun = env.getCurFun()

        design = RandomDesign(parameter_space)  # Collect random points
        X = design.get_samples(num_data_points)
        Y = fun(X)
        model_gpy = GPRegression(X, Y)  # Train and wrap the model in Emukit
        model_emukit = GPyModelWrapper(model_gpy)
        ppo.model = model_emukit
        bo = BayesianOptimizationLoop(model=model_emukit,
                                      space=parameter_space,
                                      acquisition=ppo,
                                      batch_size=1)
        mu = np.array([np.mean(bo.loop_state.X)])[np.newaxis]
        var = np.array([np.var(bo.loop_state.X)])[np.newaxis]
        s = np.concatenate((mu, var), axis=1)
        boPPOep_r = []

        model_gpyEI = GPRegression(X, Y)  # Train and wrap the model in Emukit
        model_emukitEI = GPyModelWrapper(model_gpyEI)
        boEI = BayesianOptimizationLoop(model=model_emukitEI,
                                        space=parameter_space,
                                        acquisition=ExpectedImprovement(
Example #23
    for ep in range(EP_MAX):
        
        fun = env.reset(upper_bound=1, lower_bound=0)
        # ppo.ppoMax = 0
        ppo.ppoMin = env.maxVal
        ppo.ep_r = 0
        boPPOep_r = []
        # ppo.funCurMax = env.maxVal
        ppo.funCurMin = env.minVal
        ppo.curFun = env.getCurFun()

        design = RandomDesign(parameter_space)  # Collect random points
        X = design.get_samples(num_data_points)
        Y = fun(X)
        model_gpy = GPRegression(X, Y)  # Train and wrap the model in Emukit
        model_emukit = GPyModelWrapper(model_gpy)
        ppo.model = model_emukit
        bo = BayesianOptimizationLoop(model=model_emukit,
                                      space=parameter_space,
                                      acquisition=ppo,
                                      batch_size=1)
        mu_, var_ = bo.model.predict(bo.loop_state.X[-1].reshape(-1, 1))
        ppo.s_ = np.concatenate((mu_, var_), axis=1)
        for t in range(EP_LEN):    # in one episode
        
            bo.run_loop(fun, 1)
            # ppo.ppoMax = max(ppo.ppoMax,env.curFun(bo.loop_state.X[-1]))
            ppo.ppoMin = min(ppo.ppoMin, env.curFun(bo.loop_state.X[-1]))
            
            
            # boPPOep_r.append(ppo.ppoMax-env.maxVal)
Example #24
def model():
    x_init = np.random.rand(5, 2)
    y_init = np.random.rand(5, 1)
    model = GPRegression(x_init, y_init, RBF(2))
    return GPyModelWrapper(model)
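
Like Examples 7 and 8, this reads as a pytest fixture whose @pytest.fixture decorator was lost in extraction. A hypothetical consumer (the test body is an assumption; GPyModelWrapper.predict returns mean and variance arrays of shape (n, 1)):

import numpy as np

def test_predict_shapes(model):
    x = np.random.rand(3, 2)
    mean, variance = model.predict(x)
    assert mean.shape == (3, 1)
    assert variance.shape == (3, 1)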