from typing import Tuple, Union

import numpy as np

from emukit.bayesian_optimization.acquisitions import ExpectedImprovement
from emukit.core.acquisition import Acquisition
from emukit.core.interfaces import IDifferentiable, IModel


class ExpectedImprovementPerCost(Acquisition):

    def __init__(self, model: Union[IModel, IDifferentiable], cost_model: Union[IModel, IDifferentiable],
                 jitter: np.float64 = np.float64(0)) -> None:
        """
        This acquisition computes, for a given input, the expected improvement over the current best
        observed value per unit cost of evaluating the input. For more information see:

        Efficient Global Optimization of Expensive Black-Box Functions
        Jones, Donald R. and Schonlau, Matthias and Welch, William J.
        Journal of Global Optimization

        :param model: model that is used to compute the improvement.
        :param cost_model: model that predicts the cost of evaluating an input.
        :param jitter: parameter to encourage extra exploration.
        """

        self.model = model
        self.cost_model = cost_model
        self.jitter = jitter

        self.ei = ExpectedImprovement(model, jitter)

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """
        Computes the Expected Improvement per unit cost.

        :param x: points where the acquisition is evaluated.
        """
        improvement = self.ei.evaluate(x)

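        # Posterior mean of the cost model: the expected cost of evaluating x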
        mean, _ = self.cost_model.predict(x)

        return improvement / mean

    def evaluate_with_gradients(self, x: np.ndarray) -> Tuple:
        """
        Computes the Expected Improvement per unit cost and its derivative.

        :param x: locations where the evaluation with gradients is done.
        """

        improvement, dimprovement_dx = self.ei.evaluate_with_gradients(x)

        mean, _ = self.cost_model.predict(x)

        # Gradient of the *cost* model's predictive mean (not the objective model's)
        dmean_dx, _ = self.cost_model.get_prediction_gradients(x)

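        # Quotient rule: d(EI/cost)/dx = (dEI/dx * cost - dcost/dx * EI) / cost**2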
        return improvement / mean, (dimprovement_dx * mean - dmean_dx * improvement) / (mean ** 2)

    def has_gradients(self) -> bool:
        """Returns that this acquisition has gradients"""
        return True
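
A minimal usage sketch for the class above; the data, kernels, and grid here are illustrative, not from the original project (GPyModelWrapper implements both IModel and IDifferentiable):

import GPy
import numpy as np
from emukit.model_wrappers import GPyModelWrapper

X = np.random.rand(10, 1)
Y = np.sin(3 * X) + 0.05 * np.random.randn(10, 1)   # objective observations
C = 1.0 + X ** 2                                    # evaluation cost grows with x

objective_model = GPyModelWrapper(GPy.models.GPRegression(X, Y))
cost_model = GPyModelWrapper(GPy.models.GPRegression(X, C))

acquisition = ExpectedImprovementPerCost(objective_model, cost_model)
x_grid = np.linspace(0, 1, 100)[:, None]
best_next = x_grid[np.argmax(acquisition.evaluate(x_grid))]  # high EI per unit cost
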
Example #2
def test_categorical_variables():
    np.random.seed(123)

    def objective(x):
        return np.array(np.sum(x, axis=1).reshape(-1, 1))

    carol_spirits = ['past', 'present', 'yet to come']
    encoding = OneHotEncoding(carol_spirits)
    parameter_space = ParameterSpace([
        ContinuousParameter('real_param', 0.0, 1.0),
        CategoricalParameter('categorical_param', encoding)
    ])

    random_design = RandomDesign(parameter_space)
    x_init = random_design.get_samples(10)

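    # 4 columns: 1 continuous parameter + 3 one-hot columns for the 3 categories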
    assert x_init.shape == (10, 4)
    assert np.all(np.logical_or(x_init[:, 1:3] == 0.0, x_init[:, 1:3] == 1.0))

    y_init = objective(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    acquisition = ExpectedImprovement(model)

    loop = BayesianOptimizationLoop(parameter_space, model, acquisition)
    loop.run_loop(objective, 5)

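    # 10 initial points + 5 BO iterations = 15 evaluations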
    assert len(loop.loop_state.Y) == 15
    assert np.all(
        np.logical_or(loop.loop_state.X[:, 1:3] == 0.0,
                      loop.loop_state.X[:, 1:3] == 1.0))
Example #3
def bayesian_opt():

    # 2. ranges of the synth parameters
    syn1 = syn2 = syn3 = syn4 = syn5 = np.arange(158)
    syn6 = np.arange(6000)
    syn7 = np.arange(1000)
    syn8 = np.arange(700)

    # 2. synth parameter ranges combined into an 8D parameter space
    # parameter_space = ParameterSpace(
    #     [ContinuousParameter('x1', 0., 157.)])

    # parameter_space = ParameterSpace(
    #     [DiscreteParameter('x8', syn8)])

    parameter_space = ParameterSpace(
        [ContinuousParameter('x1', 0., 157.), ContinuousParameter('x2', 0., 157.), ContinuousParameter('x3', 0., 157.),
         ContinuousParameter('x4', 0., 157.), ContinuousParameter('x5', 0., 157.), ContinuousParameter('x6', 0., 5999.),
         ContinuousParameter('x7', 0., 999.), ContinuousParameter('x8', 0., 699.)])

    # parameter_space = ParameterSpace(
    #     [DiscreteParameter('x1', syn1), DiscreteParameter('x2', syn2), DiscreteParameter('x3', syn3),
    #      DiscreteParameter('x4', syn4), DiscreteParameter('x5', syn5), DiscreteParameter('x6', syn6),
    #      DiscreteParameter('x7', syn1), DiscreteParameter('x8', syn8)])

    # 3. collect random points
    design = RandomDesign(parameter_space)

    X = design.get_samples(num_data_points)  # X is a numpy array
    print("X=", X)

    # [is the below needed?]
    # UserFunction.evaluate(training_function, X)
    # I put UserFunctionWrapper in line 94

    # 4. define training_function as Y
    Y = training_function(X)

    # [is this needed?]
    # loop_state = create_loop_state(X, Y)

    # 5. train and wrap the model in Emukit
    model_gpy = GPRegression(X, Y, normalizer=True)

    model_emukit = GPyModelWrapper(model_gpy)
    expected_improvement = ExpectedImprovement(model=model_emukit)
    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=expected_improvement,
                                             batch_size=5)

    max_iterations = 15
    bayesopt_loop.run_loop(training_function, max_iterations)
    model_gpy.plot()
    plt.show()
    results = bayesopt_loop.get_results()
    # bayesopt_loop.loop_state.X
    print("X: ", bayesopt_loop.loop_state.X)
    print("Y: ", bayesopt_loop.loop_state.Y)
    print("cost: ", bayesopt_loop.loop_state.cost)
def test_loop():
    n_iterations = 5

    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    # Make GPy model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    acquisition = ExpectedImprovement(model)

    # Make loop and collect points
    bo = BayesianOptimizationLoop(model=model,
                                  space=space,
                                  acquisition=acquisition)
    bo.run_loop(UserFunctionWrapper(f),
                FixedIterationsStoppingCondition(n_iterations))

    # Check we got the correct number of points
    assert bo.loop_state.X.shape[0] == n_iterations + 5

    # Check the obtained results
    results = bo.get_results()

    assert results.minimum_location.shape[0] == 1
    assert results.best_found_value_per_iteration.shape[0] == n_iterations + 5
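
`f` is not shown in this snippet; any 1D objective returning an (n, 1) array works, for example:

def f(x):
    return np.sin(6 * x) + x ** 2  # hypothetical objective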
Example #5
def test_loop():
    n_iterations = 5

    x_init = np.random.rand(5, 1)
    y_init, y_constraint_init = f(x_init)

    # Make GPy objective model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    # Make GPy constraint model
    gpy_constraint_model = GPy.models.GPRegression(x_init, y_constraint_init)
    constraint_model = GPyModelWrapper(gpy_constraint_model)

    space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition = ExpectedImprovement(model)

    # Make loop and collect points
    bo = UnknownConstraintBayesianOptimizationLoop(
        model_objective=model, space=space, acquisition=acquisition, model_constraint=constraint_model
    )
    bo.run_loop(
        UserFunctionWrapper(f, extra_output_names=["Y_constraint"]), FixedIterationsStoppingCondition(n_iterations)
    )

    # Check we got the correct number of points
    assert bo.loop_state.X.shape[0] == n_iterations + 5
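
Here `f` must return both objective and constraint values to match `extra_output_names=["Y_constraint"]`; a hypothetical example:

def f(x):
    y = np.sin(6 * x) + x ** 2   # objective, shape (n, 1)
    y_constraint = x - 0.5       # illustrative constraint values, shape (n, 1)
    return y, y_constraint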
def test_batch_loop_fails_without_gradients_implemented():
    parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    model = mock.create_autospec(IModel)

    base_acquisition = ExpectedImprovement(model)

    batch_size = 10

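    # Batch collection uses local penalization, which needs gradients; a plain IModel mock has none.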
    with pytest.raises(ValueError):
        BayesianOptimizationLoop(parameter_space, model, base_acquisition,
                                 batch_size)
Example #8
def test_local_penalization_requires_gradients():
    parameter_space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)

    model = mock.create_autospec(IModel)

    acquisition = ExpectedImprovement(model)
    batch_size = 5

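    # Local penalization needs model gradients; the bare IModel mock provides none.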
    with pytest.raises(ValueError):
        LocalPenalizationPointCalculator(acquisition, acquisition_optimizer,
                                         model, parameter_space, batch_size)
def run_optimisation(current_range, freq_range, power_range):
    parameter_space = ParameterSpace([
        ContinuousParameter('current', current_range[0], current_range[1]),
        ContinuousParameter('freq', freq_range[0], freq_range[1]),
        ContinuousParameter('power', power_range[0], power_range[1])
    ])

    def function(X):
        current = X[:, 0]
        freq = X[:, 1]
        power = X[:, 2]
        out = np.zeros((len(current), 1))
        for g in range(len(current)):
            # Set JPA current, frequency & power here, then measure
            out[g, 0] = -get_SNR(plot=False)[-1]  # negative because we want to maximise SNR
        return out

    num_data_points = 10

    design = RandomDesign(parameter_space)
    X = design.get_samples(num_data_points)
    Y = function(X)

    model_gpy = GPRegression(X, Y)
    model_gpy.optimize()
    model_emukit = GPyModelWrapper(model_gpy)

    exp_imprv = ExpectedImprovement(model=model_emukit)
    optimizer = GradientAcquisitionOptimizer(space=parameter_space)
    point_calc = SequentialPointCalculator(exp_imprv, optimizer)

    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=exp_imprv,
                                             batch_size=1)

    stopping_condition = FixedIterationsStoppingCondition(i_max=100)

    bayesopt_loop.run_loop(function, stopping_condition)  # optimise the objective defined above

    coord_results = bayesopt_loop.get_results().minimum_location
    min_value = bayesopt_loop.get_results().minimum_value
    step_results = bayesopt_loop.get_results().best_found_value_per_iteration
    print(coord_results)
    print(min_value)

    return coord_results, abs(min_value)
Example #10
def test_local_penalization():
    parameter_space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)
    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)
    acquisition = ExpectedImprovement(model)
    batch_size = 5
    lp_calc = LocalPenalizationPointCalculator(acquisition,
                                               acquisition_optimizer, model,
                                               parameter_space, batch_size)

    loop_state = create_loop_state(x_init, y_init)
    new_points = lp_calc.compute_next_points(loop_state)
    assert new_points.shape == (batch_size, 1)
def test_local_penalization():
    np.random.seed(123)
    branin_fcn, parameter_space = branin_function()
    x_init = parameter_space.sample_uniform(10)

    y_init = branin_fcn(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    base_acquisition = ExpectedImprovement(model)

    batch_size = 10
    update_interval = 1

    lp = BayesianOptimizationLoop(parameter_space, model, base_acquisition,
                                  update_interval, batch_size)
    lp.run_loop(branin_fcn, 5)

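    # 10 initial points + 5 iterations × batch size 10 = 60 evaluations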
    assert len(lp.loop_state.Y) == 60
    def _get_proposal_function(self, model, space):

        # Define proposal function for multi-fidelity
        ei = ExpectedImprovement(model)

        def proposal_func(x):
            x_ = x[None, :]
            # Map to highest fidelity
            idx = np.ones((x_.shape[0], 1)) * self.high_fidelity

            x_ = np.insert(x_, self.target_fidelity_index, idx, axis=1)

            if space.check_points_in_domain(x_):
                # Log-EI, clipped at zero since EI can dip slightly negative numerically
                val = np.log(np.clip(ei.evaluate(x_)[0], 0.0, np.inf))
                if np.any(np.isnan(val)):
                    return np.array([-np.inf])
                else:
                    return val
            else:
                return np.array([-np.inf])

        return proposal_func
Example #13
            out[g, 0] = -simulation_wrapper(host, COMSOL_model, paramfile, w[g], t, l_ind[g], pen, omega, gap_cap[g], w_cap[g], l_cap[g], w_mesa, h_mesa, gap_ind[g])[0]
    return out

# Set up random seeding of parameter space
num_data_points = no_random_seeds
design = RandomDesign(parameter_space)
X = design.get_samples(num_data_points)
Y = q(X)

# Set up emukit model
model_gpy = GPRegression(X, Y)
model_gpy.optimize()
model_emukit = GPyModelWrapper(model_gpy)

# Set up Bayesian optimisation routine
exp_imprv = ExpectedImprovement(model=model_emukit)
optimizer = GradientAcquisitionOptimizer(space=parameter_space)
point_calc = SequentialPointCalculator(exp_imprv, optimizer)

# Bayesian optimisation routine
bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                         space=parameter_space,
                                         acquisition=exp_imprv,
                                         batch_size=1)

stopping_condition = FixedIterationsStoppingCondition(i_max=no_BO_sims)
bayesopt_loop.run_loop(q, stopping_condition)


# Results of Bayesian optimisation
coord_results = bayesopt_loop.get_results().minimum_location
Example #14
def expected_improvement_acquisition(gpy_model):
    return ExpectedImprovement(gpy_model)
Example #15
base_kernel = GPy.kern.RBF
kernels = make_non_linear_kernels(base_kernel, 2, X_train.shape[1] - 1)
nonlin_mf_model = NonLinearMultiFidelityModel(X_train,
                                              Y_train,
                                              n_fidelities=2,
                                              kernels=kernels,
                                              verbose=True,
                                              optimization_restarts=1)
for m in nonlin_mf_model.models:
    m.Gaussian_noise.variance.fix(0)

nonlin_mf_model.optimize()

from emukit.bayesian_optimization.acquisitions import ExpectedImprovement
ei = ExpectedImprovement(nonlin_mf_model)

ei_locations = np.atleast_2d(np.array([[0.4232, 0.6761]]))

print(ei.evaluate(ei_locations))

# ## Compute mean and variance predictions
#
# hf_mean_nonlin_mf_model, hf_var_nonlin_mf_model = nonlin_mf_model.predict(X_plot_high)
# hf_std_nonlin_mf_model = np.sqrt(hf_var_nonlin_mf_model)
#
# lf_mean_nonlin_mf_model, lf_var_nonlin_mf_model = nonlin_mf_model.predict(X_plot_low)
# lf_std_nonlin_mf_model = np.sqrt(lf_var_nonlin_mf_model)
#
#
# ## Plot posterior mean and variance of nonlinear multi-fidelity model
Example #16
def bo_loop(config, image_path, ai_model=None):
    target_function, space = eval(config.name)()
    data_dim = config.data_dim
    num_mix = config.num_mix
    init_num_data = config.init_num_data
    interval_std = config.interval_std
    interval = np.zeros((1, data_dim))
    std = np.zeros((1, data_dim))
    mean = np.zeros((1, data_dim))
    #set up data, scaling
    for ii in range(data_dim):
        interval[0, ii] = space.parameters[ii].max - space.parameters[ii].min
        std[0, ii] = interval[0, ii] / interval_std
        mean[0, ii] = (space.parameters[ii].max + space.parameters[ii].min) / 2
        space.parameters[ii].min = (space.parameters[ii].min -
                                    mean[0, ii]) / std[0, ii]
        space.parameters[ii].max = (space.parameters[ii].max -
                                    mean[0, ii]) / std[0, ii]

    results_list = [None] * config.repeated_runs
    best_value_per_iter = np.zeros((config.repeated_runs, config.bo_iter))
    npr = np.random.RandomState(123)
    for ii in tqdm(range(config.repeated_runs)):
        #initialize data points
        X_init = (npr.rand(init_num_data, data_dim) - 0.5) * interval + mean
        X_init_norm = (X_init - mean) / std
        Y_init = target_function(X_init)
        Y_init_norm, mean_Y, std_Y = standardize(Y_init)

        # normalized function
        function_norm = lambda x: (target_function(x * std + mean) - mean_Y
                                   ) / std_Y

        if config.is_GPY:
            kernel = GPy.kern.RBF(input_dim=data_dim,
                                  variance=npr.rand(1),
                                  lengthscale=npr.rand(data_dim),
                                  ARD=True)
            for jj in range(num_mix - 1):
                rbf_new = GPy.kern.RBF(input_dim=data_dim,
                                       variance=npr.rand(1),
                                       lengthscale=npr.rand(data_dim),
                                       ARD=True)
                kernel = kernel + rbf_new
            if config.is_sparseGP:
                z = (np.random.rand(config.num_inducing_pts, data_dim) -
                     0.5) * interval_std
                model_gp = GPy.models.SparseGPRegression(X_init_norm,
                                                         Y_init_norm,
                                                         kernel,
                                                         Z=z)
            else:
                model_gp = GPy.models.GPRegression(X_init_norm, Y_init_norm,
                                                   kernel)

            model_gp.Gaussian_noise.variance = config.epsilon
            model_gp.Gaussian_noise.variance.fix()
            model_emukit = GPyModelWrapperTime(model_gp)
            model_emukit.optimize()
        else:
            #Set up Emukit_BO_BQ_GP_Model
            model_emukit = Emukit_BO_BQ_GP_Model(X_init_norm, Y_init_norm,
                                                 config, ai_model)
            model_emukit.optimize()
            model_emukit.set_kernel()

        expected_improvement = ExpectedImprovement(model=model_emukit)

        bayesopt_loop = BayesianOptimizationLoop(
            model=model_emukit,
            space=space,
            acquisition=expected_improvement,
            batch_size=1)
        max_iterations = config.bo_iter
        bayesopt_loop.run_loop(function_norm, max_iterations)
        results = bayesopt_loop.get_results()
        #scale back the x and y
        results_save = edict()
        results_save.best_found_value_per_iteration = results.best_found_value_per_iteration[
            init_num_data:] * std_Y.item() + mean_Y.item()
        best_value_per_iter[
            ii, :] = results_save.best_found_value_per_iteration
        results_save.minimum_value = results.minimum_value * std_Y.item(
        ) + mean_Y.item()
        results_save.minimum_location = results.minimum_location * std.squeeze(
            0) + mean.squeeze(0)
        results_save.time_elapsed = model_emukit.time_count
        results_list[ii] = results_save

    best_value_mean = np.mean(best_value_per_iter, 0)
    best_value_std = np.std(best_value_per_iter, 0)
    plt.figure(figsize=(12, 8))
    plt.fill_between(np.arange(max_iterations) + 1,
                     best_value_mean - 0.2 * best_value_std,
                     best_value_mean + 0.2 * best_value_std,
                     color='red',
                     alpha=0.15)
    plt.plot(np.arange(max_iterations) + 1,
             best_value_mean,
             'or-',
             lw=2,
             label='Best found function value')
    plt.legend(loc=2, prop={'size': LEGEND_SIZE})
    plt.xlabel(r"iteration")
    plt.ylabel(r"$f(x)$")
    plt.grid(True)
    plt.savefig(image_path, format='pdf')

    return results_list
Example #17
if args.model_type == "bnn":
    model = Bohamiann(X_init=X_init, Y_init=Y_init, verbose=True)

elif args.model_type == "rf":
    model = RandomForest(X_init=X_init, Y_init=Y_init)
    with_gradients = False

elif args.model_type == "dngo":
    model = DNGO(X_init=X_init, Y_init=Y_init)
    with_gradients = False

elif args.model_type == "gp":
    model = BOGP(X_init=X_init, Y_init=Y_init)

if args.acquisition_type == "ei":
    acquisition = ExpectedImprovement(model)
elif args.acquisition_type == "pi":
    acquisition = ProbabilityOfImprovement(model)
elif args.acquisition_type == "nlcb":
    acquisition = NegativeLowerConfidenceBound(model)
elif args.acquisition_type == "logei":
    acquisition = LogExpectedImprovement(model)
elif args.acquisition_type == "entropy_search":
    model = BOGP(X_init=X_init, Y_init=Y_init)
    acquisition = EntropySearch(model, space=space)


# if with_gradients:
#    acquisition_optimizer = AcquisitionOptimizer(space)
# else:
acquisition_optimizer = DirectOptimizer(space)
Example #18
        ppo.model = model_emukit
        bo = BayesianOptimizationLoop(model=model_emukit,
                                      space=parameter_space,
                                      acquisition=ppo,
                                      batch_size=1)
        mu = np.array([np.mean(bo.loop_state.X)])[np.newaxis]
        var = np.array([np.var(bo.loop_state.X)])[np.newaxis]
        s = np.concatenate((mu, var), axis=1)
        boPPOep_r = []

        model_gpyEI = GPRegression(X, Y)  # Train and wrap the model in Emukit
        model_emukitEI = GPyModelWrapper(model_gpyEI)
        boEI = BayesianOptimizationLoop(model=model_emukitEI,
                                        space=parameter_space,
                                        acquisition=ExpectedImprovement(
                                            model=model_emukitEI,
                                            jitter=Exploration_parameter),
                                        batch_size=1)
        boEIep_r = []
        EIcurMax = 0
        EIcurMin = float('inf')

        model_gpyPI = GPRegression(X, Y)  # Train and wrap the model in Emukit
        model_emukitPI = GPyModelWrapper(model_gpyPI)
        boPI = BayesianOptimizationLoop(model=model_emukitPI,
                                        space=parameter_space,
                                        acquisition=ProbabilityOfImprovement(
                                            model=model_emukitPI,
                                            jitter=Exploration_parameter),
                                        batch_size=1)
        boPIep_r = []
Example #19
def acquisition():
    rng = np.random.RandomState(42)
    x_init = rng.rand(5, 2)
    y_init = rng.rand(5, 1)
    model = GPRegression(x_init, y_init, RBF(2))
    return ExpectedImprovement(GPyModelWrapper(model))
Example #20
def NonCausal_BO(num_trials, graph, dict_ranges, interventional_data_x, interventional_data_y, costs, 
			observational_samples, functions, min_intervention_value, min_y, intervention_variables, Causal_prior=False):

	## Get do function corresponding to the specified intervention_variables
	function_name = get_do_function_name(intervention_variables)
	do_function = graph.get_all_do()[function_name]

	## Compute input space dimension
	input_space = len(intervention_variables)

	## Initialise matrices for storing 
	current_best_x = np.zeros((num_trials + 1, input_space))
	current_best_y = np.zeros((num_trials + 1, 1))
	current_cost = np.zeros((num_trials + 1, 1))

	## Get functions for mean do and var do
	mean_function_do, var_function_do = mean_var_do_functions(do_function, observational_samples, functions)

	## Get interventional data
	data_x = interventional_data_x.copy()
	data_y = interventional_data_y.copy()

	
	## Assign the initial values 
	current_cost[0] = 0.
	current_best_y[0] = min_y
	current_best_x[0] = min_intervention_value
	cumulative_cost = 0.


	## Compute target function and space parameters
	target_function, space_parameters = Intervention_function(get_interventional_dict(intervention_variables),
																model = graph.define_SEM(), target_variable = 'Y', 
																min_intervention = list_interventional_ranges(graph.get_interventional_ranges(), intervention_variables)[0],
																max_intervention = list_interventional_ranges(graph.get_interventional_ranges(), intervention_variables)[1])


	if not Causal_prior:
		#### Define the model without Causal prior
		gpy_model = GPy.models.GPRegression(data_x, data_y, GPy.kern.RBF(input_space, lengthscale=1., variance=1.), noise_var=1e-10)
		emukit_model = GPyModelWrapper(gpy_model)
	else:
		#### Define the model with Causal prior
		mf = GPy.core.Mapping(input_space, 1)
		mf.f = lambda x: mean_function_do(x)
		mf.update_gradients = lambda a, b: None
		kernel = CausalRBF(input_space, variance_adjustment=var_function_do, lengthscale=1., variance=1., rescale_variance = 1., ARD = False)
		gpy_model = GPy.models.GPRegression(data_x, data_y, kernel, noise_var=1e-10, mean_function=mf)
		emukit_model = GPyModelWrapper(gpy_model)


	## BO loop
	start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
	for j in range(num_trials):
		print('Iteration', j)
		## Optimize model and get new evaluation point
		emukit_model.optimize()
		acquisition = ExpectedImprovement(emukit_model)
		optimizer = GradientAcquisitionOptimizer(space_parameters)
		x_new, _ = optimizer.optimize(acquisition)
		y_new = target_function(x_new)

		## Append the data
		data_x = np.append(data_x, x_new, axis=0)
		data_y = np.append(data_y, y_new, axis=0)
		emukit_model.set_data(data_x, data_y)

		## Compute cost
		x_new_dict = get_new_dict_x(x_new, intervention_variables)
		cumulative_cost += total_cost(intervention_variables, costs, x_new_dict)
		current_cost[j + 1] = cumulative_cost

		## Get current optimum
		results = np.concatenate((emukit_model.X, emukit_model.Y), axis=1)
		current_best_y[j + 1] = np.min(results[:, input_space])
		if results[results[:, input_space] == np.min(results[:, input_space]), :input_space].shape[0] > 1:
			best_x = results[results[:, input_space] == np.min(results[:, input_space]), :input_space][0]
		else:
			best_x = results[results[:, input_space] == np.min(results[:, input_space]), :input_space]
		current_best_x[j + 1] = best_x  # record the location of the current optimum
		print('Current best Y', np.min(results[:, input_space]))

	total_time = time.perf_counter() - start_time

	return (current_cost, current_best_x, current_best_y, total_time)