Example #1
class TestEntropySearchAcquisition(unittest.TestCase):
    def setUp(self):
        np.random.seed(1)

        X = np.array([[-1.5, -1], [1, 1.5], [3, 3]])
        y = 2 * -np.array([[-0.1], [.3], [.9]])
        bounds = [(-5, 5)]
        input_dim = X.shape[1]
        kern = GPy.kern.RBF(input_dim, variance=1., lengthscale=1.)
        self.model = GPModel(kern, noise_var=0.0, max_iters=0, optimize_restarts=0)
        self.model.updateModel(X, y, None, None)
        domain = [{'name': 'var_1', 'type': 'continuous', 'domain': bounds[0], 'dimensionality': 2}]
        self.space = Design_space(domain)

        self.mock_optimizer = Mock()

    def test_acquisition_function(self):
        es = AcquisitionEntropySearch(self.model, self.space, MockSampler(self.space))
        acquisition_value = es.acquisition_function(np.array([[1, 1]]))

        assert_allclose(acquisition_value, np.array([[-20.587977]]), 1e-5)

    def test_optimize(self):
        expected_optimum_position = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected_optimum_position
        es = AcquisitionEntropySearch(self.model, self.space, MockSampler(self.space), optimizer=self.mock_optimizer)

        optimum_position = es.optimize()

        assert optimum_position == expected_optimum_position
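For context, a sketch of the imports this test snippet appears to assume (MockSampler is a test helper defined elsewhere in the same file, not part of the public GPyOpt API):

import unittest
from unittest.mock import Mock

import numpy as np
from numpy.testing import assert_allclose

import GPy
from GPyOpt.models import GPModel
from GPyOpt.core.task.space import Design_space
from GPyOpt.acquisitions import AcquisitionEntropySearch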
Example #2
def plot1D(filename='GP_results/test1.txt',
           magnet_list=['h13', 'v13', 'h31', 'v31']):
    ''' Plots the GP at every iteration. Requires some manual, hardcoded interaction. '''

    reader = np.asmatrix(np.loadtxt(filename))
    x_observed = np.asarray(reader[:, 0])  # Hardcoded!!!!!!!!!
    f_observed = np.asarray(reader[:, -1])  # Hardcoded!!!!!!!!!

    n_rows = math.ceil(len(f_observed) / 5)
    f_mean, sub_mean = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_mean.tight_layout()  # to adjust spacing between subplots
    f_acq, sub_acq = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_acq.tight_layout()  # to adjust spacing between subplots

    num_points = 1000
    X_grid = np.linspace(-10, 10, num_points)[:, None]
    #X_grid = np.linspace(-15, 15, num_points)[:,None]
    for i in range(n_rows):
        j = 0
        while len(f_observed) > 5 * i + j and j < 5:
            X = x_observed[0:(5 * i + j + 1)]
            Y = f_observed[0:(5 * i + j + 1)]
            mean, Cov, variance, m = GP_analysis(X, Y, X_grid)
            sub_mean[i, j].plot(X_grid, mean)
            sub_mean[i, j].fill_between(X_grid[:, 0],
                                        (mean.T + variance.T).T[:, 0],
                                        (mean.T - variance.T).T[:, 0],
                                        facecolor="gray",
                                        alpha=0.15)
            sub_mean[i, j].scatter(X, Y)

            model = GPModel(optimize_restarts=1, verbose=True)
            model.model = m
            space = Design_space([{
                'name': 'var1',
                'type': 'continuous',
                'domain': (-10, 10)
            }])
            acq = AcquisitionLCB(model, space,
                                 exploration_weight=1)  # Hardcoded!!!!!!!!!
            alpha_full = acq.acquisition_function(X_grid)
            sub_acq[i, j].plot(X_grid, alpha_full)
            j = j + 1

    timestamp = (datetime.datetime.now()).strftime("%m-%d_%H-%M-%S")
    f_mean.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_mean.savefig(f'GP_results/dis_mean_M1-{timestamp}.pdf')
    f_acq.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_acq.savefig(f'GP_results/dis_acq_M1-{timestamp}.pdf')
    #plt.show()
    plt.close()
    return None
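A sketch of the module-level imports that plot1D (and plot2D further below) appear to rely on; GP_analysis is a project-specific helper which, judging from its use here, fits a GP to the observations and returns the posterior mean, covariance, variance and the fitted GPy model:

import math
import datetime

import numpy as np
import matplotlib.pyplot as plt

from GPyOpt.models import GPModel
from GPyOpt.core.task.space import Design_space
from GPyOpt.acquisitions import AcquisitionLCB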
Example #3
    def setUp(self):
        np.random.seed(1)

        X = np.array([[-1.5, -1], [1, 1.5], [3, 3]])
        y = 2 * -np.array([[-0.1], [.3], [.9]])
        bounds = [(-5, 5)]
        input_dim = X.shape[1]
        kern = GPy.kern.RBF(input_dim, variance=1., lengthscale=1.)
        self.model = GPModel(kern, noise_var=0.0, max_iters=0, optimize_restarts=0)
        self.model.updateModel(X, y, None, None)
        domain = [{'name': 'var_1', 'type': 'continuous', 'domain': bounds[0], 'dimensionality': 2}]
        self.space = Design_space(domain)

        self.mock_optimizer = Mock()
Example #4
class CustomCostModel(CostModel):
    def __init__(self, kernel, cost_withGradients):
        super(CustomCostModel, self).__init__(cost_withGradients)

        # --- Set-up evaluation cost
        if self.cost_type is None:
            self.cost_withGradients = CostModel.constant_cost_withGradients
            self.cost_type = 'Constant cost'

        elif self.cost_type == 'evaluation_time':
            self.cost_model = GPModel(kernel=kernel,
                                      exact_feval=False,
                                      normalize_Y=False,
                                      optimize_restarts=5)
            self.cost_withGradients = self._cost_gp_withGradients
            self.num_updates = 0
        else:
            self.cost_withGradients = cost_withGradients
            self.cost_type = 'User defined cost'

    def get_model_parameters(self):
        """
        Returns a 2D numpy array with the parameters of the model
        """
        return np.atleast_2d(self.cost_model.get_model_parameters())
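A minimal usage sketch, assuming the CostModel base class is GPyOpt's GPyOpt.core.task.cost.CostModel: passing 'evaluation_time' makes the class fit a GP with the supplied kernel to observed evaluation times.

import GPy

# Assumes CustomCostModel is defined as above; the Matern-5/2 kernel is an
# arbitrary illustrative choice.
cost = CustomCostModel(GPy.kern.Matern52(input_dim=2), 'evaluation_time')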
Example #5
    def test_save_gp_2d(self):
        k = GPy.kern.Matern52(input_dim=2)
        m = GPModel(kernel=k)
        myBopt = BayesianOptimization(f=self.f_2d,
                                      domain=self.domain_2d,
                                      model=m)
        myBopt.run_optimization(max_iter=1, verbosity=False)
        myBopt.save_models(self.outfile_path)
        self.check_output_model_file(['Iteration'])
Example #6
    def __init__(self, kernel, cost_withGradients):
        super(CustomCostModel, self).__init__(cost_withGradients)

        # --- Set-up evaluation cost
        if self.cost_type is None:
            self.cost_withGradients = CostModel.constant_cost_withGradients
            self.cost_type = 'Constant cost'

        elif self.cost_type == 'evaluation_time':
            self.cost_model = GPModel(kernel=kernel,
                                      exact_feval=False,
                                      normalize_Y=False,
                                      optimize_restarts=5)
            self.cost_withGradients = self._cost_gp_withGradients
            self.num_updates = 0
        else:
            self.cost_withGradients = cost_withGradients
            self.cost_type = 'User defined cost'
Example #7
    def test_save_gp_2d_ard(self):
        """
        This was previously an edge case: when some parameters were vectors,
        the naming of the columns was incorrect.
        """
        k = GPy.kern.Matern52(input_dim=2, ARD=True)
        m = GPModel(kernel=k)
        myBopt = BayesianOptimization(f=self.f_2d,
                                      domain=self.domain_2d,
                                      model=m)
        myBopt.run_optimization(max_iter=1, verbosity=False)
        myBopt.save_models(self.outfile_path)
        self.check_output_model_file(['Iteration'])
Example #8
    def get_model(self, config_space):
        from GPyOpt.models import GPModel
        kernel = self.get_kernel(config_space)
        gp_model = GPModel(kernel=kernel,
                           noise_var=None,
                           exact_feval=True,
                           optimizer="lbfgs",
                           max_iters=1000,
                           optimize_restarts=5,
                           sparse=False,
                           num_inducing=10,
                           verbose=False,
                           ARD=self.ARD)
        return GPyGaussianProcess(gp_model)
Example #9
    m.kern.lengthscale.set_prior(GPy.priors.Gaussian(1, 1))

    m.Gaussian_noise.variance.unconstrain_positive()
    m.Gaussian_noise.variance.set_prior(GPy.priors.Gaussian(10, 10))

    m.optimize('bfgs', max_iters=100)  # Hyper-parameters are optimized here
    print(m)

    f = open(f"GP_results/opt_params_{timestamp}.txt", "a+")
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    text = ansi_escape.sub('', str(m))
    f.write(text + '\n')
    f.close()

    # Find next point
    model = GPModel(optimize_restarts=1, verbose=True)
    model.model = m
    #acq = AcquisitionEI(model, space, jitter = 1)
    acq = AcquisitionLCB(
        model, space,
        exploration_weight=0.5)  # Hardcoded HYPER_PARAMETER!!!!!!!!
    alpha_full = acq.acquisition_function(X_grid)
    magnet_values = X_grid[np.argmin(alpha_full), :]

    print("Min LCB: ", np.argmin(alpha_full), min(alpha_full),
          X_grid[np.argmin(alpha_full), :])
    print("Max LCB: ", np.argmax(alpha_full), max(alpha_full),
          X_grid[np.argmax(alpha_full), :])
    '''
	if (len(magnet_list)==1):
		gp.plot1D(f'GP_results/correctorValues_Distance_{timestamp}.txt')
Example #10
def plot2D(filename='GP_results/test2.txt',
           magnet_list=['h13', 'v13', 'h31', 'v31']):
    ''' Plots the GP at every iteration. Requires some manual, hardcoded interaction. '''

    reader = np.asmatrix(np.loadtxt(filename))
    xy_observed = np.asarray(reader[:, 0:2])  # Hardcoded!!!!!!!!!
    f_observed = np.asarray(reader[:, -1])  # Hardcoded!!!!!!!!!

    n_rows = math.ceil(len(f_observed) / 5) + 1
    f_mean, sub_mean = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_mean.tight_layout()  # to adjust spacing between subplots
    f_sigma, sub_sigma = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_sigma.tight_layout()  # to adjust spacing between subplots
    f_acq, sub_acq = plt.subplots(n_rows, 5, sharex=True, sharey=True)
    f_acq.tight_layout()  # to adjust spacing between subplots

    num_points = 100
    XY_grid = np.mgrid[-10:10:0.3,
                       -10:10:0.3].reshape(2, -1).T  # Hardcoded!!!!!!!!!
    for i in range(n_rows - 1):
        j = 0
        while len(f_observed) > 5 * i + j and j < 5:
            XY = xy_observed[0:(5 * i + j + 1)]
            Z = f_observed[0:(5 * i + j + 1)]
            mean, Cov, variance, m = GP_analysis(XY, Z, XY_grid)
            xx = np.asarray(XY_grid[:, 0])
            yy = np.asarray(XY_grid[:, 1])
            xo = np.asarray(XY[:, 0]).reshape(-1)
            yo = np.asarray(XY[:, 1]).reshape(-1)
            sub_mean[i, j].scatter(xx,
                                   yy,
                                   c=mean.T[0],
                                   vmin=min(mean.T[0]),
                                   vmax=max(mean.T[0]),
                                   edgecolors='none',
                                   cmap='GnBu')
            sub_mean[i, j].scatter(xo, yo, c='k', marker='s')

            sub_sigma[i, j].scatter(xx,
                                    yy,
                                    c=variance,
                                    vmin=min(variance),
                                    vmax=max(variance),
                                    edgecolors='none')
            sub_sigma[i, j].scatter(xo, yo, c='white')

            model = GPModel(optimize_restarts=1, verbose=True)
            model.model = m
            space = Design_space([{
                'name': 'var1',
                'type': 'continuous',
                'domain': (-10, 10)
            }, {
                'name': 'var2',
                'type': 'continuous',
                'domain': (-10, 10)
            }])
            acq = AcquisitionLCB(model, space,
                                 exploration_weight=1)  # Hardcoded!!!!!!!!!
            alpha_full = acq.acquisition_function(XY_grid)
            sub_acq[i, j].scatter(xx,
                                  yy,
                                  c=alpha_full.T[0],
                                  vmin=min(alpha_full.T[0]),
                                  vmax=max(alpha_full.T[0]),
                                  edgecolors='none',
                                  cmap='GnBu')
            sub_acq[i, j].scatter(xo, yo, c='k', marker='s')
            minXY = XY_grid[np.argmin(alpha_full)]
            sub_acq[i, j].scatter(minXY[0], minXY[1], marker='P')

            j = j + 1

    timestamp = (datetime.datetime.now()).strftime("%m-%d_%H-%M-%S")
    f_mean.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_mean.savefig(f'GP_results/dis_mean_M1_M2-{timestamp}.pdf')
    f_sigma.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_sigma.savefig(f'GP_results/dis_sigma_M1_M2-{timestamp}.pdf')
    f_acq.subplots_adjust(wspace=0.3, top=None, bottom=None)
    f_acq.savefig(f'GP_results/dis_acq_M1_M2-{timestamp}.pdf')
    #plt.show()
    plt.close()
Example #11
def main(n_interv=3):
    if n_interv == 2:
        domain = [{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (0, 6)
        }, {
            'name': 'var_2',
            'type': 'continuous',
            'domain': (0, 0.5)
        }]
        kern = GPy.kern.RBF(input_dim=2,
                            variance=1,
                            lengthscale=[1., 0.05],
                            ARD=True)
        model = GPModel(kernel=kern, noise_var=0.1, max_iters=0)

        teacher_env = create_teacher_env(obs_from_training=True)
        student_final_env = small_base_cenv_fn()

        def bo_objective(thresholds):
            thresholds = np.array(thresholds)
            if thresholds.ndim == 2:
                thresholds = thresholds[0]
            policy = SingleSwitchPolicy(thresholds)
            return evaluate_single_switch_policy(policy, teacher_env,
                                                 student_final_env)
    elif n_interv == 3:
        domain = [{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-0.5, 5.5)
        }, {
            'name': 'var_2',
            'type': 'continuous',
            'domain': (0, 0.2)
        }, {
            'name': 'var_3',
            'type': 'continuous',
            'domain': (-0.5, 5.5)
        }, {
            'name': 'var_4',
            'type': 'continuous',
            'domain': (0, 0.2)
        }, {
            'name': 'var_5',
            'type': 'discrete',
            'domain': (0, 1, 2)
        }, {
            'name': 'var_6',
            'type': 'discrete',
            'domain': (0, 1, 2)
        }, {
            'name': 'var_7',
            'type': 'discrete',
            'domain': (0, 1, 2)
        }]

        kern = GPy.kern.RBF(input_dim=7,
                            variance=1,
                            lengthscale=[1., 0.05, 1, 0.05, 0.5, 0.5, 0.5],
                            ARD=True)
        kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(1, 1),
                                    np.array([0, 2]))
        kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(0.05, 0.02),
                                    np.array([1, 3]))
        kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(0.2, 0.2),
                                    np.array([4, 5, 6]))
        kern.variance.set_prior(GPy.priors.Gamma.from_EV(1, 0.2))
        model = GPModel(kernel=kern, noise_var=0.05, max_iters=1000)

        teacher_env = create_teacher_env(obs_from_training=True)
        student_final_env = small_base_cenv_fn()

        def init_teaching_policy(params, name=None):
            params = np.squeeze(np.array(params))
            thresholds = params[:4]
            thresholds = thresholds.reshape(2, 2)
            available_actions = params[4:].astype(np.int64)
            policy = SingleSwitchPolicy(thresholds,
                                        available_actions,
                                        name=name)
            return policy

        def bo_objective(params):
            policy = init_teaching_policy(params)
            return evaluate_single_switch_policy(policy, teacher_env,
                                                 student_final_env)

    # Logging dir
    exp_starting_time = datetime.now().strftime('%d_%m_%y__%H_%M_%S')
    results_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               os.pardir, os.pardir, os.pardir, 'results',
                               'flake')
    base_dir = os.path.join(results_dir, 'teacher_training', exp_starting_time)
    os.makedirs(base_dir, exist_ok=True)

    my_bo = BayesianOptimization(bo_objective,
                                 domain=domain,
                                 initial_design_numdata=10,
                                 initial_design_type='random',
                                 acquisition_type='LCB',
                                 maximize=True,
                                 normalize_Y=True,
                                 model_update_interval=1,
                                 model=model)

    my_bo.suggest_next_locations()  # Creates the GP model
    my_bo.model.model['Gaussian_noise.variance'].set_prior(
        GPy.priors.Gamma.from_EV(0.01, 0.1))

    t = time.time()
    my_bo.run_optimization(20,
                           report_file=os.path.join(base_dir, 'bo_report.txt'),
                           evaluations_file=os.path.join(
                               base_dir, 'bo_evaluations.csv'),
                           models_file=os.path.join(base_dir, 'bo_model.csv'))
    print(f'Optimization complete in {time.time() - t}')
    print(f'Optimal threshold: {my_bo.x_opt}')
    print(f'Optimal return: {my_bo.fx_opt}')
    np.savez(os.path.join(base_dir, 'solution.npz'),
             xopt=my_bo.x_opt,
             fxopt=my_bo.fx_opt)
    trained_policy = init_teaching_policy(my_bo.x_opt)
    save_path = os.path.join(base_dir, 'trained_teacher')
    trained_policy.save(save_path)
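A brief sketch of what GPy.priors.Gamma.from_EV does, since the priors above lean on it heavily (this reflects GPy's parameterization by mean and variance, stated here as an assumption): from_EV(E, V) builds a Gamma prior with shape a = E**2 / V and rate b = E / V.

import GPy

# from_EV(1, 0.2) corresponds to shape a = 1**2 / 0.2 = 5 and rate b = 1 / 0.2 = 5
prior = GPy.priors.Gamma.from_EV(1, 0.2)
print(prior)  # should report a Gamma prior with these shape/rate values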
Example #12
def main():
    domain = [{
        'name': 'var_1',
        'type': 'continuous',
        'domain': (-200, 200)
    }, {
        'name': 'var_2',
        'type': 'continuous',
        'domain': (0, 6)
    }, {
        'name': 'var_3',
        'type': 'discrete',
        'domain': (0, 1)
    }, {
        'name': 'var_4',
        'type': 'discrete',
        'domain': (0, 1)
    }]
    kern = GPy.kern.RBF(input_dim=4,
                        variance=1,
                        lengthscale=[20, 1, 0.1, 0.1],
                        ARD=True)
    kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(20, 4), np.array([0]))
    kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(1, 0.3),
                                np.array([1]))
    kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(0.2, 0.2),
                                np.array([2, 3]))
    kern.variance.set_prior(GPy.priors.Gamma.from_EV(1, 0.2))

    model = GPModel(kernel=kern, noise_var=0.01, max_iters=1000)
    # bo_objective = lambda x: x[0, 0] * x[0, 2] + x[0, 1] * x[0, 3]

    exp_starting_time = datetime.now().strftime('%d_%m_%y__%H_%M_%S')
    base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            os.pardir, os.pardir, os.pardir, 'results',
                            'lunar_lander', 'teacher_training',
                            exp_starting_time)
    os.makedirs(base_dir, exist_ok=True)

    def init_teaching_policy(params, name=None):
        params = np.squeeze(np.array(params))
        params = np.copy(params)
        thresholds = params[:2]
        available_actions = params[2:].astype(np.int64)
        policy = SingleSwitchPolicy(thresholds, available_actions)
        return policy

    def bo_objective(params):
        teacher_env_kwargs = dict(sensor_noise=[0.0] * 8,
                                  n_layers=2,
                                  B=120,
                                  time_steps_lim=int(1.5e6),
                                  original=False)
        policy_list = [init_teaching_policy(params) for _ in range(10)]
        return evaluate_parallel(policy_list,
                                 base_dir=base_dir,
                                 teacher_env_kwargs=teacher_env_kwargs)

    # Initialize with one value per configuration
    initial_X = np.array([[0, 3, 0, 0], [0, 3, 0, 1], [0, 3, 1, 0],
                          [0, 3, 1, 1]])

    my_bo = BayesianOptimization(bo_objective,
                                 domain=domain,
                                 initial_design_numdata=0,
                                 initial_design_type='random',
                                 acquisition_type='LCB',
                                 maximize=True,
                                 normalize_Y=True,
                                 model_update_interval=1,
                                 X=initial_X,
                                 model=model)
    my_bo.suggest_next_locations()  # Creates the GP model
    my_bo.model.model['Gaussian_noise.variance'].set_prior(
        GPy.priors.Gamma.from_EV(0.01, 0.1))

    t = time.time()
    my_bo.run_optimization(10,
                           report_file=os.path.join(base_dir, 'bo_report.txt'),
                           evaluations_file=os.path.join(
                               base_dir, 'bo_evaluations.csv'),
                           models_file=os.path.join(base_dir, 'bo_model.csv'),
                           verbosity=True)
    print(f'Optimization complete in {time.time() - t}')
    print(f'Policy with optimal observation: {my_bo.x_opt}')
    print(f'Value of the optimal observation: {my_bo.fx_opt}')

    np.savez(os.path.join(base_dir, 'solution.npz'),
             xopt=my_bo.x_opt,
             fxopt=my_bo.fx_opt,
             X=my_bo.X,
             Y=my_bo.Y)
    trained_policy = init_teaching_policy(my_bo.x_opt)
    save_path = os.path.join(base_dir, 'trained_teacher')
    trained_policy.save(save_path)
Example #13
def main():

    # input the desired rain and sun occlusions

    rain_des = 25  #float(input("Please enter the desired rain_occlusion: "))
    sun_des = 25  #float(input("Please enter the desired sun_occlusion: "))

    x_true = np.array([[rain_des], [sun_des]])
    w_train_red, x_train_red, bottom_top_heights = mpb.load_raw_data(
        list_attrib)
    x_true = x_true.transpose()
    # load the 11 point grid
    global supports
    supports = load_grids()
    output_attrib = ['occlusion_rain', 'occlusion_sun']
    # go one platform at a time
    # how many possibilities?
    # how do i represent this?

    # indicator vector with length of # possible configurations on grid
    # need to discretize the number of possible configurations

    # generate platforms
    global platforms
    platforms = generate_platform.main()

    # X_constraint, y_constraint = compute_possible_positions(supports, platforms)
    #print(y_constraint.shape)
    #y_constraint = y_constraint.reshape(y_constraint.shape[0],1)
    # make GP model for constraint

    #kern = GPy.kern.RBF(input_dim=10,ARD=True)
    #constraint_model = GPy.models.GPRegression(X_constraint,y_constraint,kernel=kern)
    #mean, var = regr.predict()
    constraint_model = GPModel(optimize_restarts=5, verbose=False)
    model = GPModel(optimize_restarts=1, verbose=False)
    lower_xy = -6
    upper_xy = 6

    lower_h = 4.5
    upper_h = 19
    bounds = [
        {
            'name': 'x0',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'y0',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'x1',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'y1',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'x2',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'y2',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'x3',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'y3',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'x4',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        {
            'name': 'y4',
            'type': 'continuous',
            'domain': (lower_xy, upper_xy)
        },
        #{'name': 'h1', 'type': 'continuous', 'domain': (lower_h, upper_h)},
        #{'name': 'h2', 'type': 'continuous', 'domain': (lower_h, upper_h)},
        #{'name': 'h3', 'type': 'continuous', 'domain': (lower_h, upper_h)}
    ]
    design_space = GPyOpt.Design_space(space=bounds)
    acquisition_optimizer = AcquisitionOptimizer(design_space)
    acquisition = jitter_integrated_EI(model=model,
                                       constraint_model=constraint_model,
                                       space=design_space,
                                       optimizer=acquisition_optimizer)
    evaluator = Sequential(acquisition)
    global xy_ind
    xy_ind = list(chain.from_iterable((i, i + 1) for i in range(0, 50, 10)))
    global h_ind
    h_ind = [50, 52, 51]
    global all_inds
    all_inds = xy_ind + h_ind

    resi = run_bo_(platforms,
                   x_train_red,
                   x_true,
                   bottom_top_heights,
                   output_attrib,
                   design_space,
                   model=model,
                   acquisition=acquisition,
                   evaluator=evaluator,
                   tol=0.009,
                   supports=supports)
    platforms[0, xy_ind] = resi.reshape(len(xy_ind), )
    toptobottom.plot_platform_grids(
        platforms,
        x_train_red,
        v=5,
        u=8,
        list_att=['OccRain', 'OccSun', 'Surf', 'Outline'],
        list_plot_x=[[0, 1], [2, 3]],
        samp_to_plot=0,
        grids=supports)