Example 1
def break_grid_bo(w_, garden_index, grids):
    # bound each pole's (x, y) within +/-0.8 of its current position
    num_poles = grids.shape[0]
    bounds = []
    for i in range(num_poles):
        name_s = "p" + str(i)
        lower_x = grids[i, 0] - 0.8
        upper_x = grids[i, 0] + 0.8
        lower_y = grids[i, 1] - 0.8
        upper_y = grids[i, 1] + 0.8
        domainx = (lower_x, upper_x)
        domainy = (lower_y, upper_y)
        dx = {'name': name_s + "x", 'type': 'continuous', 'domain': domainx}
        dy = {'name': name_s + "y", 'type': 'continuous', 'domain': domainy}
        bounds.append(dx)
        bounds.append(dy)

    Y_init, pole_platform, cc, _ = all_constraints(w_, garden_index, grids)

    loss = 1000
    current_iter = 0
    X_step = grids.reshape(1, num_poles * 2)  # pair Y_init with the grid it was computed from
    Y_step = np.empty((1, 1))
    Y_step[0, 0] = Y_init
    context = {}

    while loss > 2:
        if current_iter >= 200:
            return grids
        bo_step = BayesianOptimization(f=None,
                                       model_type="GP",
                                       domain=bounds,
                                       X=X_step,
                                       Y=Y_step,
                                       maximize=False,
                                       acquisition_type="LCB")
        try:
            x_next = bo_step.suggest_next_locations(context=context)
        except TypeError:
            # older GPyOpt versions do not accept a context argument
            x_next = bo_step.suggest_next_locations()
        grids = x_next.reshape(num_poles, 2)
        loss, pole_platform, cc, good_ind = all_constraints(
            w_, garden_index, grids)
        for i in good_ind:
            context["p{}x".format(i)] = grids[i, 0]
            context["p{}y".format(i)] = grids[i, 1]
        print("Iteration {} has loss {}".format(current_iter, loss))
        print("POLE2PLAT {}".format(pole_platform))
        y_tmp = np.empty((1, 1))
        y_tmp[0, 0] = loss
        X_step = np.vstack((X_step, x_next))
        Y_step = np.vstack((Y_step, y_tmp))
        current_iter += 1

    return grids
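
The pattern above drives GPyOpt in an "ask-tell" fashion: with f=None, the optimizer only fits a surrogate to the (X, Y) pairs it is given and suggests one new point per construction. A minimal, self-contained sketch of that loop, using an illustrative toy objective in place of the original all_constraints:

import numpy as np
from GPyOpt.methods import BayesianOptimization

def toy_objective(x):
    # x has shape (n, 2); returns a column vector of losses
    return np.sum((x - 0.3) ** 2, axis=1, keepdims=True)

bounds = [{'name': 'x0', 'type': 'continuous', 'domain': (0, 1)},
          {'name': 'x1', 'type': 'continuous', 'domain': (0, 1)}]

X_step = np.random.uniform(0, 1, size=(3, 2))  # initial design
Y_step = toy_objective(X_step)

for _ in range(10):
    bo_step = BayesianOptimization(f=None, domain=bounds,
                                   model_type="GP", acquisition_type="LCB",
                                   X=X_step, Y=Y_step, maximize=False)
    x_next = bo_step.suggest_next_locations()
    X_step = np.vstack((X_step, x_next))
    Y_step = np.vstack((Y_step, toy_objective(x_next)))

print(X_step[np.argmin(Y_step)])  # best point observed so far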
Example 2
    def test_next_locations_pending(self):
        func = GPyOpt.objective_examples.experiments1d.forrester()
        domain = [{'name': 'var1', 'type': 'continuous', 'domain': (0, 1)}]
        X_init = np.array([[0.0], [0.5], [1.0]])
        Y_init = func.f(X_init)

        np.random.seed(1)
        bo_no_pending = BayesianOptimization(f=None, domain=domain, X=X_init, Y=Y_init)
        x_no_pending = bo_no_pending.suggest_next_locations()

        np.random.seed(1)
        bo_pending = BayesianOptimization(f=None, domain=domain, X=X_init, Y=Y_init, de_duplication=True)
        x_pending = bo_pending.suggest_next_locations(pending_X=x_no_pending)

        self.assertFalse(np.isclose(x_pending, x_no_pending))
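
The test above checks that, with de_duplication=True, a location already reported as pending is not suggested again. A sketch of how pending_X serves an asynchronous setup, with an illustrative objective and domain:

import numpy as np
from GPyOpt.methods import BayesianOptimization

f = lambda x: (x - 0.5) ** 2
domain = [{'name': 'var1', 'type': 'continuous', 'domain': (0, 1)}]
X = np.array([[0.0], [0.5], [1.0]])
Y = f(X)

bo = BayesianOptimization(f=None, domain=domain, X=X, Y=Y, de_duplication=True)
x_worker_1 = bo.suggest_next_locations()                      # dispatched, result pending
x_worker_2 = bo.suggest_next_locations(pending_X=x_worker_1)  # avoids resuggesting it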
Example 4
    def setUp(self):
        np.random.seed(123)
        domain          = [{'name': 'var1', 'type': 'continuous', 'domain': (-5, 5), 'dimensionality': 5}]
        space           = Design_space(domain)
        func            = alpine1(input_dim=5, bounds=space.get_bounds())
        bo              = BayesianOptimization(f=func.f, domain=domain)
        context         = {'var1_1': 0.3, 'var1_2': 0.4}
        context_manager = ContextManager(space, context)
        x0              = np.array([[0, 0, 0, 0, 0]])

        # initialize the model in the least intrusive way possible
        bo.suggest_next_locations()

        f = bo.acquisition.acquisition_function
        f_df = bo.acquisition.acquisition_function_withGradients
        self.problem_with_context = OptimizationWithContext(x0=x0, f=f, df=None, f_df=f_df, context_manager=context_manager)
        self.x = np.array([[3, -3, 3]])
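
The variable-fixing that ContextManager performs is also exposed directly through suggest_next_locations(context=...); for a multi-dimensional variable the components are addressed as var1_1, var1_2, and so on. A minimal sketch on an illustrative 3-d problem:

import numpy as np
from GPyOpt.methods import BayesianOptimization

domain = [{'name': 'var1', 'type': 'continuous', 'domain': (-5, 5),
           'dimensionality': 3}]
f = lambda x: np.sum(x ** 2, axis=1, keepdims=True)
X = np.random.uniform(-5, 5, size=(5, 3))
Y = f(X)

bo = BayesianOptimization(f=None, domain=domain, X=X, Y=Y)
# hold var1_1 fixed at 0.3; only the remaining two coordinates are optimized
x_next = bo.suggest_next_locations(context={'var1_1': 0.3})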
Example 5
def propose_hypers(bayes_opt_results, bayes_opt_setup):
    """Proposes hyperparameters given the current results.

    Args:
        results: instance of BayesOptResults.
        bayes_opt_setup: instance of BayesOptSetup
    Returns:
        x: the proposed point.
    """
    bayes_opt = BayesianOptimization(f=None, domain=bayes_opt_setup.domain(),
                                     X=bayes_opt_results.xs, Y=bayes_opt_results.ys)
    proposed_x = bayes_opt.suggest_next_locations()

    return proposed_x, bayes_opt
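
BayesOptResults and BayesOptSetup are not shown in this snippet; a hypothetical call using stand-ins that provide only the attributes the function touches (.xs, .ys and .domain()):

import numpy as np
from types import SimpleNamespace

# stand-ins for BayesOptSetup / BayesOptResults (names and fields assumed)
setup = SimpleNamespace(domain=lambda: [
    {'name': 'lr', 'type': 'continuous', 'domain': (1e-4, 1e-1)}])
results = SimpleNamespace(xs=np.array([[0.01], [0.05], [0.002]]),
                          ys=np.array([[0.80], [0.62], [0.91]]))

x_next, bo = propose_hypers(results, setup)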
Example 6
def online_bo(initial_grids,
              w_,
              bounds,
              garden_index,
              tol=0.5,
              maximize=False,
              acquisition="LCB_MCMC",  # MCMC model types require the *_MCMC acquisitions
              model="GP_MCMC"):
    _, _, Y_init = all_constraints(w_, garden_index, initial_grids)
    loss = 1000
    current_iter = 0
    X_step = initial_grids.reshape(1, initial_grids.size)
    Y_step = np.empty((1, 1))
    Y_step[0, 0] = Y_init
    grids = initial_grids

    counts_satisfied = False
    while not counts_satisfied:
        print("ITERATION {}".format(current_iter))
        print("LOSS {}".format(loss))

        bo_step = BayesianOptimization(f=None,
                                       model_type=model,
                                       domain=bounds,
                                       X=X_step,
                                       Y=Y_step,
                                       maximize=maximize,
                                       acquisition_type=acquisition)

        x_next = bo_step.suggest_next_locations()
        grids = np.append(grids, x_next.reshape(1, 2), axis=0)
        y_next, grids, counts = all_constraints_online(w_, garden_index, grids)
        counts_satisfied = counts <= 0
        y_tmp = np.empty((1, 1))
        y_tmp[0, 0] = y_next
        X_step = np.vstack((X_step, x_next))
        Y_step = np.vstack((Y_step, y_tmp))
        loss = y_tmp[0, 0]
        current_iter += 1

    return grids
Example 7
    def get_next_hparams(self, sample_x=None, sample_y=None, pending_x=None):
        """
        This function suggest the next list hyperparameters to evaluate according a given sample
        of evaluated list of hyperparameters.

        :param sample_x: A list of dictionary that define all tested hyperparameters.
        :param sample_y: A list that give the score of each list of tested hyperparameters
        :param pending_x: A list of dictionary that define all hyperparameters that are evaluating
                          right now by another process.
        :return: The next list of hyperparameters to evaluate.
        """

        if len(sample_x) < self.initial_random_point:
            random_opt = get_optimizer(self.rand_space, "rand")
            return random_opt.get_next_hparams()

        else:
            sample = np.array([[_dict[key] for key in self.keys]
                               for _dict in sample_x])
            sample_y = np.array([[y] for y in sample_y])  # GPyOpt expects Y as a 2-d array

            if pending_x is not None:
                pending_x = np.array([[_dict[key] for key in self.keys]
                                      for _dict in pending_x])

            # We define the surrogate Model
            bo = BayesianOptimization(
                f=None,
                model_type=self.model,
                acquisition_type=self.acq_fct,
                domain=list(self.hp_space.space.values()),
                X=sample,
                Y=sample_y,
                de_duplication=True  # required to consider the pending hparams.
            )

            hp_list = bo.suggest_next_locations(pending_X=pending_x)
            return self._list_to_dict(hp_list[0])
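
The dict-to-array conversion above relies on a fixed key order shared by the encoding and decoding steps. An illustrative round trip (keys and values made up):

import numpy as np

keys = ['lr', 'momentum']
sample_x = [{'lr': 0.01, 'momentum': 0.9}, {'lr': 0.10, 'momentum': 0.8}]
sample = np.array([[d[k] for k in keys] for d in sample_x])  # shape (2, 2)
next_hp = dict(zip(keys, sample[0]))  # back to {'lr': 0.01, 'momentum': 0.9}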
Example 8
def grid_for_platform(platform, bounds, grids, initial):
    X_step = np.zeros((1, 2))
    Y_init = 3
    Y_step = np.empty((1, 1))
    Y_step[0, 0] = Y_init
    loss = 1000
    current_iter = 0
    while loss > 0.0:
        bo_step = BayesianOptimization(f=None, model_type="GP", domain=bounds,
                                       X=X_step, Y=Y_step,
                                       maximize=False, acquisition_type="LCB")

        x_next = bo_step.suggest_next_locations()
        #print("ITERATION {}".format(grids.shape))
        tmp_point = Point(x_next[0])
        if initial and tmp_point.within(platform):
            grids = x_next.reshape(1, 2)
            current_iter += 1
            initial = False
        elif tmp_point.within(platform):
            tmp_grid = np.append(grids, x_next.reshape(1, 2), axis=0)
            distances = np.linalg.norm(tmp_grid - tmp_grid[:, None], axis=-1)
            mask = ~np.eye(distances.shape[0], dtype=bool)  # drop the zero self-distances
            if np.all(distances[mask] >= 1.9):
                grids = np.append(grids, x_next.reshape(1, 2), axis=0)
                loss = count_loss(grids, platform)
            else:
                loss = max(count_loss(grids, platform), 2 - np.amin(distances[mask]))
        else:
            loss = 1000
        if current_iter >= 1:
            current_iter += 1

        # record the new observation so the surrogate actually learns from it
        y_tmp = np.empty((1, 1))
        y_tmp[0, 0] = loss
        X_step = np.vstack((X_step, x_next))
        Y_step = np.vstack((Y_step, y_tmp))

    return grids
Example 9
constraints = [
    {
        'name': 'constr_1',
        'constraint': 'x[:,0] + x[:,1] + x[:,2] + x[:,3] - 90'
    },
]

#%% perform Bayesian optimization
batch_optimizer = BayesianOptimization(f=None,
                                       domain=bds,
                                       constraints=constraints,
                                       model_type='GP',
                                       acquisition_type='EI',
                                       acquisition_jitter=0.1,
                                       X=X_init,
                                       Y=Y_init_1,
                                       evaluator_type='local_penalization',
                                       batch_size=1,
                                       num_cores=16,
                                       maximize=False)  # GPyOpt minimizes by default (there is no 'minimize' kwarg)

batch_x_next = batch_optimizer.suggest_next_locations()

print(batch_x_next)

cond = np.array(['AgNO3', 'PVA', 'NaOH', 'Hydrazine', 'Flow rate'])  # variable names for the suggested conditions

np.savetxt('new_conditions.csv', batch_x_next, delimiter=",")

#%%
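
GPyOpt evaluates each 'constraint' expression on the candidate array x and treats a point as feasible when the expression is <= 0, so the constraint above enforces x[:,0] + x[:,1] + x[:,2] + x[:,3] <= 90. A self-contained sketch with a toy objective (bds, X_init and Y_init_1 above come from the original script and are not reproduced here):

import numpy as np
from GPyOpt.methods import BayesianOptimization

toy_bds = [{'name': 'var_{}'.format(i), 'type': 'continuous',
            'domain': (0, 50)} for i in range(4)]
toy_constraints = [{'name': 'sum_le_90',
                    'constraint': 'x[:,0] + x[:,1] + x[:,2] + x[:,3] - 90'}]
X0 = np.random.uniform(0, 20, size=(5, 4))         # feasible initial points
Y0 = np.sum((X0 - 10) ** 2, axis=1, keepdims=True)

opt = BayesianOptimization(f=None, domain=toy_bds, constraints=toy_constraints,
                           X=X0, Y=Y0, acquisition_type='EI')
print(opt.suggest_next_locations())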
Example 10
def run_bo_(w_train_red,
            x_train_red,
            x_rain_sun,
            bottom_top_heights,
            output_attrib,
            all_inds,
            bounds,
            model="GP_MCMC",
            acquisition="LCB",
            maximize=False,
            tol=0.009,
            samples_to_optimize=1):
    w_ = w_train_red
    x_ = x_train_red
    x_1 = x_train_red[samples_to_optimize, :]
    x_1 = x_1.reshape(1, x_1.shape[0])
    true_occ1 = x_1[0, 0]
    true_occ2 = x_1[0, 1]
    print("TRUE OCCLUSION 1 {}".format(true_occ1))
    print("TRUE OCCLUSION 2 {}".format(true_occ2))

    results = np.empty((1, 11))
    gp_results = np.empty((1, 11))
    index = samples_to_optimize

    X_init = np.random.normal(size=(1, 11))
    Y_init, x_gh, w_ = black_box_function(0, all_inds, x_, x_rain_sun, w_,
                                          bottom_top_heights, output_attrib,
                                          X_init)
    loss = 1
    current_iter = 0
    X_step = X_init
    Y_step = Y_init
    print("\n\nProcessing sample {}\n\n".format(index))
    context = {}
    while tol < loss:
        print("TOL {} and LOSS {}".format(tol, loss))
        print("ITER {} for sample {}".format(current_iter, index))

        bo_step = BayesianOptimization(f=None,
                                       model_type=model,
                                       domain=bounds,
                                       X=X_step,
                                       Y=Y_step,
                                       maximize=maximize,
                                       acquisition_type=acquisition)
        if len(context) >= 1:
            try:
                x_next = bo_step.suggest_next_locations(context=context)
            except TypeError:
                # older GPyOpt versions do not accept a context argument
                x_next = bo_step.suggest_next_locations()
        else:
            x_next = bo_step.suggest_next_locations()
        y_next, x_gh, w_ = black_box_function(index, all_inds, x_, x_rain_sun,
                                              w_, bottom_top_heights,
                                              output_attrib, x_next)
        #print("NUM_FIXED = {}".format(len(context)))
        #print("CONTEXT {}".format(context))
        fixed_platforms = X_step[np.argmin(Y_step), :]
        if min_loss < 0.01:
            improvement = 0.019
        #if y_next[0,0] < min_loss:#min_loss - y_next[0,0] > improvement and not no_more_fix:
        #    # fix the next parameter
        #    if not fix_h:
        #        context["x{}".format(ind_to_fix)] = fixed_platforms[ind_x_fix]
        #        context["y{}".format(ind_to_fix)] = fixed_platforms[ind_y_fix]
        #        ind_x_fix += 2
        #        ind_y_fix += 2
        #    else:
        #        context["h{}".format(ind_to_fix)] = fixed_platforms[ind_y_fix]
        #        #ind_x_fix += 2
        #        ind_y_fix += 1

        #    ind_to_fix += 1
        #    if ind_to_fix >= 4:
        #        fix_h = True
        #        ind_to_fix = 1

        x_[index, :2] = x_gh[0, :]
        X_step = np.vstack((X_step, x_next))
        Y_step = np.vstack((Y_step, y_next))

        loss = y_next[0, 0]

        current_iter += 1
    print("SMALLEST LOSS {} ".format(np.min(Y_step)))
    #print("OPT PARAMS {}".format(X_step[np.argmin(Y_step), :]))
    results[0, :] = X_step[np.argmin(Y_step), :]

    opt_ind = bo_step.model.predict(bo_step.X)[0].argmin()
    gp_results[0, :] = bo_step.X[opt_ind, :]
    print(bo_step.X[opt_ind, :])
    return results, gp_results
Example 11
def three_per_platform(w_,
                       garden_index,
                       tol=0.0,
                       maximize=False,
                       acquisition="LCB_MCMC",  # MCMC model types require the *_MCMC acquisitions
                       model="GP_MCMC"):
    grids = init_three_per_platform(w_, garden_index)
    poly_list = get_polygon(w_, garden_index)

    # do one platform at a time, only have three (six) variables
    for platform_index in range(5):
        polygon = poly_list[platform_index]

        _, Y_init, _ = all_constraints(w_,
                                       garden_index,
                                       grids[platform_index *
                                             3:platform_index * 3 + 3, :],
                                       platform_polygon=polygon,
                                       platform_index=platform_index)
        loss = 1000
        current_iter = 0
        X_step = grids[platform_index * 3:platform_index * 3 + 3, :].reshape(1, 6)  # pair Y_init with the grid it came from

        Y_step = np.empty((1, 1))
        Y_step[0, 0] = Y_init
        minx, miny, maxx, maxy = polygon.bounds
        in_constraint = 4
        pole_2 = 5
        bounds = [
            {
                'name': 'p0x',
                'type': 'continuous',
                'domain': (minx, maxx)
            },
            {
                'name': 'p0y',
                'type': 'continuous',
                'domain': (miny, maxy)
            },
            {
                'name': 'p1x',
                'type': 'continuous',
                'domain': (minx, maxx)
            },
            {
                'name': 'p1y',
                'type': 'continuous',
                'domain': (miny, maxy)
            },
            {
                'name': 'p2x',
                'type': 'continuous',
                'domain': (minx, maxx)
            },
            {
                'name': 'p2y',
                'type': 'continuous',
                'domain': (miny, maxy)
            },
        ]

        while loss > tol or in_constraint > 0 or pole_2 > 0:
            print("ITERATION {} for platform {} has loss {}".format(
                current_iter, platform_index, loss))
            print("IN_CONSTRINT {}".format(in_constraint))
            print("POLE_2POLE {}".format(pole_2))
            bo_step = BayesianOptimization(f=None,
                                           model_type=model,
                                           domain=bounds,
                                           X=X_step,
                                           Y=Y_step,
                                           maximize=maximize,
                                           acquisition_type=acquisition)

            x_next = bo_step.suggest_next_locations()
            grids[platform_index * 3:platform_index * 3 +
                  3, :] = x_next.reshape(-1, 2)
            in_constraint, pole_2, pole_plat = all_constraints(
                w_,
                garden_index,
                grids[platform_index * 3:platform_index * 3 + 3, :],
                platform_polygon=polygon,
                platform_index=platform_index)
            y_next = max(in_constraint, pole_2)
            y_tmp = np.empty((1, 1))
            y_tmp[0, 0] = y_next
            X_step = np.vstack((X_step, x_next))
            Y_step = np.vstack((Y_step, y_tmp))
            loss = y_next
            current_iter += 1

        grids[platform_index * 3:platform_index * 3 +
              3, :] = X_step[-1, :].reshape(3, 2)

    # go through grid, make the distance to platform constraint satisfied
    for i in range(grids.shape[0]):

        _, _, Y_init = all_constraints(w_, garden_index, grids[i, :])
        loss = 1000
        current_iter = 0
        X_step = grids[i, :].reshape(1, 2)  # pair Y_init with the current pole position

        Y_step = np.empty((1, 1))
        Y_step[0, 0] = Y_init
        mx = grids[i, 0]
        my = grids[i, 1]
        in_constraint = 4
        pole_plat = 5
        pole_2 = 5
        bounds = [
            {
                'name': 'p0x',
                'type': 'continuous',
                'domain': (mx - 0.8, mx + 0.8)
            },
            {
                'name': 'p0y',
                'type': 'continuous',
                'domain': (my - 0.8, my + 0.8)
            },
        ]
        while loss > 0.0 or pole_2 > 0:
            print("ITERATION {} for pole {} has loss {}".format(
                current_iter, i, loss))

            print("POLE_plat {}".format(pole_plat))
            bo_step = BayesianOptimization(f=None,
                                           model_type=model,
                                           domain=bounds,
                                           X=X_step,
                                           Y=Y_step,
                                           maximize=maximize,
                                           acquisition_type=acquisition)

            x_next = bo_step.suggest_next_locations()
            grids[i, :] = x_next.reshape(1, 2)
            _, pole_2, pole_plat = all_constraints(w_, garden_index,
                                                   grids[i, :])
            y_next = max(pole_plat, pole_2)
            y_tmp = np.empty((1, 1))
            y_tmp[0, 0] = y_next
            X_step = np.vstack((X_step, x_next))
            Y_step = np.vstack((Y_step, y_tmp))
            loss = y_tmp[0, 0]
            current_iter += 1

        grids[i, :] = X_step[-1, :].reshape(1, 2)

    return grids
Example 12
class OptExecutorGpyopt(OptExecutor):
    GPY_MODEL_GP = "GP"
    GPY_MODEL_GPMCMC = "GP_MCMC"
    GPY_MODEL_SPARSEGP = "sparseGP"
    GPY_MODEL_WARPEDGP = "warpedGP"
    GPY_MODEL_INPUTWARPEDGP = "InputWarpedGP"
    GPY_MODEL_RF = "RF"
    GPY_AQUISITION_EI = "EI"
    GPY_AQUISITION_EIMCMC = "EI_MCMC"  # (requires GP_MCMC model).
    GPY_AQUISITION_MPI = "MPI"
    GPY_AQUISITION_MPIMCMC = "MPI_MCMC"  # (requires GP_MCMC model).
    GPY_AQUISITION_LCB = "LCB"
    GPY_AQUISITION_LCBMCMC = "LCB_MCMC"  # (requires GP_MCMC model).
    GPY_TYPE_BANDIT = "bandit"
    GPY_TYPE_DISCRETE = "discrete"
    GPY_TYPE_CONTINUOUS = "continuous"
    GPY_TYPE_CATEGORICAL = "categorical"

    LOWER_LIMIT_QNT_3 = 3  # Experimentally, does not work with fewer than 3 rows of data

    def __init__(self, json_loaded):
        super().__init__(json_loaded)

        if COMMON_SEED in json_loaded.keys():
            self.set_randomseed(json_loaded[COMMON_SEED])
            np.random.seed(self.rand_seed)

        scope_params = json_loaded[COMMON_SCOPE]
        scope_keys = [
            x for scope_list in scope_params for x in scope_list.keys()
        ]

        domain = []
        for i, key in enumerate(scope_keys):
            try:
                domain.append({
                    'name': key,
                    'type': scope_params[i][key][0],
                    'domain': tuple(scope_params[i][key][1:])
                })
            except AttributeError:
                print('error occurred [{}] on space creation.'.format(key))

        self.domain = domain
        if COMMON_RESULTS in json_loaded:
            self.Y = None
            Y_base = [json_loaded[COMMON_RESULTS][COMMON_LOSSES]]

            X_base = json_loaded[COMMON_RESULTS][COMMON_VALS]
            self.X = None
            if 0 < len(Y_base[0]):
                y_evals = np.empty(shape=[0, 1])
                x_evals = np.empty(shape=[0, len(scope_keys)])
                for i, y in enumerate(Y_base[0]):
                    y_evals = np.vstack([y_evals, y])
                    x = np.array([X_base[key][i] for key in scope_keys])
                    x_evals = np.vstack([x_evals, x])

                self.Y = y_evals
                self.X = x_evals

    @stop_watch_add
    def suggest(self):
        """Return the next parameter suggestion

        >>> import json
        >>> json_str='{"seed":0,"lib":"hyperopt","algo":"tpe","scope":[{"x":["uniform",-10,10]},'
        >>> json_str+='{"y":["uniform",-10,10]}],'
        >>> json_str+='"max_evals":1,'
        >>> json_str+='"results":{"losses":[3.4620,3.192,28.963,19.64,20.458],'
        >>> json_str+='"statuses":["ok","ok","ok","ok","ok"],'
        >>> json_str+='"vals":{"y":[-0.16774,0.3122,-2.416,0.27455,-3.2827],'
        >>> json_str+='"x":[1.857,1.760,4.785,-4.498,2.837]}}}'
        >>> exec=ExecutorFactory.get_executor(json_str)
        >>> reval=json.loads(exec.suggest())

        >>> reval["alog"] == 'tpe'
        True
        >>> reval["scope"]["x"][0] == 4.30378732744839
        True
        >>> reval["scope"]["y"][0] == 0.9762700785464951
        True
        """
        _logger = getLogger(__name__)
        id_qnt = int(self.json_loaded[COMMON_MAXEVALS])
        algorithm_name = "random"

        histo_qnt = 0 if self.Y is None else len(self.Y.ravel())
        if histo_qnt >= self.LOWER_LIMIT_QNT_3:
            algorithm_name = self.json_loaded[COMMON_ALGO]
            if len(algorithm_name.split(",")) > 1:
                acquisition_str = algorithm_name.split(",")[1]
                algorithm_str = algorithm_name.split(",")[0]
            else:
                acquisition_str = self.GPY_AQUISITION_EI
            self.myBopt = BayesianOptimization(
                f=None,
                domain=self.domain,
                model_type=algorithm_str,
                acquisition_type=acquisition_str,
                Y=self.Y,
                X=self.X,
                model_update_interval=2)

            next_x = self.myBopt.suggest_next_locations()
        else:
            _logger.warning(
                "Initial data is not enough. Create random design.")
            from GPyOpt.core.task.space import Design_space
            from GPyOpt.experiment_design.random_design import RandomDesign

            generate_count = self.LOWER_LIMIT_QNT_3 - histo_qnt
            design = RandomDesign(Design_space(self.domain, None))
            next_x = design.get_samples(generate_count)

        arg_names = [space['name'] for space in self.domain]
        statuses = ["new" for x in next_x]
        vals = defaultdict(list)
        for i in range(len(statuses)):
            for j, key in enumerate(arg_names):
                vals[key].append(next_x[i][j])

        results = dict(algo=algorithm_name, statuses=statuses, vals=vals)

        return results
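
The random fallback used above can be exercised on its own; a minimal sketch with an illustrative domain:

from GPyOpt.core.task.space import Design_space
from GPyOpt.experiment_design.random_design import RandomDesign

domain = [{'name': 'x', 'type': 'continuous', 'domain': (-10, 10)},
          {'name': 'k', 'type': 'discrete', 'domain': (0, 1, 2)}]
design = RandomDesign(Design_space(domain, None))
print(design.get_samples(3))  # three random points respecting each variable's domain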
Example 13
def bo_constraint_satisfaction(initial_grids,
                               w_,
                               bounds,
                               garden_index,
                               tol=0.5,
                               maximize=False,
                               acquisition="LCB_MCMC",  # MCMC model types require the *_MCMC acquisitions
                               model="GP_MCMC"):
    results = np.empty((11, 2))
    gp_results = np.empty((11, 2))

    _, _, Y_init = all_constraints(w_, garden_index, initial_grids)
    loss = 1000
    current_iter = 0
    X_step = initial_grids.reshape(1, 22)
    Y_step = np.empty((1, 1))
    Y_step[0, 0] = Y_init

    context = {}
    num_fixed_poles = 0
    fixed_ind = set()
    counts_satisfied = False
    final_grid = np.array(list(context.values()))
    while not counts_satisfied:
        print("ITERATION {}".format(current_iter))
        print("LOSS {}".format(loss))

        bo_step = BayesianOptimization(f=None,
                                       model_type=model,
                                       domain=bounds,
                                       X=X_step,
                                       Y=Y_step,
                                       maximize=maximize,
                                       acquisition_type=acquisition)
        try:
            x_next = bo_step.suggest_next_locations(context=context)
        except TypeError:
            # older GPyOpt versions do not accept a context argument
            x_next = bo_step.suggest_next_locations()
        y_next, good_poles, counts = all_constraints(w_, garden_index, x_next)

        print("good poles {}".format(good_poles))
        print("context {}".format(context))
        num_fixed_poles = len(context)
        print("num fixed poles {}".format(num_fixed_poles))
        # good_indices = list(chain.from_iterable((i, i + 1) for i in good_poles))
        fixed_poles = X_step[np.argmin(Y_step), :]  # [good_indices]
        for p in good_poles:
            fixed_ind.add(p)
            context["p{}_{}".format(p, 1)] = fixed_poles[p]
            context["p{}_{}".format(p, 2)] = fixed_poles[p + 1]
        if len(context) >= 2:
            counts, _ = count_constraint(x_next, w_, garden_index)
            counts_satisfied = counts <= 0
            print("Are the counts satisfied? {}".format(counts_satisfied))

        y_tmp = np.empty((1, 1))
        y_tmp[0, 0] = y_next
        X_step = np.vstack((X_step, x_next))
        Y_step = np.vstack((Y_step, y_tmp))
        loss = y_tmp[0, 0]
        current_iter += 1
        if counts_satisfied:
            final_grid = np.array(list(context.values()))
    results = X_step[np.argmin(Y_step), :].reshape(11, 2)

    opt_ind = bo_step.model.predict(bo_step.X)[0].argmin()
    gp_results = bo_step.X[opt_ind, :].reshape(11, 2)
    return results, gp_results, fixed_ind, final_grid
Example 14
def bo_in_platform(platform_ind, w_, garden_index, grids):
    u = 8
    v = 5
    if np.ndim(w_) == 1:
        w_ = w_.reshape((1, -1))
    if w_.shape[1] == ((u + 3) * v - 2):
        w_ = ut.convert_toxyz(w_, v, u, w_.shape[0])
    if grids.size <= 2:
        grids = grids.reshape(1, 2)
    else:
        grids = grids.reshape(int(grids.size / 2), 2)

    # NOTE: the reshaped input is then discarded and a fresh random 15x2
    # layout is used as the working grid
    grids = np.random.uniform(low=0, high=1, size=(15, 2))
    for platform_index in platform_ind:
        ind_use = np.arange(u * 2) + platform_index * u * 2
        points = w_[garden_index, ind_use].reshape(u, -1)
        points = np.concatenate([points, points[0, :].reshape(1, 2)], axis=0)
        poly = Polygon(points)

        minx, miny, maxx, maxy = poly.bounds
        bounds = [{
            'name': 'p1_1',
            'type': 'continuous',
            'domain': (minx, maxx)
        }, {
            'name': 'p1_2',
            'type': 'continuous',
            'domain': (miny, maxy)
        }, {
            'name': 'p2_1',
            'type': 'continuous',
            'domain': (minx, maxx)
        }, {
            'name': 'p2_2',
            'type': 'continuous',
            'domain': (miny, maxy)
        }, {
            'name': 'p3_1',
            'type': 'continuous',
            'domain': (minx, maxx)
        }, {
            'name': 'p3_2',
            'type': 'continuous',
            'domain': (miny, maxy)
        }]
        # sample x within [minx, maxx] and y within [miny, maxy]
        poles = np.empty((3, 2))
        poles[:, 0] = np.random.uniform(low=minx, high=maxx, size=3)
        poles[:, 1] = np.random.uniform(low=miny, high=maxy, size=3)
        _, _, Y_init = all_constraints(w_,
                                       garden_index,
                                       poles,
                                       final_check=True)
        poles = poles.reshape(1, 6)
        Y_step = np.empty((1, 1))
        Y_step[0, 0] = Y_init

        count = 0
        context = {}
        while count < 3:
            bo_step = BayesianOptimization(f=None,
                                           model_type="GP_MCMC",
                                           domain=bounds,
                                           X=poles,
                                           Y=Y_step,
                                           maximize=False,
                                           acquisition_type="LCB_MCMC")
            print("COUNT IN EXTERNAL LOOP {} for platform {}".format(
                count, platform_index))

            x_next = bo_step.suggest_next_locations()
            # evaluate the new suggestion, not the stale poles array
            y_next, good_poles, counts = all_constraints(w_,
                                                         garden_index,
                                                         x_next.reshape(3, 2),
                                                         final_check=True)
            print("good poles {}".format(good_poles))
            print("context {}".format(context))
            num_fixed_poles = len(context)
            print("num fixed poles {}".format(num_fixed_poles))
            # good_indices = list(chain.from_iterable((i, i + 1) for i in good_poles))
            fixed_poles = poles[np.argmin(Y_step), :]  # [good_indices]
            for p in good_poles:
                context["p{}_{}".format(p, 1)] = fixed_poles[p]
                context["p{}_{}".format(p, 2)] = fixed_poles[p + 1]
                count += 1
            y_tmp = np.empty((1, 1))
            y_tmp[0, 0] = y_next
            poles = np.vstack((poles, x_next))
            Y_step = np.vstack((Y_step, y_tmp))
            loss = y_tmp[0, 0]
            if loss <= 0:
                count += 1

        results = poles[np.argmin(Y_step), :].reshape(3, 2)
        grids[platform_index * 3:platform_index * 3 + 3, :] = results

    return grids
Example 15
def main(n_interv=3):
    if n_interv == 2:
        domain = [{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (0, 6)
        }, {
            'name': 'var_2',
            'type': 'continuous',
            'domain': (0, 0.5)
        }]
        kern = GPy.kern.RBF(input_dim=2,
                            variance=1,
                            lengthscale=[1., 0.05],
                            ARD=True)
        model = GPModel(kernel=kern, noise_var=0.1, max_iters=0)

        teacher_env = create_teacher_env(obs_from_training=True)
        student_final_env = small_base_cenv_fn()

        # defined in this branch as well so the policy can be rebuilt after
        # optimization (init_teaching_policy is called unconditionally below)
        def init_teaching_policy(thresholds, name=None):
            thresholds = np.array(thresholds)
            if thresholds.ndim == 2:
                thresholds = thresholds[0]
            return SingleSwitchPolicy(thresholds, name=name)

        def bo_objective(thresholds):
            policy = init_teaching_policy(thresholds)
            return evaluate_single_switch_policy(policy, teacher_env,
                                                 student_final_env)
    elif n_interv == 3:
        domain = [{
            'name': 'var_1',
            'type': 'continuous',
            'domain': (-0.5, 5.5)
        }, {
            'name': 'var_2',
            'type': 'continuous',
            'domain': (0, 0.2)
        }, {
            'name': 'var_3',
            'type': 'continuous',
            'domain': (-0.5, 5.5)
        }, {
            'name': 'var_4',
            'type': 'continuous',
            'domain': (0, 0.2)
        }, {
            'name': 'var_5',
            'type': 'discrete',
            'domain': (0, 1, 2)
        }, {
            'name': 'var_6',
            'type': 'discrete',
            'domain': (0, 1, 2)
        }, {
            'name': 'var_7',
            'type': 'discrete',
            'domain': (0, 1, 2)
        }]

        kern = GPy.kern.RBF(input_dim=7,
                            variance=1,
                            lengthscale=[1., 0.05, 1, 0.05, 0.5, 0.5, 0.5],
                            ARD=True)
        kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(1, 1),
                                    np.array([0, 2]))
        kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(0.05, 0.02),
                                    np.array([1, 3]))
        kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(0.2, 0.2),
                                    np.array([4, 5, 6]))
        kern.variance.set_prior(GPy.priors.Gamma.from_EV(1, 0.2))
        model = GPModel(kernel=kern, noise_var=0.05, max_iters=1000)

        teacher_env = create_teacher_env(obs_from_training=True)
        student_final_env = small_base_cenv_fn()

        def init_teaching_policy(params, name=None):
            params = np.squeeze(np.array(params))
            thresholds = params[:4]
            thresholds = thresholds.reshape(2, 2)
            available_actions = params[4:].astype(np.int64)
            policy = SingleSwitchPolicy(thresholds,
                                        available_actions,
                                        name=name)
            return policy

        def bo_objective(params):
            policy = init_teaching_policy(params)
            return evaluate_single_switch_policy(policy, teacher_env,
                                                 student_final_env)

    # Logging dir
    exp_starting_time = datetime.now().strftime('%d_%m_%y__%H_%M_%S')
    results_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               os.pardir, os.pardir, os.pardir, 'results',
                               'flake')
    base_dir = os.path.join(results_dir, 'teacher_training', exp_starting_time)
    os.makedirs(base_dir, exist_ok=True)

    my_bo = BayesianOptimization(bo_objective,
                                 domain=domain,
                                 initial_design_numdata=10,
                                 initial_design_type='random',
                                 acquisition_type='LCB',
                                 maximize=True,
                                 normalize_Y=True,
                                 model_update_interval=1,
                                 model=model)

    my_bo.suggest_next_locations()  # Creates the GP model
    my_bo.model.model['Gaussian_noise.variance'].set_prior(
        GPy.priors.Gamma.from_EV(0.01, 0.1))

    t = time.time()
    my_bo.run_optimization(20,
                           report_file=os.path.join(base_dir, 'bo_report.txt'),
                           evaluations_file=os.path.join(
                               base_dir, 'bo_evaluations.csv'),
                           models_file=os.path.join(base_dir, 'bo_model.csv'))
    print(f'Optimization complete in {time.time() - t}')
    print(f'Optimal threshold: {my_bo.x_opt}')
    print(f'Optimal return: {my_bo.fx_opt}')
    np.savez(os.path.join(base_dir, 'solution.npz'),
             xopt=my_bo.x_opt,
             fxopt=my_bo.fx_opt)
    trained_policy = init_teaching_policy(my_bo.x_opt)
    save_path = os.path.join(base_dir, 'trained_teacher')
    trained_policy.save(save_path)
Example 16
def main():
    domain = [{
        'name': 'var_1',
        'type': 'continuous',
        'domain': (-200, 200)
    }, {
        'name': 'var_2',
        'type': 'continuous',
        'domain': (0, 6)
    }, {
        'name': 'var_3',
        'type': 'discrete',
        'domain': (0, 1)
    }, {
        'name': 'var_4',
        'type': 'discrete',
        'domain': (0, 1)
    }]
    kern = GPy.kern.RBF(input_dim=4,
                        variance=1,
                        lengthscale=[20, 1, 0.1, 0.1],
                        ARD=True)
    kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(20, 4), np.array([0]))
    kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(1, 0.3),
                                np.array([1]))
    kern.lengthscale.priors.add(GPy.priors.Gamma.from_EV(0.2, 0.2),
                                np.array([2, 3]))
    kern.variance.set_prior(GPy.priors.Gamma.from_EV(1, 0.2))

    model = GPModel(kernel=kern, noise_var=0.01, max_iters=1000)
    # bo_objective = lambda x: x[0, 0] * x[0, 2] + x[0, 1] * x[0, 3]

    exp_starting_time = datetime.now().strftime('%d_%m_%y__%H_%M_%S')
    base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            os.pardir, os.pardir, os.pardir, 'results',
                            'lunar_lander', 'teacher_training',
                            exp_starting_time)
    os.makedirs(base_dir, exist_ok=True)

    def init_teaching_policy(params, name=None):
        params = np.squeeze(np.array(params))
        params = np.copy(params)
        thresholds = params[:2]
        available_actions = params[2:].astype(np.int64)
        policy = SingleSwitchPolicy(thresholds, available_actions)
        return policy

    def bo_objective(params):
        teacher_env_kwargs = dict(sensor_noise=[0.0] * 8,
                                  n_layers=2,
                                  B=120,
                                  time_steps_lim=int(1.5e6),
                                  original=False)
        policy_list = [init_teaching_policy(params) for _ in range(10)]
        return evaluate_parallel(policy_list,
                                 base_dir=base_dir,
                                 teacher_env_kwargs=teacher_env_kwargs)

    # Initialize with one value per configuration
    initial_X = np.array([[0, 3, 0, 0], [0, 3, 0, 1], [0, 3, 1, 0],
                          [0, 3, 1, 1]])

    my_bo = BayesianOptimization(bo_objective,
                                 domain=domain,
                                 initial_design_numdata=0,
                                 initial_design_type='random',
                                 acquisition_type='LCB',
                                 maximize=True,
                                 normalize_Y=True,
                                 model_update_interval=1,
                                 X=initial_X,
                                 model=model)
    my_bo.suggest_next_locations()  # Creates the GP model
    my_bo.model.model['Gaussian_noise.variance'].set_prior(
        GPy.priors.Gamma.from_EV(0.01, 0.1))

    t = time.time()
    my_bo.run_optimization(10,
                           report_file=os.path.join(base_dir, 'bo_report.txt'),
                           evaluations_file=os.path.join(
                               base_dir, 'bo_evaluations.csv'),
                           models_file=os.path.join(base_dir, 'bo_model.csv'),
                           verbosity=True)
    print(f'Optimization complete in {time.time() - t}')
    print(f'Policy with optimal observation: {my_bo.x_opt}')
    print(f'Value of the optimal observation: {my_bo.fx_opt}')

    np.savez(os.path.join(base_dir, 'solution.npz'),
             xopt=my_bo.x_opt,
             fxopt=my_bo.fx_opt,
             X=my_bo.X,
             Y=my_bo.Y)
    trained_policy = init_teaching_policy(my_bo.x_opt)
    save_path = os.path.join(base_dir, 'trained_teacher')
    trained_policy.save(save_path)
Example 17
def grid_bo(w_, garden_index, grids, fit_counts=True):
    # first compute convex hull of grids
    # parametrize problem by just the center of the grid
    points_list = []
    for i in range(grids.shape[0]):
        p = Point(grids[i, :])
        points_list.append(p)

    mtp = MultiPoint(points_list)
    cvx_hull = mtp.convex_hull
    grid_center = cvx_hull.centroid
    grid_center_x = grid_center.xy[0][0]
    grid_center_y = grid_center.xy[1][0]
    gminx, gminy, gmaxx, gmaxy = cvx_hull.bounds

    mtp_g = convex_hull_garden(w_, garden_index).convex_hull
    minx, miny, maxx, maxy = mtp_g.bounds

    bounds = [{
        'name': 'center_x',
        'type': 'continuous',
        'domain': (gminx, gmaxx)
    }, {
        'name': 'center_y',
        'type': 'continuous',
        'domain': (gminy, gmaxy)
    }, {
        'name': 'rotate',
        'type': 'continuous',
        'domain': (0, 180)
    }]

    _, pp, cc = all_constraints(w_, garden_index, grids)
    tol = 0.19
    Y_init = max(cc, pp)  # penalize whichever constraint is violated more
    loss = 1000
    current_iter = 0
    X_step = np.zeros((1, 3))
    X_step[0, 0] = grid_center_x
    X_step[0, 1] = grid_center_y
    Y_step = np.empty((1, 1))
    Y_step[0, 0] = Y_init
    prev_center = X_step

    while loss > tol:
        if current_iter > 200:
            print("OVER THE LIMIT")
            opt_ind = bo_step.model.predict(bo_step.X)[0].argmin()
            x_opt = bo_step.X[opt_ind, :]
            x_opt = x_opt.reshape(1, 3)

            x_opt = move_grid_by_center(x_opt, grids, prev_center)
            return x_opt

        bo_step = BayesianOptimization(f=None,
                                       model_type="GP",
                                       domain=bounds,
                                       X=X_step,
                                       Y=Y_step,
                                       maximize=False,
                                       acquisition_type="EI")

        x_next = bo_step.suggest_next_locations()

        grids = move_grid_by_center(x_next, grids, prev_center)
        prev_center = x_next
        _, pp, cc = all_constraints(w_, garden_index, grids)
        loss = max(pp, cc)
        print("ITER {} LOSS {}".format(current_iter, loss))
        y_tmp = np.empty((1, 1))
        y_tmp[0, 0] = loss
        X_step = np.vstack((X_step, x_next))
        Y_step = np.vstack((Y_step, y_tmp))

        current_iter += 1

    return grids
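
Several of these examples recover two answers at the end: the best point actually observed (argmin over Y) and the evaluated point the surrogate itself ranks best (argmin of the posterior mean). A sketch of both, assuming a fitted ask-tell optimizer bo_step as in the loops above:

import numpy as np

x_obs_best = bo_step.X[np.argmin(bo_step.Y), :]  # best observed point
means, _ = bo_step.model.predict(bo_step.X)      # posterior mean at evaluated points
x_gp_best = bo_step.X[means.argmin(), :]         # surrogate's preferred evaluated point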